/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "err.h"

static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
volatile int disk_util_exit = 0;

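/*
 * Round a buffer address up to the next page boundary. page_mask is the
 * system page size minus one, so with 4096-byte pages this computes
 * (buf + 4095) & ~4095: the first page-aligned address at or above buf.
 */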
#define PAGE_ALIGN(buf) \
	(char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)

#define JOB_START_TIMEOUT	(5 * 1000)

static void sig_int(int sig)
{
	if (threads) {
		if (is_backend)
			fio_server_got_signal(sig);
		else {
			log_info("\nfio: terminating on signal %d\n", sig);
			fflush(stdout);
			exit_value = 128;
		}

		fio_terminate_threads(TERMINATE_ALL);
	}
}

static void sig_show_status(int sig)
{
	show_running_run_stats();
}

static void set_sig_handlers(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGBREAK, &act, NULL);
#endif

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_show_status;
	act.sa_flags = SA_RESTART;
	sigaction(SIGUSR1, &act, NULL);

	if (is_backend) {
		memset(&act, 0, sizeof(act));
		act.sa_handler = sig_int;
		act.sa_flags = SA_RESTART;
		sigaction(SIGPIPE, &act, NULL);
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int __check_min_rate(struct thread_data *td, struct timeval *now,
			    enum fio_ddir ddir)
{
	unsigned long long bytes = 0;
	unsigned long iops = 0;
	unsigned long spent;
	unsigned long rate;
	unsigned int ratemin = 0;
	unsigned int rate_iops = 0;
	unsigned int rate_iops_min = 0;

	assert(ddir_rw(ddir));

	if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
		return 0;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	iops += td->this_io_blocks[ddir];
	bytes += td->this_io_bytes[ddir];
	ratemin += td->o.ratemin[ddir];
	rate_iops += td->o.rate_iops[ddir];
	rate_iops_min += td->o.rate_iops_min[ddir];

	/*
	 * if rate_bytes or rate_blocks is set, a sample window is running
	 */
	if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
		spent = mtime_since(&td->lastrate[ddir], now);
		if (spent < td->o.ratecycle)
			return 0;

		if (td->o.rate[ddir]) {
			/*
			 * check the bandwidth-specified rate
			 */
			if (bytes < td->rate_bytes[ddir]) {
				log_err("%s: min rate %u not met\n", td->o.name,
								ratemin);
				return 1;
			} else {
				rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
				if (rate < ratemin ||
				    bytes < td->rate_bytes[ddir]) {
					log_err("%s: min rate %u not met, got"
						" %luKB/sec\n", td->o.name,
							ratemin, rate);
					return 1;
				}
			}
		} else {
			/*
			 * check the iops-specified rate
			 */
			if (iops < rate_iops) {
				log_err("%s: min iops rate %u not met\n",
						td->o.name, rate_iops);
				return 1;
			} else {
				rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
				if (rate < rate_iops_min ||
				    iops < td->rate_blocks[ddir]) {
					log_err("%s: min iops rate %u not met,"
						" got %lu\n", td->o.name,
							rate_iops_min, rate);
					/* fail the check, like the bandwidth path above */
					return 1;
				}
			}
		}
	}

	td->rate_bytes[ddir] = bytes;
	td->rate_blocks[ddir] = iops;
	memcpy(&td->lastrate[ddir], now, sizeof(*now));
	return 0;
}

static int check_min_rate(struct thread_data *td, struct timeval *now,
			  uint64_t *bytes_done)
{
	int ret = 0;

	if (bytes_done[DDIR_READ])
		ret |= __check_min_rate(td, now, DDIR_READ);
	if (bytes_done[DDIR_WRITE])
		ret |= __check_min_rate(td, now, DDIR_WRITE);
	if (bytes_done[DDIR_TRIM])
		ret |= __check_min_rate(td, now, DDIR_TRIM);

	return ret;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		struct io_u *io_u;
		int i;

		io_u_qiter(&td->io_u_all, io_u, i) {
			if (io_u->flags & IO_U_F_FLIGHT) {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error, "td_io_queue");
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error, "td_io_queue");
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
	int ret;

	if (fio_file_open(f))
		return fio_io_sync(td, f);

	if (td_io_open_file(td, f))
		return 1;

	ret = fio_io_sync(td, f);
	td_io_close_file(td, f);
	return ret;
}

static inline void __update_tv_cache(struct thread_data *td)
{
	fio_gettime(&td->tv_cache, NULL);
}

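/*
 * Time is cached to avoid paying for a gettime call on every IO. The
 * cached value is only refreshed once every (tv_cache_mask + 1) calls;
 * in between, the slightly stale td->tv_cache is reused.
 */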
static inline void update_tv_cache(struct thread_data *td)
{
	if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
		__update_tv_cache(td);
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (in_ramp_time(td))
		return 0;
	if (!td->o.timeout)
		return 0;
	if (utime_since(&td->epoch, t) >= td->o.timeout)
		return 1;

	return 0;
}

static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
			       int *retptr)
{
	int ret = *retptr;

	if (ret < 0 || td->error) {
		int err = td->error;
		enum error_type_bit eb;

		if (ret < 0)
			err = -ret;

		eb = td_error_type(ddir, err);
		if (!(td->o.continue_on_error & (1 << eb)))
			return 1;

		if (td_non_fatal_error(td, eb, err)) {
			/*
			 * Continue with the I/Os in case of
			 * a non-fatal error.
			 */
			update_error_count(td, err);
			td_clear_error(td);
			*retptr = 0;
			return 0;
		} else if (td->o.fill_device && err == ENOSPC) {
			/*
			 * We expect to hit this error if
			 * fill_device option is set.
			 */
			td_clear_error(td);
			td->terminate = 1;
			return 1;
		} else {
			/*
			 * Stop the I/O in case of a fatal
			 * error.
			 */
			update_error_count(td, err);
			return 1;
		}
	}

	return 0;
}

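/*
 * Another thread (e.g. the periodic stats display) may have requested
 * fresh rusage numbers by setting ->update_rusage. Since getrusage() has
 * to run in this job's context, service the request here and signal
 * completion through ->rusage_sem.
 */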
static void check_update_rusage(struct thread_data *td)
{
	if (td->update_rusage) {
		td->update_rusage = 0;
		update_rusage_stat(td);
		fio_mutex_up(td->rusage_sem);
	}
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	struct fio_file *f;
	struct io_u *io_u;
	int ret, min_events;
	unsigned int i;

	dprint(FD_VERIFY, "starting loop\n");

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (!fio_file_open(f))
			continue;
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	check_update_rusage(td);

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		enum fio_ddir ddir;
		int ret2, full;

		update_tv_cache(td);
		check_update_rusage(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				td->terminate = 1;
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		if (!td->o.experimental_verify) {
			io_u = __get_io_u(td);
			if (!io_u)
				break;

			if (get_next_verify(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}

			if (td_io_prep(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}
		} else {
			if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
				break;

			while ((io_u = get_io_u(td)) != NULL) {
				if (IS_ERR(io_u)) {
					io_u = NULL;
					ret = FIO_Q_BUSY;
					goto reap;
				}

				/*
				 * We are only interested in the places where
				 * we wrote or trimmed IOs. Turn those into
				 * reads for verification purposes.
				 */
				if (io_u->ddir == DDIR_READ) {
					/*
					 * Pretend we issued it for rwmix
					 * accounting
					 */
					td->io_issues[DDIR_READ]++;
					put_io_u(td, io_u);
					continue;
				} else if (io_u->ddir == DDIR_TRIM) {
					io_u->ddir = DDIR_READ;
					io_u->flags |= IO_U_F_TRIMMED;
					break;
				} else if (io_u->ddir == DDIR_WRITE) {
					io_u->ddir = DDIR_READ;
					break;
				} else {
					put_io_u(td, io_u);
					continue;
				}
			}

			if (!io_u)
				break;
		}

		if (td->o.verify_async)
			io_u->end_io = verify_io_u_async;
		else
			io_u->end_io = verify_io_u;

		ddir = io_u->ddir;

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				f = io_u->file;
				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
			}
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret, "td_io_queue");
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first. Note that we can get BUSY even
		 * without IO queued, if the system is resource starved.
		 */
reap:
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_events = min(td->o.iodepth_batch_complete,
					 td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_events)
				min_events = 1;

			do {
				/*
				 * Reap required number of io units, if any,
				 * and do the verification on them through
				 * the callback handler
				 */
				if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
					ret = -1;
					break;
				}
			} while (full && (td->cur_depth > td->o.iodepth_low));
		}
		if (ret < 0)
			break;
	}

	check_update_rusage(td);

	if (!td->error) {
		min_events = td->cur_depth;

		if (min_events)
			ret = io_u_queued_complete(td, min_events, NULL);
	} else
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);

	dprint(FD_VERIFY, "exiting loop\n");
}

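/*
 * Queued and in-flight IOs count toward the number_ios limit as well, so
 * the job stops issuing as soon as enough IO has been put in motion, not
 * only after it has all completed.
 */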
static unsigned int exceeds_number_ios(struct thread_data *td)
{
	unsigned long long number_ios;

	if (!td->o.number_ios)
		return 0;

	number_ios = ddir_rw_sum(td->this_io_blocks);
	number_ios += td->io_u_queued + td->io_u_in_flight;

	return number_ios >= td->o.number_ios;
}

static int io_bytes_exceeded(struct thread_data *td)
{
	unsigned long long bytes;

	if (td_rw(td))
		bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
	else if (td_write(td))
		bytes = td->this_io_bytes[DDIR_WRITE];
	else if (td_read(td))
		bytes = td->this_io_bytes[DDIR_READ];
	else
		bytes = td->this_io_bytes[DDIR_TRIM];

	return bytes >= td->o.size || exceeds_number_ios(td);
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static uint64_t do_io(struct thread_data *td)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	unsigned int i;
	int ret = 0;
	uint64_t total_bytes, bytes_issued = 0;

	if (in_ramp_time(td))
		td_set_runstate(td, TD_RAMP);
	else
		td_set_runstate(td, TD_RUNNING);

	lat_target_init(td);

	/*
	 * If verify_backlog is enabled, we'll run the verify in this
	 * handler as well. For that case, we may need up to twice the
	 * amount of bytes.
	 */
	total_bytes = td->o.size;
	if (td->o.verify != VERIFY_NONE &&
	   (td_write(td) && td->o.verify_backlog))
		total_bytes += td->o.size;

	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
		td->o.time_based) {
		struct timeval comp_time;
		int min_evts = 0;
		struct io_u *io_u;
		int ret2, full;
		enum fio_ddir ddir;

		check_update_rusage(td);

		if (td->terminate || td->done)
			break;

		update_tv_cache(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				td->terminate = 1;
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		if (bytes_issued >= total_bytes)
			break;

		io_u = get_io_u(td);
		if (IS_ERR_OR_NULL(io_u)) {
			int err = PTR_ERR(io_u);

			io_u = NULL;
			if (err == -EBUSY) {
				ret = FIO_Q_BUSY;
				goto reap;
			}
			if (td->o.latency_target)
				goto reap;
			break;
		}

		ddir = io_u->ddir;

		/*
		 * Add verification end_io handler if:
		 * - Asked to verify (!td_rw(td))
		 * - Or the io_u is from our verify list (mixed write/ver)
		 */
		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

			if (!td->o.verify_pattern_bytes) {
				io_u->rand_seed = __rand(&td->__verify_state);
				if (sizeof(int) != sizeof(long *))
					io_u->rand_seed *= __rand(&td->__verify_state);
			}

			if (td->o.verify_async)
				io_u->end_io = verify_io_u_async;
			else
				io_u->end_io = verify_io_u;
			td_set_runstate(td, TD_VERIFYING);
		} else if (in_ramp_time(td))
			td_set_runstate(td, TD_RAMP);
		else
			td_set_runstate(td, TD_RUNNING);

		/*
		 * Always log IO before it's issued, so we know the specific
		 * order of it. The logged unit will track when the IO has
		 * completed.
		 */
		if (td_write(td) && io_u->ddir == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE &&
		    !td->o.experimental_verify)
			log_io_piece(td, io_u);

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;
				struct fio_file *f = io_u->file;

				bytes_issued += bytes;
				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				if (__should_check_rate(td, DDIR_READ) ||
				    __should_check_rate(td, DDIR_WRITE) ||
				    __should_check_rate(td, DDIR_TRIM))
					fio_gettime(&comp_time, NULL);

				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
				bytes_issued += io_u->xfer_buflen;
			}
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			bytes_issued += io_u->xfer_buflen;
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * See if we need to complete some commands. Note that we
		 * can get BUSY even without IO queued, if the system is
		 * resource starved.
		 */
reap:
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_evts = min(td->o.iodepth_batch_complete,
				       td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_evts)
				min_evts = 1;

			if (__should_check_rate(td, DDIR_READ) ||
			    __should_check_rate(td, DDIR_WRITE) ||
			    __should_check_rate(td, DDIR_TRIM))
				fio_gettime(&comp_time, NULL);

			do {
				ret = io_u_queued_complete(td, min_evts, bytes_done);
				if (ret < 0)
					break;

			} while (full && (td->cur_depth > td->o.iodepth_low));
		}

		if (ret < 0)
			break;
		if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
			continue;

		if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
			if (check_min_rate(td, &comp_time, bytes_done)) {
				if (exitall_on_terminate)
					fio_terminate_threads(td->groupid);
				td_verror(td, EIO, "check_min_rate");
				break;
			}
		}
		if (!in_ramp_time(td) && td->o.latency_target)
			lat_target_check(td);

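		/*
		 * thinktime: every thinktime_blocks completed blocks, quiesce
		 * pending IO, optionally busy-spin for thinktime_spin usecs,
		 * then sleep for the remainder of the thinktime period.
		 */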
		if (td->o.thinktime) {
			unsigned long long b;

			b = ddir_rw_sum(td->io_blocks);
			if (!(b % td->o.thinktime_blocks)) {
				int left;

				io_u_quiesce(td);

				if (td->o.thinktime_spin)
					usec_spin(td->o.thinktime_spin);

				left = td->o.thinktime - td->o.thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	check_update_rusage(td);

	if (td->trim_entries)
		log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

	if (td->o.fill_device && td->error == ENOSPC) {
		td->error = 0;
		td->terminate = 1;
	}
	if (!td->error) {
		struct fio_file *f;

		i = td->cur_depth;
		if (i) {
			ret = io_u_queued_complete(td, i, bytes_done);
			if (td->o.fill_device && td->error == ENOSPC)
				td->error = 0;
		}

		if (should_fsync(td) && td->o.end_fsync) {
			td_set_runstate(td, TD_FSYNCING);

			for_each_file(td, f, i) {
				if (!fio_file_fsync(td, f))
					continue;

				log_err("fio: end_fsync failed for file %s\n",
					f->file_name);
			}
		}
	} else
		cleanup_pending_aio(td);

	/*
	 * stop job if we failed doing any IO
	 */
	if (!ddir_rw_sum(td->this_io_bytes))
		td->done = 1;

	return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

static void cleanup_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

		if (td->io_ops->io_u_free)
			td->io_ops->io_u_free(td, io_u);

		fio_memfree(io_u, sizeof(*io_u));
	}

	free_io_mem(td);

	io_u_rexit(&td->io_u_requeues);
	io_u_qexit(&td->io_u_freelist);
	io_u_qexit(&td->io_u_all);
}

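/*
 * IO unit setup: one io_u per iodepth slot. Data buffers come from a
 * single orig_buffer allocation of roughly max_units * max_bs bytes,
 * carved up in max_bs strides, with extra slack added up front when
 * alignment (O_DIRECT, mem_align, raw IO) may shift the start address.
 */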
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs, min_write;
	int cl_align, i, max_units;
	int data_xfer = 1, err;
	char *p;

	max_units = td->o.iodepth;
	max_bs = td_max_bs(td);
	min_write = td->o.min_bs[DDIR_WRITE];
	td->orig_buffer_size = (unsigned long long) max_bs
					* (unsigned long long) max_units;

	if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
		data_xfer = 0;

	err = 0;
	err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
	err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
	err += io_u_qinit(&td->io_u_all, td->o.iodepth);

	if (err) {
		log_err("fio: failed setting up IO queues\n");
		return 1;
	}

	/*
	 * if we may later need to do address alignment, then add any
	 * possible adjustment here so that we don't cause a buffer
	 * overflow later. this adjustment may be too much if we get
	 * lucky and the allocator gives us an aligned address.
	 */
	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    (td->io_ops->flags & FIO_RAWIO))
		td->orig_buffer_size += page_mask + td->o.mem_align;

	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long bs;

		bs = td->orig_buffer_size + td->o.hugepage_size - 1;
		td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
	}

	if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
		return 1;
	}

	if (data_xfer && allocate_io_mem(td))
		return 1;

	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    (td->io_ops->flags & FIO_RAWIO))
		p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
	else
		p = td->orig_buffer;

	cl_align = os_cache_line_size();

	for (i = 0; i < max_units; i++) {
		void *ptr;

		if (td->terminate)
			return 1;

		ptr = fio_memalign(cl_align, sizeof(*io_u));
		if (!ptr) {
			log_err("fio: unable to allocate aligned memory\n");
			break;
		}

		io_u = ptr;
		memset(io_u, 0, sizeof(*io_u));
		INIT_FLIST_HEAD(&io_u->verify_list);
		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

		if (data_xfer) {
			io_u->buf = p;
			dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

			if (td_write(td))
				io_u_fill_buffer(td, io_u, min_write, max_bs);
			if (td_write(td) && td->o.verify_pattern_bytes) {
				/*
				 * Fill the buffer with the pattern if we are
				 * going to be doing writes.
				 */
				fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
			}
		}

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		io_u_qpush(&td->io_u_freelist, io_u);

		/*
		 * io_u never leaves this stack, used for iteration of all
		 * io_u buffers.
		 */
		io_u_qpush(&td->io_u_all, io_u);

		if (td->io_ops->io_u_init) {
			int ret = td->io_ops->io_u_init(td, io_u);

			if (ret) {
				log_err("fio: failed to init engine data: %d\n", ret);
				return 1;
			}
		}

		p += max_bs;
	}

	return 0;
}

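/*
 * The sysfs scheduler file lists the available schedulers with the active
 * one in brackets, e.g. "noop deadline [cfq]". After writing the requested
 * scheduler, read the file back and look for "[name]" to confirm that the
 * switch took effect.
 */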
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_DISKLESSIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		if (errno == ENOENT) {
			log_err("fio: os or kernel doesn't support IO scheduler"
				" switching\n");
			return 0;
		}
		td_verror(td, errno, "fopen iosched");
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno, "fwrite");
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno, "fread");
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->o.ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
		td_verror(td, EINVAL, "iosched_switch");
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

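/*
 * Decide whether another pass of do_io() is warranted: time_based jobs and
 * remaining loops always continue; otherwise keep going only while the
 * byte and IO budgets leave at least one minimum-sized IO to issue.
 */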
static int keep_running(struct thread_data *td)
{
	if (td->done)
		return 0;
	if (td->o.time_based)
		return 1;
	if (td->o.loops) {
		td->o.loops--;
		return 1;
	}
	if (exceeds_number_ios(td))
		return 0;

	if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
		uint64_t diff;

		/*
		 * If the difference is less than the minimum IO size, we
		 * are done.
		 */
		diff = td->o.size - ddir_rw_sum(td->io_bytes);
		if (diff < td_max_bs(td))
			return 0;

		if (fio_files_done(td))
			return 0;

		return 1;
	}

	return 0;
}

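/*
 * Note that the "&>" redirection below is a bash extension; system() runs
 * the command via /bin/sh, so on systems where that is a strict POSIX
 * shell the command would be backgrounded rather than redirected.
 */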
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
	int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
	char *str;

	str = malloc(newlen);
	sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

	log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
	ret = system(str);
	if (ret == -1)
		log_err("fio: exec of cmd <%s> failed\n", str);

	free(str);
	return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };

	td_set_runstate(td, TD_RUNNING);

	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
		struct io_u *io_u;
		int ret;

		if (td->terminate || td->done)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		io_u->flags |= IO_U_F_FLIGHT;
		io_u->error = 0;
		io_u->resid = 0;
		if (ddir_rw(acct_ddir(io_u)))
			td->io_issues[acct_ddir(io_u)]++;
		if (ddir_rw(io_u->ddir)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}

		if (td_write(td) && io_u->ddir == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE &&
		    !td->o.experimental_verify)
			log_io_piece(td, io_u);

		ret = io_u_sync_complete(td, io_u, bytes_done);
		(void) ret;
	}

	return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long elapsed;
	struct thread_data *td = data;
	struct thread_options *o = &td->o;
	pthread_condattr_t attr;
	int clear_state;
	int ret;

	if (!o->use_thread) {
		setsid();
		td->pid = getpid();
	} else
		td->pid = gettid();

	/*
	 * fio_time_init() may not have been called yet if running as a server
	 */
	fio_time_init();

	fio_local_clock_init(o->use_thread);

	dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

	if (is_backend)
		fio_server_send_start(td);

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	INIT_FLIST_HEAD(&td->next_rand_list);
	pthread_mutex_init(&td->io_u_lock, NULL);
	td->io_hist_tree = RB_ROOT;

	pthread_condattr_init(&attr);
	pthread_cond_init(&td->verify_cond, &attr);
	pthread_cond_init(&td->free_cond, &attr);

	td_set_runstate(td, TD_INITIALIZED);
	dprint(FD_MUTEX, "up startup_mutex\n");
	fio_mutex_up(startup_mutex);
	dprint(FD_MUTEX, "wait on td->mutex\n");
	fio_mutex_down(td->mutex);
	dprint(FD_MUTEX, "done waiting on td->mutex\n");

	/*
	 * A new gid requires privilege, so we need to do this before setting
	 * the uid.
	 */
	if (o->gid != -1U && setgid(o->gid)) {
		td_verror(td, errno, "setgid");
		goto err;
	}
	if (o->uid != -1U && setuid(o->uid)) {
		td_verror(td, errno, "setuid");
		goto err;
	}

	/*
	 * If we have a gettimeofday() thread, make sure we exclude that
	 * thread from this job
	 */
	if (o->gtod_cpu)
		fio_cpu_clear(&o->cpumask, o->gtod_cpu);

	/*
	 * Set affinity first, in case it has an impact on the memory
	 * allocations.
	 */
	if (o->cpumask_set) {
		if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
			ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
			if (!ret) {
				log_err("fio: no CPUs set\n");
				log_err("fio: Try increasing number of available CPUs\n");
				td_verror(td, EINVAL, "cpus_split");
				goto err;
			}
		}
		ret = fio_setaffinity(td->pid, o->cpumask);
		if (ret == -1) {
			td_verror(td, errno, "cpu_set_affinity");
			goto err;
		}
	}

#ifdef CONFIG_LIBNUMA
	/* numa node setup */
	if (o->numa_cpumask_set || o->numa_memmask_set) {
		int ret;

		if (numa_available() < 0) {
			td_verror(td, errno, "Does not support NUMA API\n");
			goto err;
		}

		if (o->numa_cpumask_set) {
			ret = numa_run_on_node_mask(o->numa_cpunodesmask);
			if (ret == -1) {
				td_verror(td, errno,
					  "numa_run_on_node_mask failed\n");
				goto err;
			}
		}

		if (o->numa_memmask_set) {

			switch (o->numa_mem_mode) {
			case MPOL_INTERLEAVE:
				numa_set_interleave_mask(o->numa_memnodesmask);
				break;
			case MPOL_BIND:
				numa_set_membind(o->numa_memnodesmask);
				break;
			case MPOL_LOCAL:
				numa_set_localalloc();
				break;
			case MPOL_PREFERRED:
				numa_set_preferred(o->numa_mem_prefer_node);
				break;
			case MPOL_DEFAULT:
			default:
				break;
			}

		}
	}
#endif

	if (fio_pin_memory(td))
		goto err;

	/*
	 * May alter parameters that init_io_u() will use, so we need to
	 * do this first.
	 */
	if (init_iolog(td))
		goto err;

	if (init_io_u(td))
		goto err;

	if (o->verify_async && verify_async_init(td))
		goto err;

	if (o->ioprio) {
		ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
		if (ret == -1) {
			td_verror(td, errno, "ioprio_set");
			goto err;
		}
	}

	if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
		goto err;

	errno = 0;
	if (nice(o->nice) == -1 && errno != 0) {
		td_verror(td, errno, "nice");
		goto err;
	}

	if (o->ioscheduler && switch_ioscheduler(td))
		goto err;

	if (!o->create_serialize && setup_files(td))
		goto err;

	if (td_io_init(td))
		goto err;

	if (init_random_map(td))
		goto err;

	if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
		goto err;

	if (o->pre_read) {
		if (pre_read_files(td) < 0)
			goto err;
	}

	fio_verify_init(td);

	fio_gettime(&td->epoch, NULL);
	fio_getrusage(&td->ru_start);
	clear_state = 0;
	while (keep_running(td)) {
		uint64_t verify_bytes;

		fio_gettime(&td->start, NULL);
		memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->tv_cache, &td->start, sizeof(td->start));

		if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
		    o->ratemin[DDIR_TRIM]) {
			memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
				sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
				sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
				sizeof(td->bw_sample_time));
		}

		if (clear_state)
			clear_io_state(td);

		prune_io_piece_log(td);

		if (td->o.verify_only && (td_write(td) || td_rw(td)))
			verify_bytes = do_dry_run(td);
		else
			verify_bytes = do_io(td);

		clear_state = 1;

		if (td_read(td) && td->io_bytes[DDIR_READ]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_READ] += elapsed;
		}
		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_WRITE] += elapsed;
		}
		if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_TRIM] += elapsed;
		}

		if (td->error || td->terminate)
			break;

		if (!o->do_verify ||
		    o->verify == VERIFY_NONE ||
		    (td->io_ops->flags & FIO_UNIDIR))
			continue;

		clear_io_state(td);

		fio_gettime(&td->start, NULL);

		do_verify(td, verify_bytes);

		td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000;
	td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000;
	td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000;
	td->ts.total_run_time = mtime_since_now(&td->epoch);
	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];

	fio_unpin_memory(td);

	fio_writeout_logs(td);

	if (o->exec_postrun)
		exec_string(o, o->exec_postrun, (const char *)"postrun");

	if (exitall_on_terminate)
		fio_terminate_threads(td->groupid);

err:
	if (td->error)
		log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
							td->verror);

	if (o->verify_async)
		verify_async_exit(td);

	close_and_free_files(td);
	cleanup_io_u(td);
	close_ioengine(td);
	cgroup_shutdown(td, &cgroup_mnt);

	if (o->cpumask_set) {
		int ret = fio_cpuset_exit(&o->cpumask);

		td_verror(td, ret, "fio_cpuset_exit");
	}

	/*
	 * do this very late, it will log file closing as well
	 */
	if (o->write_iolog_file)
		write_iolog_close(td);

	fio_mutex_remove(td->rusage_sem);
	td->rusage_sem = NULL;

	fio_mutex_remove(td->mutex);
	td->mutex = NULL;

	td_set_runstate(td, TD_EXITED);
	return (void *) (uintptr_t) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

#ifndef __hpux
	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}
#else
	/*
	 * HP-UX inherits shm mappings?
	 */
	data = threads;
#endif

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (uintptr_t) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
			 unsigned int *m_rate)
{
	struct thread_data *td;
	unsigned int cputhreads, realthreads, pending;
	int i, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	realthreads = pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags = 0;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
			cputhreads++;
		else
			realthreads++;

		if (!td->pid) {
			pending++;
			continue;
		}
		if (td->runstate == TD_REAPED)
			continue;
		if (td->o.use_thread) {
			if (td->runstate == TD_EXITED) {
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			continue;
		}

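		/*
		 * For process based jobs, poll with WNOHANG while the child
		 * may still be running, and do a blocking waitpid() once it
		 * has already reached TD_EXITED.
		 */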
		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n",
					(int) td->pid, td->runstate);
				td->sig = ECHILD;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				if (sig != SIGTERM && sig != SIGUSR2)
					log_err("fio: pid=%d, got signal=%d\n",
						(int) td->pid, sig);
				td->sig = sig;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);

				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * thread is not dead, continue
		 */
		pending++;
		continue;
reaped:
		(*nr_running)--;
		(*m_rate) -= ddir_rw_sum(td->o.ratemin);
		(*t_rate) -= ddir_rw_sum(td->o.rate);
		if (!td->pid)
			pending--;

		if (td->error)
			exit_value++;

		done_secs += mtime_since_now(&td->epoch) / 1000;
		profile_td_exit(td);
	}

	if (*nr_running == cputhreads && !pending && realthreads)
		fio_terminate_threads(TERMINATE_ALL);
}

static void do_usleep(unsigned int usecs)
{
	check_for_running_stats();
	usleep(usecs);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
	uint64_t spent;

	if (fio_gtod_offload && fio_start_gtod_thread())
		return;

	fio_idle_prof_init();

	set_sig_handlers();

	nr_thread = nr_process = 0;
	for_each_td(td, i) {
		if (td->o.use_thread)
			nr_thread++;
		else
			nr_process++;
	}

	if (output_format == FIO_OUTPUT_NORMAL) {
		log_info("Starting ");
		if (nr_thread)
			log_info("%d thread%s", nr_thread,
				 nr_thread > 1 ? "s" : "");
		if (nr_process) {
			if (nr_thread)
				log_info(" and ");
			log_info("%d process%s", nr_process,
				 nr_process > 1 ? "es" : "");
		}
		log_info("\n");
		fflush(stdout);
	}

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->o.create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n",
					(int) td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		} else {
			struct fio_file *f;
			unsigned int j;

			/*
			 * for sharing to work, each job must always open
			 * its own files. so close them, if we opened them
			 * for creation
			 */
			for_each_file(td, f, j) {
				if (fio_file_open(f))
					td_io_close_file(td, f);
			}
		}
	}

	/* start idle threads before io threads start to run */
	fio_idle_prof_start();

	set_genesis_time();

1755 | while (todo) { | |
1756 | struct thread_data *map[REAL_MAX_JOBS]; | |
1757 | struct timeval this_start; | |
1758 | int this_jobs = 0, left; | |
1759 | ||
1760 | /* | |
1761 | * create threads (TD_NOT_CREATED -> TD_CREATED) | |
1762 | */ | |
1763 | for_each_td(td, i) { | |
1764 | if (td->runstate != TD_NOT_CREATED) | |
1765 | continue; | |
1766 | ||
1767 | /* | |
1768 | * never got a chance to start; killed by another | |
1769 | * thread for some reason | |
1770 | */ | |
1771 | if (td->terminate) { | |
1772 | todo--; | |
1773 | continue; | |
1774 | } | |
1775 | ||
1776 | if (td->o.start_delay) { | |
0de5b26f | 1777 | spent = utime_since_genesis(); |
2e1df07d | 1778 | |
74454ce4 | 1779 | if (td->o.start_delay > spent) |
2e1df07d JA |
1780 | continue; |
1781 | } | |
1782 | ||
1783 | if (td->o.stonewall && (nr_started || nr_running)) { | |
1784 | dprint(FD_PROCESS, "%s: stonewall wait\n", | |
1785 | td->o.name); | |
1786 | break; | |
1787 | } | |
1788 | ||
1789 | init_disk_util(td); | |
1790 | ||
c97f1ad6 JA |
1791 | td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED); |
1792 | td->update_rusage = 0; | |
1793 | ||
2e1df07d JA |
1794 | /* |
1795 | * Set state to created. Thread will transition | |
1796 | * to TD_INITIALIZED when it's done setting up. | |
1797 | */ | |
1798 | td_set_runstate(td, TD_CREATED); | |
1799 | map[this_jobs++] = td; | |
1800 | nr_started++; | |
1801 | ||
1802 | if (td->o.use_thread) { | |
1803 | int ret; | |
1804 | ||
1805 | dprint(FD_PROCESS, "will pthread_create\n"); | |
1806 | ret = pthread_create(&td->thread, NULL, | |
1807 | thread_main, td); | |
1808 | if (ret) { | |
1809 | log_err("pthread_create: %s\n", | |
1810 | strerror(ret)); | |
1811 | nr_started--; | |
1812 | break; | |
1813 | } | |
1814 | ret = pthread_detach(td->thread); | |
1815 | if (ret) | |
1816 | log_err("pthread_detach: %s", | |
1817 | strerror(ret)); | |
1818 | } else { | |
1819 | pid_t pid; | |
1820 | dprint(FD_PROCESS, "will fork\n"); | |
1821 | pid = fork(); | |
1822 | if (!pid) { | |
1823 | int ret = fork_main(shm_id, i); | |
1824 | ||
1825 | _exit(ret); | |
1826 | } else if (i == fio_debug_jobno) | |
1827 | *fio_debug_jobp = pid; | |
1828 | } | |
1829 | dprint(FD_MUTEX, "wait on startup_mutex\n"); | |
1830 | if (fio_mutex_down_timeout(startup_mutex, 10)) { | |
1831 | log_err("fio: job startup hung? exiting.\n"); | |
1832 | fio_terminate_threads(TERMINATE_ALL); | |
1833 | fio_abort = 1; | |
1834 | nr_started--; | |
1835 | break; | |
1836 | } | |
1837 | dprint(FD_MUTEX, "done waiting on startup_mutex\n"); | |
1838 | } | |
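		/*
		 * Note: each worker is expected to post startup_mutex from
		 * its own setup path once early init is done; the 10 second
		 * fio_mutex_down_timeout() above is the other half of that
		 * rendezvous and catches workers that wedge before then.
		 */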
1839 | ||
1840 | /* | |
1841 | * Wait for the started threads to transition to | |
1842 | * TD_INITIALIZED. | |
1843 | */ | |
1844 | fio_gettime(&this_start, NULL); | |
1845 | left = this_jobs; | |
1846 | while (left && !fio_abort) { | |
1847 | if (mtime_since_now(&this_start) > JOB_START_TIMEOUT) | |
1848 | break; | |
1849 | ||
06464907 | 1850 | do_usleep(100000); |
2e1df07d JA |
1851 | |
1852 | for (i = 0; i < this_jobs; i++) { | |
1853 | td = map[i]; | |
1854 | if (!td) | |
1855 | continue; | |
1856 | if (td->runstate == TD_INITIALIZED) { | |
1857 | map[i] = NULL; | |
1858 | left--; | |
1859 | } else if (td->runstate >= TD_EXITED) { | |
1860 | map[i] = NULL; | |
1861 | left--; | |
1862 | todo--; | |
1863 | nr_running++; /* work-around... */ | |
1864 | } | |
1865 | } | |
1866 | } | |
1867 | ||
1868 | if (left) { | |
4e87c37a JA |
1869 | log_err("fio: %d job%s failed to start\n", left, |
1870 | left > 1 ? "s" : ""); | |
2e1df07d JA |
1871 | for (i = 0; i < this_jobs; i++) { |
1872 | td = map[i]; | |
1873 | if (!td) | |
1874 | continue; | |
1875 | kill(td->pid, SIGTERM); | |
1876 | } | |
1877 | break; | |
1878 | } | |
1879 | ||
1880 | /* | |
1881 | * start created threads (TD_INITIALIZED -> TD_RUNNING). | |
1882 | */ | |
1883 | for_each_td(td, i) { | |
1884 | if (td->runstate != TD_INITIALIZED) | |
1885 | continue; | |
1886 | ||
1887 | if (in_ramp_time(td)) | |
1888 | td_set_runstate(td, TD_RAMP); | |
1889 | else | |
1890 | td_set_runstate(td, TD_RUNNING); | |
1891 | nr_running++; | |
1892 | nr_started--; | |
342f4be4 JA |
1893 | m_rate += ddir_rw_sum(td->o.ratemin); |
1894 | t_rate += ddir_rw_sum(td->o.rate); | |
2e1df07d JA |
1895 | todo--; |
1896 | fio_mutex_up(td->mutex); | |
1897 | } | |
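		/*
		 * At this point each started job has walked TD_NOT_CREATED ->
		 * TD_CREATED -> TD_INITIALIZED -> TD_RAMP or TD_RUNNING; the
		 * reap_threads() call below handles the TD_EXITED ->
		 * TD_REAPED tail of the same lifecycle.
		 */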
1898 | ||
1899 | reap_threads(&nr_running, &t_rate, &m_rate); | |
1900 | ||
122c7725 | 1901 | if (todo) |
06464907 | 1902 | do_usleep(100000); |
2e1df07d JA |
1903 | } |
1904 | ||
1905 | while (nr_running) { | |
1906 | reap_threads(&nr_running, &t_rate, &m_rate); | |
06464907 | 1907 | do_usleep(10000); |
2e1df07d JA |
1908 | } |
1909 | ||
f2a2ce0e HL |
1910 | fio_idle_prof_stop(); |
1911 | ||
2e1df07d | 1912 | update_io_ticks(); |
2e1df07d JA |
1913 | } |
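/*
 * The job-start handshake used in run_threads() maps onto a
 * standard timed-semaphore rendezvous. A generic sketch of the
 * parent side with POSIX semaphores (illustrative only; fio's
 * fio_mutex is its own shared-memory primitive):
 */
#include <semaphore.h>

static int wait_for_worker_sketch(sem_t *startup_sem)
{
	struct timespec ts;

	/* absolute deadline: now + 10 seconds, as above */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 10;
	return sem_timedwait(startup_sem, &ts);	/* 0 on success */
}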
1914 | ||
9ec7779f JA |
1915 | void wait_for_disk_thread_exit(void) |
1916 | { | |
1917 | fio_mutex_down(disk_thread_mutex); | |
1918 | } | |
1919 | ||
27357187 JA |
1920 | static void free_disk_util(void) |
1921 | { | |
1922 | disk_util_start_exit(); | |
1923 | wait_for_disk_thread_exit(); | |
1924 | disk_util_prune_entries(); | |
1925 | } | |
1926 | ||
2e1df07d JA |
1927 | static void *disk_thread_main(void *data) |
1928 | { | |
9ec7779f JA |
1929 | int ret = 0; |
1930 | ||
2e1df07d JA |
1931 | fio_mutex_up(startup_mutex); |
1932 | ||
9ec7779f | 1933 | while (threads && !ret) { |
2e1df07d JA |
1934 | usleep(DISK_UTIL_MSEC * 1000); |
1935 | if (!threads) | |
1936 | break; | |
9ec7779f | 1937 | ret = update_io_ticks(); |
2e1df07d JA |
1938 | |
1939 | if (!is_backend) | |
1940 | print_thread_status(); | |
1941 | } | |
1942 | ||
9ec7779f | 1943 | fio_mutex_up(disk_thread_mutex); |
2e1df07d JA |
1944 | return NULL; |
1945 | } | |
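/*
 * disk_thread_main() posts disk_thread_mutex on its way out so
 * wait_for_disk_thread_exit() can block until the final sample is
 * taken. The same shutdown rendezvous in plain pthread form, as a
 * standalone sketch (not fio's implementation):
 */
static pthread_mutex_t sampler_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sampler_cv = PTHREAD_COND_INITIALIZER;
static int sampler_done;

static void sampler_exit_notify_sketch(void)
{
	pthread_mutex_lock(&sampler_lock);
	sampler_done = 1;			/* last sample taken */
	pthread_cond_signal(&sampler_cv);
	pthread_mutex_unlock(&sampler_lock);
}

static void wait_for_sampler_exit_sketch(void)
{
	pthread_mutex_lock(&sampler_lock);
	while (!sampler_done)		/* guard against spurious wakeups */
		pthread_cond_wait(&sampler_cv, &sampler_lock);
	pthread_mutex_unlock(&sampler_lock);
}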
1946 | ||
1947 | static int create_disk_util_thread(void) | |
1948 | { | |
1949 | int ret; | |
1950 | ||
9ec7779f JA |
1951 | setup_disk_util(); |
1952 | ||
521da527 | 1953 | disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); |
9ec7779f | 1954 | |
2e1df07d JA |
1955 | ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL); |
1956 | if (ret) { | |
9ec7779f | 1957 | fio_mutex_remove(disk_thread_mutex); |
2e1df07d JA |
1958 | log_err("Can't create disk util thread: %s\n", strerror(ret)); |
1959 | return 1; | |
1960 | } | |
1961 | ||
1962 | ret = pthread_detach(disk_util_thread); | |
1963 | if (ret) { | |
9ec7779f | 1964 | fio_mutex_remove(disk_thread_mutex); |
2e1df07d JA |
1965 | log_err("Can't detatch disk util thread: %s\n", strerror(ret)); |
1966 | return 1; | |
1967 | } | |
1968 | ||
1969 | dprint(FD_MUTEX, "wait on startup_mutex\n"); | |
1970 | fio_mutex_down(startup_mutex); | |
1971 | dprint(FD_MUTEX, "done waiting on startup_mutex\n"); | |
1972 | return 0; | |
1973 | } | |
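/*
 * create_disk_util_thread() creates the thread joinable and then
 * detaches it; an equivalent single-step alternative uses a thread
 * attribute instead. A sketch of that variant:
 */
static int spawn_detached_sketch(void *(*fn)(void *), void *arg)
{
	pthread_attr_t attr;
	pthread_t tid;
	int ret;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	ret = pthread_create(&tid, &attr, fn, arg);
	pthread_attr_destroy(&attr);
	return ret;	/* 0 on success, errno-style code on failure */
}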
1974 | ||
2e1df07d JA |
1975 | int fio_backend(void) |
1976 | { | |
1977 | struct thread_data *td; | |
1978 | int i; | |
1979 | ||
1980 | if (exec_profile) { | |
1981 | if (load_profile(exec_profile)) | |
1982 | return 1; | |
1983 | free(exec_profile); | |
1984 | exec_profile = NULL; | |
1985 | } | |
1986 | if (!thread_number) | |
1987 | return 0; | |
1988 | ||
1989 | if (write_bw_log) { | |
5a812f9b JA |
1990 | setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW); |
1991 | setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW); | |
1992 | setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW); | |
2e1df07d JA |
1993 | } |
1994 | ||
521da527 | 1995 | startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); |
2e1df07d JA |
1996 | if (startup_mutex == NULL) |
1997 | return 1; | |
2e1df07d JA |
1998 | |
1999 | set_genesis_time(); | |
cef9175e | 2000 | stat_init(); |
2e1df07d JA |
2001 | create_disk_util_thread(); |
2002 | ||
2003 | cgroup_list = smalloc(sizeof(*cgroup_list)); | |
2004 | INIT_FLIST_HEAD(cgroup_list); | |
2005 | ||
2006 | run_threads(); | |
2007 | ||
2008 | if (!fio_abort) { | |
2009 | show_run_stats(); | |
2010 | if (write_bw_log) { | |
2011 | __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log"); | |
2012 | __finish_log(agg_io_log[DDIR_WRITE], | |
2013 | "agg-write_bw.log"); | |
6eaf09d6 SL |
2014 | __finish_log(agg_io_log[DDIR_TRIM], |
2015 | "agg-trim_bw.log"); | |
2e1df07d JA |
2016 | } |
2017 | } | |
2018 | ||
2019 | for_each_td(td, i) | |
2020 | fio_options_free(td); | |
2021 | ||
a462baef | 2022 | free_disk_util(); |
2e1df07d JA |
2023 | cgroup_kill(cgroup_list); |
2024 | sfree(cgroup_list); | |
2025 | sfree(cgroup_mnt); | |
2026 | ||
2027 | fio_mutex_remove(startup_mutex); | |
9ec7779f | 2028 | fio_mutex_remove(disk_thread_mutex); |
cef9175e | 2029 | stat_exit(); |
2e1df07d JA |
2030 | return exit_value; |
2031 | } |
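/*
 * fio_backend() is the engine entry point; the real main() (in
 * fio.c) handles option parsing and client/server dispatch first.
 * A skeletal sketch of the call order, assuming parse_options()
 * fills in the thread_data slots as in the real tool:
 */
int main_sketch(int argc, char *argv[])
{
	if (parse_options(argc, argv))
		return 1;

	return fio_backend();	/* runs jobs, returns exit_value */
}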