io_u: rate cleanup and spelling error
fio.git / backend.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <math.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "lib/memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "err.h"
#include "workqueue.h"
#include "lib/mountcheck.h"
#include "rate-submit.h"
#include "helper_thread.h"

static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;

#define JOB_START_TIMEOUT       (5 * 1000)

static void sig_int(int sig)
{
        if (threads) {
                if (is_backend)
                        fio_server_got_signal(sig);
                else {
                        log_info("\nfio: terminating on signal %d\n", sig);
                        log_info_flush();
                        exit_value = 128;
                }

                fio_terminate_threads(TERMINATE_ALL);
        }
}

void sig_show_status(int sig)
{
        show_running_run_stats();
}

static void set_sig_handlers(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGBREAK, &act, NULL);
#endif

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_show_status;
        act.sa_flags = SA_RESTART;
        sigaction(SIGUSR1, &act, NULL);

        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
                act.sa_flags = SA_RESTART;
                sigaction(SIGPIPE, &act, NULL);
        }
}

/*
 * Check if we are above the minimum rate given.
 */
static bool __check_min_rate(struct thread_data *td, struct timespec *now,
                             enum fio_ddir ddir)
{
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
        unsigned int ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;

        assert(ddir_rw(ddir));

        if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
                return false;

        /*
         * allow a 2 second settle period in the beginning
         */
        if (mtime_since(&td->start, now) < 2000)
                return false;

        iops += td->this_io_blocks[ddir];
        bytes += td->this_io_bytes[ddir];
        ratemin += td->o.ratemin[ddir];
        rate_iops += td->o.rate_iops[ddir];
        rate_iops_min += td->o.rate_iops_min[ddir];

        /*
         * if rate blocks is set, sample is running
         */
        if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
                spent = mtime_since(&td->lastrate[ddir], now);
                if (spent < td->o.ratecycle)
                        return false;

                if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
                        /*
                         * check bandwidth specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
                                log_err("%s: rate_min=%uB/s not met, only transferred %lluB\n",
                                        td->o.name, ratemin, bytes);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
                                        log_err("%s: rate_min=%uB/s not met, got %luB/s\n",
                                                td->o.name, ratemin, rate);
                                        return true;
                                }
                        }
                } else {
                        /*
                         * check iops specified rate
                         */
                        if (iops < rate_iops) {
                                log_err("%s: rate_iops=%u not met, only performed %lu IOs\n",
                                                td->o.name, rate_iops, iops);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: rate_iops_min=%u not met, got %lu IOPS\n",
                                                td->o.name, rate_iops_min, rate);
                                        return true;
                                }
                        }
                }
        }

        td->rate_bytes[ddir] = bytes;
        td->rate_blocks[ddir] = iops;
        memcpy(&td->lastrate[ddir], now, sizeof(*now));
        return false;
}

static bool check_min_rate(struct thread_data *td, struct timespec *now)
{
        bool ret = false;

        if (td->bytes_done[DDIR_READ])
                ret |= __check_min_rate(td, now, DDIR_READ);
        if (td->bytes_done[DDIR_WRITE])
                ret |= __check_min_rate(td, now, DDIR_WRITE);
        if (td->bytes_done[DDIR_TRIM])
                ret |= __check_min_rate(td, now, DDIR_TRIM);

        return ret;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
        int r;

        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0);
        if (r < 0)
                return;

        /*
         * now cancel remaining active events
         */
        if (td->io_ops->cancel) {
                struct io_u *io_u;
                int i;

                io_u_qiter(&td->io_u_all, io_u, i) {
                        if (io_u->flags & IO_U_F_FLIGHT) {
                                r = td->io_ops->cancel(td, io_u);
                                if (!r)
                                        put_io_u(td, io_u);
                        }
                }
        }

        if (td->cur_depth)
                r = io_u_queued_complete(td, td->cur_depth);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
{
        struct io_u *io_u = __get_io_u(td);
        int ret;

        if (!io_u)
                return true;

        io_u->ddir = DDIR_SYNC;
        io_u->file = f;

        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
                return true;
        }

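        /*
         * Queue the sync; on FIO_Q_BUSY we commit whatever is pending and
         * retry until the engine accepts it.
         */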
requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
                td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return true;
        } else if (ret == FIO_Q_QUEUED) {
                if (td_io_commit(td))
                        return true;
                if (io_u_queued_complete(td, 1) < 0)
                        return true;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
                        td_verror(td, io_u->error, "td_io_queue");
                        return true;
                }

                if (io_u_sync_complete(td, io_u) < 0)
                        return true;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
                        return true;
                goto requeue;
        }

        return false;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
        int ret;

        if (fio_file_open(f))
                return fio_io_sync(td, f);

        if (td_io_open_file(td, f))
                return 1;

        ret = fio_io_sync(td, f);
        td_io_close_file(td, f);
        return ret;
}

static inline void __update_ts_cache(struct thread_data *td)
{
        fio_gettime(&td->ts_cache, NULL);
}

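/*
 * Refresh the cached timestamp only once every (ts_cache_mask + 1) calls,
 * so the hot path avoids a clock read on most invocations.
 */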
static inline void update_ts_cache(struct thread_data *td)
{
        if ((++td->ts_cache_nr & td->ts_cache_mask) == td->ts_cache_mask)
                __update_ts_cache(td);
}

static inline bool runtime_exceeded(struct thread_data *td, struct timespec *t)
{
        if (in_ramp_time(td))
                return false;
        if (!td->o.timeout)
                return false;
        if (utime_since(&td->epoch, t) >= td->o.timeout)
                return true;

        return false;
}

/*
 * We need to update the runtime consistently in ms, but keep a running
 * tally of the current elapsed time in microseconds for sub millisecond
 * updates.
 */
static inline void update_runtime(struct thread_data *td,
                                  unsigned long long *elapsed_us,
                                  const enum fio_ddir ddir)
{
        if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
                return;

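        /*
         * Remove the milliseconds previously credited, fold the new
         * elapsed time into the usec tally, then re-add it rounded up
         * to ms. This keeps runtime[] in ms without double counting
         * across repeated calls.
         */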
        td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
        elapsed_us[ddir] += utime_since_now(&td->start);
        td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
}

static bool break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
                                int *retptr)
{
        int ret = *retptr;

        if (ret < 0 || td->error) {
                int err = td->error;
                enum error_type_bit eb;

                if (ret < 0)
                        err = -ret;

                eb = td_error_type(ddir, err);
                if (!(td->o.continue_on_error & (1 << eb)))
                        return true;

                if (td_non_fatal_error(td, eb, err)) {
                        /*
                         * Continue with the I/Os in case of
                         * a non fatal error.
                         */
                        update_error_count(td, err);
                        td_clear_error(td);
                        *retptr = 0;
                        return false;
                } else if (td->o.fill_device && err == ENOSPC) {
                        /*
                         * We expect to hit this error if
                         * fill_device option is set.
                         */
                        td_clear_error(td);
                        fio_mark_td_terminate(td);
                        return true;
                } else {
                        /*
                         * Stop the I/O in case of a fatal
                         * error.
                         */
                        update_error_count(td, err);
                        return true;
                }
        }

        return false;
}

static void check_update_rusage(struct thread_data *td)
{
        if (td->update_rusage) {
                td->update_rusage = 0;
                update_rusage_stat(td);
                fio_mutex_up(td->rusage_sem);
        }
}

static int wait_for_completions(struct thread_data *td, struct timespec *time)
{
        const int full = queue_full(td);
        int min_evts = 0;
        int ret;

        if (td->flags & TD_F_REGROW_LOGS)
                return io_u_quiesce(td);

        /*
         * if the queue is full, we MUST reap at least 1 event
         */
        min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
        if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
                min_evts = 1;

        if (time && (__should_check_rate(td, DDIR_READ) ||
            __should_check_rate(td, DDIR_WRITE) ||
            __should_check_rate(td, DDIR_TRIM)))
                fio_gettime(time, NULL);

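        /*
         * Reap at least min_evts; if the queue was full, keep reaping
         * until depth drops below iodepth_low.
         */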
        do {
                ret = io_u_queued_complete(td, min_evts);
                if (ret < 0)
                        break;
        } while (full && (td->cur_depth > td->o.iodepth_low));

        return ret;
}

int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
                   enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
                   struct timespec *comp_time)
{
        int ret2;

        switch (*ret) {
        case FIO_Q_COMPLETED:
                if (io_u->error) {
                        *ret = -io_u->error;
                        clear_io_u(td, io_u);
                } else if (io_u->resid) {
                        int bytes = io_u->xfer_buflen - io_u->resid;
                        struct fio_file *f = io_u->file;

                        if (bytes_issued)
                                *bytes_issued += bytes;

                        if (!from_verify)
                                trim_io_piece(td, io_u);

                        /*
                         * zero read, fail
                         */
                        if (!bytes) {
                                if (!from_verify)
                                        unlog_io_piece(td, io_u);
                                td_verror(td, EIO, "full resid");
                                put_io_u(td, io_u);
                                break;
                        }

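                        /*
                         * Short transfer: advance the buffer and offset by
                         * the bytes that did complete, then requeue the rest.
                         */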
                        io_u->xfer_buflen = io_u->resid;
                        io_u->xfer_buf += bytes;
                        io_u->offset += bytes;

                        if (ddir_rw(io_u->ddir))
                                td->ts.short_io_u[io_u->ddir]++;

                        if (io_u->offset == f->real_file_size)
                                goto sync_done;

                        requeue_io_u(td, &io_u);
                } else {
sync_done:
                        if (comp_time && (__should_check_rate(td, DDIR_READ) ||
                            __should_check_rate(td, DDIR_WRITE) ||
                            __should_check_rate(td, DDIR_TRIM)))
                                fio_gettime(comp_time, NULL);

                        *ret = io_u_sync_complete(td, io_u);
                        if (*ret < 0)
                                break;
                }

                if (td->flags & TD_F_REGROW_LOGS)
                        regrow_logs(td);

                /*
                 * when doing I/O (not when verifying),
                 * check for any errors that are to be ignored
                 */
                if (!from_verify)
                        break;

                return 0;
        case FIO_Q_QUEUED:
                /*
                 * if the engine doesn't have a commit hook,
                 * the io_u is really queued. if it does have such
                 * a hook, it has to call io_u_queued() itself.
                 */
                if (td->io_ops->commit == NULL)
                        io_u_queued(td, io_u);
                if (bytes_issued)
                        *bytes_issued += io_u->xfer_buflen;
                break;
        case FIO_Q_BUSY:
                if (!from_verify)
                        unlog_io_piece(td, io_u);
                requeue_io_u(td, &io_u);
                ret2 = td_io_commit(td);
                if (ret2 < 0)
                        *ret = ret2;
                break;
        default:
                assert(*ret < 0);
                td_verror(td, -(*ret), "td_io_queue");
                break;
        }

        if (break_on_this_error(td, ddir, ret))
                return 1;

        return 0;
}

static inline bool io_in_polling(struct thread_data *td)
{
        return !td->o.iodepth_batch_complete_min &&
                   !td->o.iodepth_batch_complete_max;
}

/*
 * Unlinks files from thread data fio_file structure
 */
static int unlink_all_files(struct thread_data *td)
{
        struct fio_file *f;
        unsigned int i;
        int ret = 0;

        for_each_file(td, f, i) {
                if (f->filetype != FIO_TYPE_FILE)
                        continue;
                ret = td_io_unlink_file(td, f);
                if (ret)
                        break;
        }

        if (ret)
                td_verror(td, ret, "unlink_all_files");

        return ret;
}

/*
 * Check if io_u will overlap an in-flight IO in the queue
 */
static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
{
        bool overlap;
        struct io_u *check_io_u;
        unsigned long long x1, x2, y1, y2;
        int i;

        x1 = io_u->offset;
        x2 = io_u->offset + io_u->buflen;
        overlap = false;
        io_u_qiter(q, check_io_u, i) {
                if (check_io_u->flags & IO_U_F_FLIGHT) {
                        y1 = check_io_u->offset;
                        y2 = check_io_u->offset + check_io_u->buflen;

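                        /*
                         * Two ranges [x1,x2) and [y1,y2) overlap iff each
                         * starts before the other ends.
                         */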
                        if (x1 < y2 && y1 < x2) {
                                overlap = true;
                                dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
                                                x1, io_u->buflen,
                                                y1, check_io_u->buflen);
                                break;
                        }
                }
        }

        return overlap;
}

static int io_u_submit(struct thread_data *td, struct io_u *io_u)
{
        /*
         * Check for overlap if the user asked us to, and we have
         * at least one IO in flight besides this one.
         */
        if (td->o.serialize_overlap && td->cur_depth > 1 &&
            in_flight_overlap(&td->io_u_all, io_u))
                return FIO_Q_BUSY;

        return td_io_queue(td, io_u);
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
        unsigned int i;

        dprint(FD_VERIFY, "starting loop\n");

        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
                if (!fio_file_open(f))
                        continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
                        break;
        }

        check_update_rusage(td);

        if (td->error)
                return;

        /*
         * verify_state needs to be reset before verification
         * proceeds so that expected random seeds match actual
         * random seeds in headers. The main loop will reset
         * all random number generators if randrepeat is set.
         */
        if (!td->o.rand_repeatable)
                td_fill_verify_state_seed(td);

        td_set_runstate(td, TD_VERIFYING);

        io_u = NULL;
        while (!td->terminate) {
                enum fio_ddir ddir;
                int full;

                update_ts_cache(td);
                check_update_rusage(td);

                if (runtime_exceeded(td, &td->ts_cache)) {
                        __update_ts_cache(td);
                        if (runtime_exceeded(td, &td->ts_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (!td->o.experimental_verify) {
                        io_u = __get_io_u(td);
                        if (!io_u)
                                break;

                        if (get_next_verify(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td_io_prep(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }
                } else {
                        if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
                                break;

                        while ((io_u = get_io_u(td)) != NULL) {
                                if (IS_ERR_OR_NULL(io_u)) {
                                        io_u = NULL;
                                        ret = FIO_Q_BUSY;
                                        goto reap;
                                }

                                /*
                                 * We are only interested in the places where
                                 * we wrote or trimmed IOs. Turn those into
                                 * reads for verification purposes.
                                 */
                                if (io_u->ddir == DDIR_READ) {
                                        /*
                                         * Pretend we issued it for rwmix
                                         * accounting
                                         */
                                        td->io_issues[DDIR_READ]++;
                                        put_io_u(td, io_u);
                                        continue;
                                } else if (io_u->ddir == DDIR_TRIM) {
                                        io_u->ddir = DDIR_READ;
                                        io_u_set(td, io_u, IO_U_F_TRIMMED);
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
                                        break;
                                } else {
                                        put_io_u(td, io_u);
                                        continue;
                                }
                        }

                        if (!io_u)
                                break;
                }

                if (verify_state_should_stop(td, io_u)) {
                        put_io_u(td, io_u);
                        break;
                }

                if (td->o.verify_async)
                        io_u->end_io = verify_io_u_async;
                else
                        io_u->end_io = verify_io_u;

                ddir = io_u->ddir;
                if (!td->o.disable_slat)
                        fio_gettime(&io_u->start_time, NULL);

                ret = io_u_submit(td, io_u);

                if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL))
                        break;

                /*
                 * if we can queue more, do so. but check if there are
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || io_in_polling(td))
                        ret = wait_for_completions(td, NULL);

                if (ret < 0)
                        break;
        }

        check_update_rusage(td);

        if (!td->error) {
                min_events = td->cur_depth;

                if (min_events)
                        ret = io_u_queued_complete(td, min_events);
        } else
                cleanup_pending_aio(td);

        td_set_runstate(td, TD_RUNNING);

        dprint(FD_VERIFY, "exiting loop\n");
}

static bool exceeds_number_ios(struct thread_data *td)
{
        unsigned long long number_ios;

        if (!td->o.number_ios)
                return false;

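        /*
         * Count queued and in-flight I/Os as issued, so we stop
         * submitting before overshooting number_ios.
         */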
        number_ios = ddir_rw_sum(td->io_blocks);
        number_ios += td->io_u_queued + td->io_u_in_flight;

        return number_ios >= (td->o.number_ios * td->loops);
}

static bool io_bytes_exceeded(struct thread_data *td, uint64_t *this_bytes)
{
        unsigned long long bytes, limit;

        if (td_rw(td))
                bytes = this_bytes[DDIR_READ] + this_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = this_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = this_bytes[DDIR_READ];
        else
                bytes = this_bytes[DDIR_TRIM];

        if (td->o.io_size)
                limit = td->o.io_size;
        else
                limit = td->o.size;

        limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
}

static bool io_issue_bytes_exceeded(struct thread_data *td)
{
        return io_bytes_exceeded(td, td->io_issue_bytes);
}

static bool io_complete_bytes_exceeded(struct thread_data *td)
{
        return io_bytes_exceeded(td, td->this_io_bytes);
}

/*
 * used to calculate the next io time for rate control
 */
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
        uint64_t bps = td->rate_bps[ddir];

        assert(!(td->flags & TD_F_CHILD));

        if (td->o.rate_process == RATE_PROCESS_POISSON) {
                uint64_t val, iops;

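                /*
                 * Exponentially distributed inter-arrival time: mean
                 * 1000000/iops usec, scaled by -ln(U) with U uniform in
                 * (0, 1). Successive values accumulate in last_usec[]
                 * to form the next issue time.
                 */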
                iops = bps / td->o.bs[ddir];
                val = (int64_t) (1000000 / iops) *
                                -logf(__rand_0_1(&td->poisson_state[ddir]));
                if (val) {
                        dprint(FD_RATE, "poisson rate iops=%llu, ddir=%d\n",
                                        (unsigned long long) 1000000 / val,
                                        ddir);
                }
                td->last_usec[ddir] += val;
                return td->last_usec[ddir];
        } else if (bps) {
                uint64_t bytes = td->rate_io_issue_bytes[ddir];
                uint64_t secs = bytes / bps;
                uint64_t remainder = bytes % bps;

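                /*
                 * Split bytes/bps into whole seconds and a remainder
                 * before scaling to usec, so bytes * 1000000 can't
                 * overflow 64 bits for large byte counts.
                 */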
                return remainder * 1000000 / bps + secs * 1000000;
        }

        return 0;
}

static void handle_thinktime(struct thread_data *td, enum fio_ddir ddir)
{
        unsigned long long b;
        uint64_t total;
        int left;

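        /*
         * Only pause once every thinktime_blocks completed blocks.
         */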
        b = ddir_rw_sum(td->io_blocks);
        if (b % td->o.thinktime_blocks)
                return;

        io_u_quiesce(td);

        total = 0;
        if (td->o.thinktime_spin)
                total = usec_spin(td->o.thinktime_spin);

        left = td->o.thinktime - total;
        if (left)
                total += usec_sleep(td, left);

        /*
         * If we're ignoring thinktime for the rate, add the number of bytes
         * we would have done while sleeping.
         */
        if (total && td->rate_bps[ddir] && td->o.rate_ign_think)
                td->rate_io_issue_bytes[ddir] += (td->rate_bps[ddir] * total) / 1000000;
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static void do_io(struct thread_data *td, uint64_t *bytes_done)
{
        unsigned int i;
        int ret = 0;
        uint64_t total_bytes, bytes_issued = 0;

        for (i = 0; i < DDIR_RWDIR_CNT; i++)
                bytes_done[i] = td->bytes_done[i];

        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
        else
                td_set_runstate(td, TD_RUNNING);

        lat_target_init(td);

        total_bytes = td->o.size;
        /*
         * Allow random overwrite workloads to write up to io_size
         * before starting verification phase as 'size' doesn't apply.
         */
        if (td_write(td) && td_random(td) && td->o.norandommap)
                total_bytes = max(total_bytes, (uint64_t) td->o.io_size);
        /*
         * If verify_backlog is enabled, we'll run the verify in this
         * handler as well. For that case, we may need up to twice the
         * amount of bytes.
         */
        if (td->o.verify != VERIFY_NONE &&
           (td_write(td) && td->o.verify_backlog))
                total_bytes += td->o.size;

        /*
         * In trimwrite mode, each byte is trimmed and then written, so
         * allow total_bytes to be twice as big.
         */
        if (td_trimwrite(td))
                total_bytes += td->total_io_size;

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timespec comp_time;
                struct io_u *io_u;
                int full;
                enum fio_ddir ddir;

                check_update_rusage(td);

                if (td->terminate || td->done)
                        break;

                update_ts_cache(td);

                if (runtime_exceeded(td, &td->ts_cache)) {
                        __update_ts_cache(td);
                        if (runtime_exceeded(td, &td->ts_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                /*
                 * Break if we exceeded the bytes. The exception is time
                 * based runs, but we still need to break out of the loop
                 * for those to run verification, if enabled.
                 */
                if (bytes_issued >= total_bytes &&
                    (!td->o.time_based ||
                     (td->o.time_based && td->o.verify != VERIFY_NONE)))
                        break;

                io_u = get_io_u(td);
                if (IS_ERR_OR_NULL(io_u)) {
                        int err = PTR_ERR(io_u);

                        io_u = NULL;
                        ddir = DDIR_INVAL;
                        if (err == -EBUSY) {
                                ret = FIO_Q_BUSY;
                                goto reap;
                        }
                        if (td->o.latency_target)
                                goto reap;
                        break;
                }

                ddir = io_u->ddir;

                /*
                 * Add verification end_io handler if:
                 *      - Asked to verify (!td_rw(td))
                 *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

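                        /*
                         * Seed the verify RNG for this io_u; on 64-bit
                         * builds, mix in a second draw so the full seed
                         * width is populated.
                         */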
                        if (!td->o.verify_pattern_bytes) {
                                io_u->rand_seed = __rand(&td->verify_state);
                                if (sizeof(int) != sizeof(long *))
                                        io_u->rand_seed *= __rand(&td->verify_state);
                        }

                        if (verify_state_should_stop(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
                                io_u->end_io = verify_io_u;
                        td_set_runstate(td, TD_VERIFYING);
                } else if (in_ramp_time(td))
                        td_set_runstate(td, TD_RAMP);
                else
                        td_set_runstate(td, TD_RUNNING);

                /*
                 * Always log IO before it's issued, so we know the specific
                 * order of it. The logged unit will track when the IO has
                 * completed.
                 */
                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        const unsigned long blen = io_u->xfer_buflen;
                        const enum fio_ddir ddir = acct_ddir(io_u);

                        if (td->error)
                                break;

                        workqueue_enqueue(&td->io_wq, &io_u->work);
                        ret = FIO_Q_QUEUED;

                        if (ddir_rw(ddir)) {
                                td->io_issues[ddir]++;
                                td->io_issue_bytes[ddir] += blen;
                                td->rate_io_issue_bytes[ddir] += blen;
                        }

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                } else {
                        ret = io_u_submit(td, io_u);

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                        if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
                                break;

                        /*
                         * See if we need to complete some commands. Note that
                         * we can get BUSY even without IO queued, if the
                         * system is resource starved.
                         */
reap:
                        full = queue_full(td) ||
                                (ret == FIO_Q_BUSY && td->cur_depth);
                        if (full || io_in_polling(td))
                                ret = wait_for_completions(td, &comp_time);
                }
                if (ret < 0)
                        break;
                if (!ddir_rw_sum(td->bytes_done) &&
                    !td_ioengine_flagged(td, FIO_NOIO))
                        continue;

                if (!in_ramp_time(td) && should_check_rate(td)) {
                        if (check_min_rate(td, &comp_time)) {
                                if (exitall_on_terminate || td->o.exitall_error)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
                                break;
                        }
                }
                if (!in_ramp_time(td) && td->o.latency_target)
                        lat_target_check(td);

                if (ddir_rw(ddir) && td->o.thinktime)
                        handle_thinktime(td, ddir);
        }

        check_update_rusage(td);

        if (td->trim_entries)
                log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

        if (td->o.fill_device && td->error == ENOSPC) {
                td->error = 0;
                fio_mark_td_terminate(td);
        }
        if (!td->error) {
                struct fio_file *f;

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        workqueue_flush(&td->io_wq);
                        i = 0;
                } else
                        i = td->cur_depth;

                if (i) {
                        ret = io_u_queued_complete(td, i);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }

                if (should_fsync(td) && td->o.end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);

                        for_each_file(td, f, i) {
                                if (!fio_file_fsync(td, f))
                                        continue;

                                log_err("fio: end_fsync failed for file %s\n",
                                                                f->file_name);
                        }
                }
        } else
                cleanup_pending_aio(td);

        /*
         * stop job if we failed doing any IO
         */
        if (!ddir_rw_sum(td->this_io_bytes))
                td->done = 1;

        for (i = 0; i < DDIR_RWDIR_CNT; i++)
                bytes_done[i] = td->bytes_done[i] - bytes_done[i];
}

static void free_file_completion_logging(struct thread_data *td)
{
        struct fio_file *f;
        unsigned int i;

        for_each_file(td, f, i) {
                if (!f->last_write_comp)
                        break;
                sfree(f->last_write_comp);
        }
}

static int init_file_completion_logging(struct thread_data *td,
                                        unsigned int depth)
{
        struct fio_file *f;
        unsigned int i;

        if (td->o.verify == VERIFY_NONE || !td->o.verify_state_save)
                return 0;

        for_each_file(td, f, i) {
                f->last_write_comp = scalloc(depth, sizeof(uint64_t));
                if (!f->last_write_comp)
                        goto cleanup;
        }

        return 0;

cleanup:
        free_file_completion_logging(td);
        log_err("fio: failed to alloc write comp data\n");
        return 1;
}

static void cleanup_io_u(struct thread_data *td)
{
        struct io_u *io_u;

        while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

                if (td->io_ops->io_u_free)
                        td->io_ops->io_u_free(td, io_u);

                fio_memfree(io_u, sizeof(*io_u));
        }

        free_io_mem(td);

        io_u_rexit(&td->io_u_requeues);
        io_u_qexit(&td->io_u_freelist);
        io_u_qexit(&td->io_u_all);

        free_file_completion_logging(td);
}

static int init_io_u(struct thread_data *td)
{
        struct io_u *io_u;
        unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        int data_xfer = 1, err;
        char *p;

        max_units = td->o.iodepth;
        max_bs = td_max_bs(td);
        min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;

        if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
                data_xfer = 0;

        err = 0;
        err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
        err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth);
        err += !io_u_qinit(&td->io_u_all, td->o.iodepth);

        if (err) {
                log_err("fio: failed setting up IO queues\n");
                return 1;
        }

        /*
         * if we may later need to do address alignment, then add any
         * possible adjustment here so that we don't cause a buffer
         * overflow later. this adjustment may be too much if we get
         * lucky and the allocator gives us an aligned address.
         */
        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            td_ioengine_flagged(td, FIO_RAWIO))
                td->orig_buffer_size += page_mask + td->o.mem_align;

        if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
                unsigned long bs;

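                /*
                 * Round the buffer size up to a multiple of the huge
                 * page size (assumes hugepage_size is a power of two).
                 */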
                bs = td->orig_buffer_size + td->o.hugepage_size - 1;
                td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
        }

        if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
                log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
                return 1;
        }

        if (data_xfer && allocate_io_mem(td))
                return 1;

        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            td_ioengine_flagged(td, FIO_RAWIO))
                p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
        else
                p = td->orig_buffer;

        cl_align = os_cache_line_size();

        for (i = 0; i < max_units; i++) {
                void *ptr;

                if (td->terminate)
                        return 1;

                ptr = fio_memalign(cl_align, sizeof(*io_u));
                if (!ptr) {
                        log_err("fio: unable to allocate aligned memory\n");
                        break;
                }

                io_u = ptr;
                memset(io_u, 0, sizeof(*io_u));
                INIT_FLIST_HEAD(&io_u->verify_list);
                dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

                if (data_xfer) {
                        io_u->buf = p;
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

                        if (td_write(td))
                                io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
                                 * going to be doing writes.
                                 */
                                fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
                        }
                }

                io_u->index = i;
                io_u->flags = IO_U_F_FREE;
                io_u_qpush(&td->io_u_freelist, io_u);

                /*
                 * io_u never leaves this stack, used for iteration of all
                 * io_u buffers.
                 */
                io_u_qpush(&td->io_u_all, io_u);

                if (td->io_ops->io_u_init) {
                        int ret = td->io_ops->io_u_init(td, io_u);

                        if (ret) {
                                log_err("fio: failed to init engine data: %d\n", ret);
                                return 1;
                        }
                }

                p += max_bs;
        }

        if (init_file_completion_logging(td, max_units))
                return 1;

        return 0;
}

/*
 * This function is Linux specific.
 * FIO_HAVE_IOSCHED_SWITCH enabled currently means it's Linux.
 */
static int switch_ioscheduler(struct thread_data *td)
{
#ifdef FIO_HAVE_IOSCHED_SWITCH
        char tmp[256], tmp2[128];
        FILE *f;
        int ret;

        if (td_ioengine_flagged(td, FIO_DISKLESSIO))
                return 0;

        assert(td->files && td->files[0]);
        sprintf(tmp, "%s/queue/scheduler", td->files[0]->du->sysfs_root);

        f = fopen(tmp, "r+");
        if (!f) {
                if (errno == ENOENT) {
                        log_err("fio: os or kernel doesn't support IO scheduler"
                                " switching\n");
                        return 0;
                }
                td_verror(td, errno, "fopen iosched");
                return 1;
        }

        /*
         * Set io scheduler.
         */
        ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
                td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }

        rewind(f);

        /*
         * Read back and check that the selected scheduler is now the default.
         */
        memset(tmp, 0, sizeof(tmp));
        ret = fread(tmp, sizeof(tmp), 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
        /*
         * either a list of io schedulers or "none\n" is expected.
         */
        tmp[strlen(tmp) - 1] = '\0';

        /*
         * Write to "none" entry doesn't fail, so check the result here.
         */
        if (!strcmp(tmp, "none")) {
                log_err("fio: io scheduler is not tunable\n");
                fclose(f);
                return 0;
        }

        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
                td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
#else
        return 0;
#endif
}

static bool keep_running(struct thread_data *td)
{
        unsigned long long limit;

        if (td->done)
                return false;
        if (td->terminate)
                return false;
        if (td->o.time_based)
                return true;
        if (td->o.loops) {
                td->o.loops--;
                return true;
        }
        if (exceeds_number_ios(td))
                return false;

        if (td->o.io_size)
                limit = td->o.io_size;
        else
                limit = td->o.size;

        if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
                uint64_t diff;

                /*
                 * If the difference is less than the maximum IO size, we
                 * are done.
                 */
                diff = limit - ddir_rw_sum(td->io_bytes);
                if (diff < td_max_bs(td))
                        return false;

                if (fio_files_done(td) && !td->o.io_size)
                        return false;

                return true;
        }

        return false;
}

static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
        size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
        int ret;
        char *str;

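        /*
         * The extra 9 bytes cover the literal " &> ", the two '.'
         * separators, and the "txt" suffix in the format below; the
         * +1 is the terminating NUL.
         */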
        str = malloc(newlen);
        sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

        log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
        ret = system(str);
        if (ret == -1)
                log_err("fio: exec of cmd <%s> failed\n", str);

        free(str);
        return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
        td_set_runstate(td, TD_RUNNING);

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
                struct io_u *io_u;
                int ret;

                if (td->terminate || td->done)
                        break;

                io_u = get_io_u(td);
                if (IS_ERR_OR_NULL(io_u))
                        break;

                io_u_set(td, io_u, IO_U_F_FLIGHT);
                io_u->error = 0;
                io_u->resid = 0;
                if (ddir_rw(acct_ddir(io_u)))
                        td->io_issues[acct_ddir(io_u)]++;
                if (ddir_rw(io_u->ddir)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }

                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                ret = io_u_sync_complete(td, io_u);
                (void) ret;
        }

        return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}

struct fork_data {
        struct thread_data *td;
        struct sk_out *sk_out;
};

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
        struct fork_data *fd = data;
        unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
        struct thread_data *td = fd->td;
        struct thread_options *o = &td->o;
        struct sk_out *sk_out = fd->sk_out;
        uint64_t bytes_done[DDIR_RWDIR_CNT];
        int deadlock_loop_cnt;
        bool clear_state, did_some_io;
        int ret;

        sk_out_assign(sk_out);
        free(fd);

        if (!o->use_thread) {
                setsid();
                td->pid = getpid();
        } else
                td->pid = gettid();

        fio_local_clock_init(o->use_thread);

        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

        if (is_backend)
                fio_server_send_start(td);

        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        INIT_FLIST_HEAD(&td->next_rand_list);
        td->io_hist_tree = RB_ROOT;

        ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
        if (ret) {
                td_verror(td, ret, "mutex_cond_init_pshared");
                goto err;
        }
        ret = cond_init_pshared(&td->verify_cond);
        if (ret) {
                td_verror(td, ret, "cond_init_pshared");
1556                 goto err;
1557         }
1558
1559         td_set_runstate(td, TD_INITIALIZED);
1560         dprint(FD_MUTEX, "up startup_mutex\n");
1561         fio_mutex_up(startup_mutex);
1562         dprint(FD_MUTEX, "wait on td->mutex\n");
1563         fio_mutex_down(td->mutex);
1564         dprint(FD_MUTEX, "done waiting on td->mutex\n");
1565
1566         /*
1567          * Switching to a new gid requires privilege, so do this before
1568          * setting the uid, which may drop that privilege.
1569          */
1570         if (o->gid != -1U && setgid(o->gid)) {
1571                 td_verror(td, errno, "setgid");
1572                 goto err;
1573         }
1574         if (o->uid != -1U && setuid(o->uid)) {
1575                 td_verror(td, errno, "setuid");
1576                 goto err;
1577         }
1578
1579         /*
1580          * Do this early; we don't want the compress threads to be limited
1581          * to the same CPUs as the IO workers. So do this before we set
1582          * any potential CPU affinity.
1583          */
1584         if (iolog_compress_init(td, sk_out))
1585                 goto err;
1586
1587         /*
1588          * If we have a gettimeofday() thread, make sure the CPU it runs
1589          * on is excluded from this job's CPU mask.
1590          */
1591         if (o->gtod_cpu)
1592                 fio_cpu_clear(&o->cpumask, o->gtod_cpu);
1593
1594         /*
1595          * Set affinity first, in case it has an impact on the memory
1596          * allocations.
1597          */
1598         if (fio_option_is_set(o, cpumask)) {
1599                 if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
1600                         ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
1601                         if (!ret) {
1602                                 log_err("fio: no CPUs set\n");
1603                                 log_err("fio: Try increasing number of available CPUs\n");
1604                                 td_verror(td, EINVAL, "cpus_split");
1605                                 goto err;
1606                         }
1607                 }
1608                 ret = fio_setaffinity(td->pid, o->cpumask);
1609                 if (ret == -1) {
1610                         td_verror(td, errno, "cpu_set_affinity");
1611                         goto err;
1612                 }
1613         }
1614
1615 #ifdef CONFIG_LIBNUMA
1616         /* numa node setup */
1617         if (fio_option_is_set(o, numa_cpunodes) ||
1618             fio_option_is_set(o, numa_memnodes)) {
1619                 struct bitmask *mask;
1620
1621                 if (numa_available() < 0) {
1622                         td_verror(td, errno, "system does not support the NUMA API");
1623                         goto err;
1624                 }
1625
1626                 if (fio_option_is_set(o, numa_cpunodes)) {
1627                         mask = numa_parse_nodestring(o->numa_cpunodes);
                             if (!mask) {
                                     td_verror(td, EINVAL, "numa_parse_nodestring");
                                     goto err;
                             }
1628                         ret = numa_run_on_node_mask(mask);
1629                         numa_free_nodemask(mask);
1630                         if (ret == -1) {
1631                                 td_verror(td, errno,
1632                                         "numa_run_on_node_mask failed");
1633                                 goto err;
1634                         }
1635                 }
1636
1637                 if (fio_option_is_set(o, numa_memnodes)) {
1638                         mask = NULL;
1639                         if (o->numa_memnodes) {
1640                                 mask = numa_parse_nodestring(o->numa_memnodes);
                                     if (!mask) {
                                             td_verror(td, EINVAL, "numa_parse_nodestring");
                                             goto err;
                                     }
                             }
1641
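                             /* Map numa_mem_mode onto the matching libnuma policy call. */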
1642                         switch (o->numa_mem_mode) {
1643                         case MPOL_INTERLEAVE:
1644                                 numa_set_interleave_mask(mask);
1645                                 break;
1646                         case MPOL_BIND:
1647                                 numa_set_membind(mask);
1648                                 break;
1649                         case MPOL_LOCAL:
1650                                 numa_set_localalloc();
1651                                 break;
1652                         case MPOL_PREFERRED:
1653                                 numa_set_preferred(o->numa_mem_prefer_node);
1654                                 break;
1655                         case MPOL_DEFAULT:
1656                         default:
1657                                 break;
1658                         }
1659
1660                         if (mask)
1661                                 numa_free_nodemask(mask);
1662
1663                 }
1664         }
1665 #endif
1666
1667         if (fio_pin_memory(td))
1668                 goto err;
1669
1670         /*
1671          * May alter parameters that init_io_u() will use, so we need to
1672          * do this first.
1673          */
1674         if (init_iolog(td))
1675                 goto err;
1676
1677         if (init_io_u(td))
1678                 goto err;
1679
1680         if (o->verify_async && verify_async_init(td))
1681                 goto err;
1682
1683         if (fio_option_is_set(o, ioprio) ||
1684             fio_option_is_set(o, ioprio_class)) {
1685                 ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
1686                 if (ret == -1) {
1687                         td_verror(td, errno, "ioprio_set");
1688                         goto err;
1689                 }
1690         }
1691
1692         if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
1693                 goto err;
1694
1695         errno = 0;
1696         if (nice(o->nice) == -1 && errno != 0) {
1697                 td_verror(td, errno, "nice");
1698                 goto err;
1699         }
1700
1701         if (o->ioscheduler && switch_ioscheduler(td))
1702                 goto err;
1703
1704         if (!o->create_serialize && setup_files(td))
1705                 goto err;
1706
1707         if (td_io_init(td))
1708                 goto err;
1709
1710         if (!init_random_map(td))
1711                 goto err;
1712
1713         if (o->exec_prerun && exec_string(o, o->exec_prerun, "prerun"))
1714                 goto err;
1715
1716         if (o->pre_read && !pre_read_files(td))
1717                 goto err;
1718
1719         fio_verify_init(td);
1720
1721         if (rate_submit_init(td, sk_out))
1722                 goto err;
1723
1724         set_epoch_time(td, o->log_unix_epoch);
1725         fio_getrusage(&td->ru_start);
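             /*
              * Bandwidth/IOPS sampling and steady-state detection all
              * measure from the job epoch, so seed their timestamps with it.
              */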
1726         memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
1727         memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
1728         memcpy(&td->ss.prev_time, &td->epoch, sizeof(td->epoch));
1729
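             /*
              * Rate-minimum checking compares against the time of the last
              * sample, so start the per-direction lastrate clocks at the
              * sampling epoch as well.
              */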
1730         if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
1731                         o->ratemin[DDIR_TRIM]) {
1732                 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
1733                                         sizeof(td->bw_sample_time));
1734                 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
1735                                         sizeof(td->bw_sample_time));
1736                 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
1737                                         sizeof(td->bw_sample_time));
1738         }
1739
1740         memset(bytes_done, 0, sizeof(bytes_done));
1741         clear_state = false;
1742         did_some_io = false;
1743
1744         while (keep_running(td)) {
1745                 uint64_t verify_bytes;
1746
1747                 fio_gettime(&td->start, NULL);
1748                 memcpy(&td->ts_cache, &td->start, sizeof(td->start));
1749
1750                 if (clear_state) {
1751                         clear_io_state(td, 0);
1752
1753                         if (o->unlink_each_loop && unlink_all_files(td))
1754                                 break;
1755                 }
1756
1757                 prune_io_piece_log(td);
1758
1759                 if (td->o.verify_only && td_write(td))
1760                         verify_bytes = do_dry_run(td);
1761                 else {
1762                         do_io(td, bytes_done);
1763
1764                         if (!ddir_rw_sum(bytes_done)) {
1765                                 fio_mark_td_terminate(td);
1766                                 verify_bytes = 0;
1767                         } else {
1768                                 verify_bytes = bytes_done[DDIR_WRITE] +
1769                                                 bytes_done[DDIR_TRIM];
1770                         }
1771                 }
1772
1773                 /*
1774                  * If we took too long to shut down, the main thread could
1775                  * already consider us reaped/exited. If that happens, break
1776                  * out and clean up.
1777                  */
1778                 if (td->runstate >= TD_EXITED)
1779                         break;
1780
1781                 clear_state = true;
1782
1783                 /*
1784                  * Make sure we've successfully updated the rusage stats
1785                  * before waiting on the stat mutex. Otherwise we could have
1786                  * the stat thread holding stat mutex and waiting for
1787                  * the rusage_sem, which would never get upped because
1788                  * this thread is waiting for the stat mutex.
1789                  */
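             /*
              * Each spin below sleeps ~1ms, so the 5000-iteration cap bounds
              * the wait at roughly five seconds before giving up with
              * EDEADLK.
              */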
1790                 deadlock_loop_cnt = 0;
1791                 do {
1792                         check_update_rusage(td);
1793                         if (!fio_mutex_down_trylock(stat_mutex))
1794                                 break;
1795                         usleep(1000);
1796                         if (deadlock_loop_cnt++ > 5000) {
1797                                 log_err("fio seems to be stuck grabbing stat_mutex, forcibly exiting\n");
1798                                 td->error = EDEADLK;
1799                                 goto err;
1800                         }
1801                 } while (1);
1802
1803                 if (td_read(td) && td->io_bytes[DDIR_READ])
1804                         update_runtime(td, elapsed_us, DDIR_READ);
1805                 if (td_write(td) && td->io_bytes[DDIR_WRITE])
1806                         update_runtime(td, elapsed_us, DDIR_WRITE);
1807                 if (td_trim(td) && td->io_bytes[DDIR_TRIM])
1808                         update_runtime(td, elapsed_us, DDIR_TRIM);
1809                 fio_gettime(&td->start, NULL);
1810                 fio_mutex_up(stat_mutex);
1811
1812                 if (td->error || td->terminate)
1813                         break;
1814
1815                 if (!o->do_verify ||
1816                     o->verify == VERIFY_NONE ||
1817                     td_ioengine_flagged(td, FIO_UNIDIR))
1818                         continue;
1819
1820                 if (ddir_rw_sum(bytes_done))
1821                         did_some_io = true;
1822
1823                 clear_io_state(td, 0);
1824
1825                 fio_gettime(&td->start, NULL);
1826
1827                 do_verify(td, verify_bytes);
1828
1829                 /*
1830                  * See comment further up for why this is done here.
1831                  */
1832                 check_update_rusage(td);
1833
1834                 fio_mutex_down(stat_mutex);
1835                 update_runtime(td, elapsed_us, DDIR_READ);
1836                 fio_gettime(&td->start, NULL);
1837                 fio_mutex_up(stat_mutex);
1838
1839                 if (td->error || td->terminate)
1840                         break;
1841         }
1842
1843         /*
1844          * If td ended up with no I/O when it should have done some, then
1845          * something went wrong, unless FIO_NOIO or FIO_DISKLESSIO is set.
1846          * (Are there other flags that should be ignored here?)
1847          */
1848         if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
1849             !did_some_io && !td->o.create_only &&
1850             !(td_ioengine_flagged(td, FIO_NOIO) ||
1851               td_ioengine_flagged(td, FIO_DISKLESSIO)))
1852                 log_err("%s: No I/O performed by %s, "
1853                          "perhaps try the --debug=io option for details?\n",
1854                          td->o.name, td->io_ops->name);
1855
1856         td_set_runstate(td, TD_FINISHING);
1857
1858         update_rusage_stat(td);
1859         td->ts.total_run_time = mtime_since_now(&td->epoch);
1860         td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1861         td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1862         td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1863
1864         if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
1865             (td->o.verify != VERIFY_NONE && td_write(td)))
1866                 verify_save_state(td->thread_number);
1867
1868         fio_unpin_memory(td);
1869
1870         td_writeout_logs(td, true);
1871
1872         iolog_compress_exit(td);
1873         rate_submit_exit(td);
1874
1875         if (o->exec_postrun)
1876                 exec_string(o, o->exec_postrun, "postrun");
1877
1878         if (exitall_on_terminate || (o->exitall_error && td->error))
1879                 fio_terminate_threads(td->groupid);
1880
1881 err:
1882         if (td->error)
1883                 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
1884                                                         td->verror);
1885
1886         if (o->verify_async)
1887                 verify_async_exit(td);
1888
1889         close_and_free_files(td);
1890         cleanup_io_u(td);
1891         close_ioengine(td);
1892         cgroup_shutdown(td, &cgroup_mnt);
1893         verify_free_state(td);
1894
1895         if (td->zone_state_index) {
1896                 int i;
1897
1898                 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1899                         free(td->zone_state_index[i]);
1900                 free(td->zone_state_index);
1901                 td->zone_state_index = NULL;
1902         }
1903
1904         if (fio_option_is_set(o, cpumask)) {
1905                 ret = fio_cpuset_exit(&o->cpumask);
1906                 if (ret)
1907                         td_verror(td, ret, "fio_cpuset_exit");
1908         }
1909
1910         /*
1911          * Do this very late; it will log file closing as well.
1912          */
1913         if (o->write_iolog_file)
1914                 write_iolog_close(td);
1915
1916         td_set_runstate(td, TD_EXITED);
1917
1918         /*
1919          * Do this last after setting our runstate to exited, so we
1920          * know that the stat thread is signaled.
1921          */
1922         check_update_rusage(td);
1923
1924         sk_out_drop();
1925         return (void *) (uintptr_t) td->error;
1926 }
1927
1928 /*
1929  * Run over the job map and reap the threads that have exited, if any.
1930  */
1931 static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
1932                          uint64_t *m_rate)
1933 {
1934         struct thread_data *td;
1935         unsigned int cputhreads, realthreads, pending;
1936         int i, status, ret;
1937
1938         /*
1939          * reap exited threads (TD_EXITED -> TD_REAPED)
1940          */
1941         realthreads = pending = cputhreads = 0;
1942         for_each_td(td, i) {
1943                 int flags = 0;
1944
1945                 if (!strcmp(td->o.ioengine, "cpuio"))
1946                         cputhreads++;
1947                 else
1948                         realthreads++;
1949
1950                 if (!td->pid) {
1951                         pending++;
1952                         continue;
1953                 }
1954                 if (td->runstate == TD_REAPED)
1955                         continue;
1956                 if (td->o.use_thread) {
1957                         if (td->runstate == TD_EXITED) {
1958                                 td_set_runstate(td, TD_REAPED);
1959                                 goto reaped;
1960                         }
1961                         continue;
1962                 }
1963
1964                 flags = WNOHANG;
1965                 if (td->runstate == TD_EXITED)
1966                         flags = 0;
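                     /*
                      * Poll with WNOHANG while the job may still be running;
                      * once it has reached TD_EXITED, a blocking waitpid()
                      * is safe.
                      */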
1967
1968                 /*
1969                  * check if someone quit or got killed in an unusual way
1970                  */
1971                 ret = waitpid(td->pid, &status, flags);
1972                 if (ret < 0) {
1973                         if (errno == ECHILD) {
1974                                 log_err("fio: pid=%d disappeared %d\n",
1975                                                 (int) td->pid, td->runstate);
1976                                 td->sig = ECHILD;
1977                                 td_set_runstate(td, TD_REAPED);
1978                                 goto reaped;
1979                         }
1980                         perror("waitpid");
1981                 } else if (ret == td->pid) {
1982                         if (WIFSIGNALED(status)) {
1983                                 int sig = WTERMSIG(status);
1984
1985                                 if (sig != SIGTERM && sig != SIGUSR2)
1986                                         log_err("fio: pid=%d, got signal=%d\n",
1987                                                         (int) td->pid, sig);
1988                                 td->sig = sig;
1989                                 td_set_runstate(td, TD_REAPED);
1990                                 goto reaped;
1991                         }
1992                         if (WIFEXITED(status)) {
1993                                 if (WEXITSTATUS(status) && !td->error)
1994                                         td->error = WEXITSTATUS(status);
1995
1996                                 td_set_runstate(td, TD_REAPED);
1997                                 goto reaped;
1998                         }
1999                 }
2000
2001                 /*
2002                  * If the job is stuck, forcefully time it out and
2003                  * move on.
2004                  */
2005                 if (td->terminate &&
2006                     td->runstate < TD_FSYNCING &&
2007                     time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
2008                         log_err("fio: job '%s' (state=%d) hasn't exited in "
2009                                 "%lu seconds, it appears to be stuck. Doing "
2010                                 "forceful exit of this job.\n",
2011                                 td->o.name, td->runstate,
2012                                 (unsigned long) time_since_now(&td->terminate_time));
2013                         td_set_runstate(td, TD_REAPED);
2014                         goto reaped;
2015                 }
2016
2017                 /*
2018                  * thread is not dead, continue
2019                  */
2020                 pending++;
2021                 continue;
2022 reaped:
2023                 (*nr_running)--;
2024                 (*m_rate) -= ddir_rw_sum(td->o.ratemin);
2025                 (*t_rate) -= ddir_rw_sum(td->o.rate);
2026                 if (!td->pid)
2027                         pending--;
2028
2029                 if (td->error)
2030                         exit_value++;
2031
2032                 done_secs += mtime_since_now(&td->epoch) / 1000;
2033                 profile_td_exit(td);
2034         }
2035
2036         if (*nr_running == cputhreads && !pending && realthreads)
2037                 fio_terminate_threads(TERMINATE_ALL);
2038 }
2039
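     /*
      * Returns true when the trigger file exists; the file is unlinked, so
      * a given trigger fires only once.
      */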
2040 static bool __check_trigger_file(void)
2041 {
2042         struct stat sb;
2043
2044         if (!trigger_file)
2045                 return false;
2046
2047         if (stat(trigger_file, &sb))
2048                 return false;
2049
2050         if (unlink(trigger_file) < 0)
2051                 log_err("fio: failed to unlink %s: %s\n", trigger_file,
2052                                                         strerror(errno));
2053
2054         return true;
2055 }
2056
2057 static bool trigger_timedout(void)
2058 {
2059         if (trigger_timeout &&
2060             time_since_genesis() >= trigger_timeout) {
2061                 trigger_timeout = 0;
2062                 return true;
2063         }
2064
2065         return false;
2066 }
2067
2068 void exec_trigger(const char *cmd)
2069 {
2070         int ret;
2071
2072         if (!cmd || cmd[0] == '\0')
2073                 return;
2074
2075         ret = system(cmd);
2076         if (ret == -1)
2077                 log_err("fio: failed executing %s trigger\n", cmd);
2078 }
2079
2080 void check_trigger_file(void)
2081 {
2082         if (__check_trigger_file() || trigger_timedout()) {
2083                 if (nr_clients)
2084                         fio_clients_send_trigger(trigger_remote_cmd);
2085                 else {
2086                         verify_save_state(IO_LIST_ALL);
2087                         fio_terminate_threads(TERMINATE_ALL);
2088                         exec_trigger(trigger_cmd);
2089                 }
2090         }
2091 }
2092
2093 static int fio_verify_load_state(struct thread_data *td)
2094 {
2095         int ret;
2096
2097         if (!td->o.verify_state)
2098                 return 0;
2099
2100         if (is_backend) {
2101                 void *data;
2102
2103                 ret = fio_server_get_verify_state(td->o.name,
2104                                         td->thread_number - 1, &data);
2105                 if (!ret)
2106                         verify_assign_state(td, data);
2107         } else
2108                 ret = verify_load_state(td, "local");
2109
2110         return ret;
2111 }
2112
2113 static void do_usleep(unsigned int usecs)
2114 {
2115         check_for_running_stats();
2116         check_trigger_file();
2117         usleep(usecs);
2118 }
2119
2120 static bool check_mount_writes(struct thread_data *td)
2121 {
2122         struct fio_file *f;
2123         unsigned int i;
2124
2125         if (!td_write(td) || td->o.allow_mounted_write)
2126                 return false;
2127
2128         /*
2129          * If FIO_HAVE_CHARDEV_SIZE is defined, it's likely that chrdevs
2130          * can be mkfs'd and mounted, so check character devices as well.
2131          */
2132         for_each_file(td, f, i) {
2133 #ifdef FIO_HAVE_CHARDEV_SIZE
2134                 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR)
2135 #else
2136                 if (f->filetype != FIO_TYPE_BLOCK)
2137 #endif
2138                         continue;
2139                 if (device_is_mounted(f->file_name))
2140                         goto mounted;
2141         }
2142
2143         return false;
2144 mounted:
2145         log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
2146         return true;
2147 }
2148
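     /*
      * Returns true while the job named by this job's wait_for option has
      * not yet reached TD_EXITED, i.e. while this job must keep waiting.
      */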
2149 static bool waitee_running(struct thread_data *me)
2150 {
2151         const char *waitee = me->o.wait_for;
2152         const char *self = me->o.name;
2153         struct thread_data *td;
2154         int i;
2155
2156         if (!waitee)
2157                 return false;
2158
2159         for_each_td(td, i) {
2160                 if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
2161                         continue;
2162
2163                 if (td->runstate < TD_EXITED) {
2164                         dprint(FD_PROCESS, "%s fenced by %s(%s)\n",
2165                                         self, td->o.name,
2166                                         runstate_to_name(td->runstate));
2167                         return true;
2168                 }
2169         }
2170
2171         dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
2172         return false;
2173 }
2174
2175 /*
2176  * Main function for kicking off and reaping jobs, as needed.
2177  */
2178 static void run_threads(struct sk_out *sk_out)
2179 {
2180         struct thread_data *td;
2181         unsigned int i, todo, nr_running, nr_started;
2182         uint64_t m_rate, t_rate;
2183         uint64_t spent;
2184
2185         if (fio_gtod_offload && fio_start_gtod_thread())
2186                 return;
2187
2188         fio_idle_prof_init();
2189
2190         set_sig_handlers();
2191
2192         nr_thread = nr_process = 0;
2193         for_each_td(td, i) {
2194                 if (check_mount_writes(td))
2195                         return;
2196                 if (td->o.use_thread)
2197                         nr_thread++;
2198                 else
2199                         nr_process++;
2200         }
2201
2202         if (output_format & FIO_OUTPUT_NORMAL) {
2203                 log_info("Starting ");
2204                 if (nr_thread)
2205                         log_info("%d thread%s", nr_thread,
2206                                                 nr_thread > 1 ? "s" : "");
2207                 if (nr_process) {
2208                         if (nr_thread)
2209                                 log_info(" and ");
2210                         log_info("%d process%s", nr_process,
2211                                                 nr_process > 1 ? "es" : "");
2212                 }
2213                 log_info("\n");
2214                 log_info_flush();
2215         }
2216
2217         todo = thread_number;
2218         nr_running = 0;
2219         nr_started = 0;
2220         m_rate = t_rate = 0;
2221
2222         for_each_td(td, i) {
2223                 print_status_init(td->thread_number - 1);
2224
2225                 if (!td->o.create_serialize)
2226                         continue;
2227
2228                 if (fio_verify_load_state(td))
2229                         goto reap;
2230
2231                 /*
2232                  * Do file setup here so it happens sequentially;
2233                  * we don't want X number of threads getting their
2234                  * client data interspersed on disk.
2235                  */
2236                 if (setup_files(td)) {
2237 reap:
2238                         exit_value++;
2239                         if (td->error)
2240                                 log_err("fio: pid=%d, err=%d/%s\n",
2241                                         (int) td->pid, td->error, td->verror);
2242                         td_set_runstate(td, TD_REAPED);
2243                         todo--;
2244                 } else {
2245                         struct fio_file *f;
2246                         unsigned int j;
2247
2248                         /*
2249                          * For sharing to work, each job must always open
2250                          * its own files. So close them, if we opened them
2251                          * for creation.
2252                          */
2253                         for_each_file(td, f, j) {
2254                                 if (fio_file_open(f))
2255                                         td_io_close_file(td, f);
2256                         }
2257                 }
2258         }
2259
2260         /* start idle threads before io threads start to run */
2261         fio_idle_prof_start();
2262
2263         set_genesis_time();
2264
2265         while (todo) {
2266                 struct thread_data *map[REAL_MAX_JOBS];
2267                 struct timespec this_start;
2268                 int this_jobs = 0, left;
2269                 struct fork_data *fd;
2270
2271                 /*
2272                  * create threads (TD_NOT_CREATED -> TD_CREATED)
2273                  */
2274                 for_each_td(td, i) {
2275                         if (td->runstate != TD_NOT_CREATED)
2276                                 continue;
2277
2278                         /*
2279                          * Never got a chance to start; killed by another
2280                          * thread for some reason.
2281                          */
2282                         if (td->terminate) {
2283                                 todo--;
2284                                 continue;
2285                         }
2286
2287                         if (td->o.start_delay) {
2288                                 spent = utime_since_genesis();
2289
2290                                 if (td->o.start_delay > spent)
2291                                         continue;
2292                         }
2293
2294                         if (td->o.stonewall && (nr_started || nr_running)) {
2295                                 dprint(FD_PROCESS, "%s: stonewall wait\n",
2296                                                         td->o.name);
2297                                 break;
2298                         }
2299
2300                         if (waitee_running(td)) {
2301                                 dprint(FD_PROCESS, "%s: waiting for %s\n",
2302                                                 td->o.name, td->o.wait_for);
2303                                 continue;
2304                         }
2305
2306                         init_disk_util(td);
2307
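                             /*
                              * rusage_sem starts locked; the job ups it from
                              * check_update_rusage() whenever an update has
                              * been requested via td->update_rusage.
                              */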
2308                         td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
2309                         td->update_rusage = 0;
2310
2311                         /*
2312                          * Set state to created. Thread will transition
2313                          * to TD_INITIALIZED when it's done setting up.
2314                          */
2315                         td_set_runstate(td, TD_CREATED);
2316                         map[this_jobs++] = td;
2317                         nr_started++;
2318
2319                         fd = calloc(1, sizeof(*fd));
2320                         fd->td = td;
2321                         fd->sk_out = sk_out;
2322
2323                         if (td->o.use_thread) {
2324                                 int ret;
2325
2326                                 dprint(FD_PROCESS, "will pthread_create\n");
2327                                 ret = pthread_create(&td->thread, NULL,
2328                                                         thread_main, fd);
2329                                 if (ret) {
2330                                         log_err("pthread_create: %s\n",
2331                                                         strerror(ret));
2332                                         free(fd);
2333                                         nr_started--;
2334                                         break;
2335                                 }
2336                                 fd = NULL;
2337                                 ret = pthread_detach(td->thread);
2338                                 if (ret)
2339                                         log_err("pthread_detach: %s",
2340                                                         strerror(ret));
2341                         } else {
2342                                 pid_t pid;
2343                                 dprint(FD_PROCESS, "will fork\n");
2344                                 pid = fork();
2345                                 if (!pid) {
2346                                         int ret;
2347
2348                                         ret = (int)(uintptr_t)thread_main(fd);
2349                                         _exit(ret);
2350                                 } else if (i == fio_debug_jobno)
2351                                         *fio_debug_jobp = pid;
2352                         }
2353                         dprint(FD_MUTEX, "wait on startup_mutex\n");
2354                         if (fio_mutex_down_timeout(startup_mutex, 10000)) {
2355                                 log_err("fio: job startup hung? exiting.\n");
2356                                 fio_terminate_threads(TERMINATE_ALL);
2357                                 fio_abort = 1;
2358                                 nr_started--;
2359                                 free(fd);
2360                                 break;
2361                         }
2362                         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
2363                 }
2364
2365                 /*
2366                  * Wait for the started threads to transition to
2367                  * TD_INITIALIZED.
2368                  */
2369                 fio_gettime(&this_start, NULL);
2370                 left = this_jobs;
2371                 while (left && !fio_abort) {
2372                         if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
2373                                 break;
2374
2375                         do_usleep(100000);
2376
2377                         for (i = 0; i < this_jobs; i++) {
2378                                 td = map[i];
2379                                 if (!td)
2380                                         continue;
2381                                 if (td->runstate == TD_INITIALIZED) {
2382                                         map[i] = NULL;
2383                                         left--;
2384                                 } else if (td->runstate >= TD_EXITED) {
2385                                         map[i] = NULL;
2386                                         left--;
2387                                         todo--;
2388                                         nr_running++; /* work-around: balanced by the decrement in reap_threads() */
2389                                 }
2390                         }
2391                 }
2392
2393                 if (left) {
2394                         log_err("fio: %d job%s failed to start\n", left,
2395                                         left > 1 ? "s" : "");
2396                         for (i = 0; i < this_jobs; i++) {
2397                                 td = map[i];
2398                                 if (!td)
2399                                         continue;
2400                                 kill(td->pid, SIGTERM);
2401                         }
2402                         break;
2403                 }
2404
2405                 /*
2406                  * start created threads (TD_INITIALIZED -> TD_RUNNING).
2407                  */
2408                 for_each_td(td, i) {
2409                         if (td->runstate != TD_INITIALIZED)
2410                                 continue;
2411
2412                         if (in_ramp_time(td))
2413                                 td_set_runstate(td, TD_RAMP);
2414                         else
2415                                 td_set_runstate(td, TD_RUNNING);
2416                         nr_running++;
2417                         nr_started--;
2418                         m_rate += ddir_rw_sum(td->o.ratemin);
2419                         t_rate += ddir_rw_sum(td->o.rate);
2420                         todo--;
2421                         fio_mutex_up(td->mutex);
2422                 }
2423
2424                 reap_threads(&nr_running, &t_rate, &m_rate);
2425
2426                 if (todo)
2427                         do_usleep(100000);
2428         }
2429
2430         while (nr_running) {
2431                 reap_threads(&nr_running, &t_rate, &m_rate);
2432                 do_usleep(10000);
2433         }
2434
2435         fio_idle_prof_stop();
2436
2437         update_io_ticks();
2438 }
2439
2440 static void free_disk_util(void)
2441 {
2442         disk_util_prune_entries();
2443         helper_thread_destroy();
2444 }
2445
2446 int fio_backend(struct sk_out *sk_out)
2447 {
2448         struct thread_data *td;
2449         int i;
2450
2451         if (exec_profile) {
2452                 if (load_profile(exec_profile))
2453                         return 1;
2454                 free(exec_profile);
2455                 exec_profile = NULL;
2456         }
2457         if (!thread_number)
2458                 return 0;
2459
2460         if (write_bw_log) {
2461                 struct log_params p = {
2462                         .log_type = IO_LOG_TYPE_BW,
2463                 };
2464
2465                 setup_log(&agg_io_log[DDIR_READ], &p, "agg-read_bw.log");
2466                 setup_log(&agg_io_log[DDIR_WRITE], &p, "agg-write_bw.log");
2467                 setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
2468         }
2469
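             /*
              * startup_mutex is created locked; each job ups it once it has
              * reached TD_INITIALIZED (see thread_main()), and run_threads()
              * waits on it as it spawns each job.
              */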
2470         startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
2471         if (startup_mutex == NULL)
2472                 return 1;
2473
2474         set_genesis_time();
2475         stat_init();
2476         helper_thread_create(startup_mutex, sk_out);
2477
2478         cgroup_list = smalloc(sizeof(*cgroup_list));
2479         INIT_FLIST_HEAD(cgroup_list);
2480
2481         run_threads(sk_out);
2482
2483         helper_thread_exit();
2484
2485         if (!fio_abort) {
2486                 __show_run_stats();
2487                 if (write_bw_log) {
2488                         for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2489                                 struct io_log *log = agg_io_log[i];
2490
2491                                 flush_log(log, false);
2492                                 free_log(log);
2493                         }
2494                 }
2495         }
2496
2497         for_each_td(td, i) {
2498                 steadystate_free(td);
2499                 fio_options_free(td);
2500                 if (td->rusage_sem) {
2501                         fio_mutex_remove(td->rusage_sem);
2502                         td->rusage_sem = NULL;
2503                 }
2504                 fio_mutex_remove(td->mutex);
2505                 td->mutex = NULL;
2506         }
2507
2508         free_disk_util();
2509         cgroup_kill(cgroup_list);
2510         sfree(cgroup_list);
2511         sfree(cgroup_mnt);
2512
2513         fio_mutex_remove(startup_mutex);
2514         stat_exit();
2515         return exit_value;
2516 }