[fio.git] / backend.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <math.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "lib/memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "err.h"
#include "workqueue.h"
#include "lib/mountcheck.h"
#include "rate-submit.h"
#include "helper_thread.h"

static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;

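/*
 * Round a buffer address up to the next page boundary; page_mask is
 * page_size - 1.
 */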
#define PAGE_ALIGN(buf) \
        (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)

#define JOB_START_TIMEOUT       (5 * 1000)

static void sig_int(int sig)
{
        if (threads) {
                if (is_backend)
                        fio_server_got_signal(sig);
                else {
                        log_info("\nfio: terminating on signal %d\n", sig);
                        log_info_flush();
                        exit_value = 128;
                }

                fio_terminate_threads(TERMINATE_ALL);
        }
}

void sig_show_status(int sig)
{
        show_running_run_stats();
}

static void set_sig_handlers(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGBREAK, &act, NULL);
#endif

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_show_status;
        act.sa_flags = SA_RESTART;
        sigaction(SIGUSR1, &act, NULL);

        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
                act.sa_flags = SA_RESTART;
                sigaction(SIGPIPE, &act, NULL);
        }
}

/*
 * Check if we are above the minimum rate given.
 */
static bool __check_min_rate(struct thread_data *td, struct timeval *now,
                             enum fio_ddir ddir)
{
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
        unsigned int ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;

        assert(ddir_rw(ddir));

        if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
                return false;

        /*
         * allow a 2 second settle period in the beginning
         */
        if (mtime_since(&td->start, now) < 2000)
                return false;

        iops += td->this_io_blocks[ddir];
        bytes += td->this_io_bytes[ddir];
        ratemin += td->o.ratemin[ddir];
        rate_iops += td->o.rate_iops[ddir];
        rate_iops_min += td->o.rate_iops_min[ddir];

        /*
         * if rate blocks is set, sample is running
         */
        if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
                spent = mtime_since(&td->lastrate[ddir], now);
                if (spent < td->o.ratecycle)
                        return false;

                if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
                        /*
                         * check bandwidth specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
                                log_err("%s: min rate %u not met\n", td->o.name,
                                                                ratemin);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
                                        log_err("%s: min rate %u not met, got"
                                                " %luKB/sec\n", td->o.name,
                                                        ratemin, rate);
                                        return true;
                                }
                        }
                } else {
                        /*
                         * check iops specified rate
                         */
                        if (iops < rate_iops) {
                                log_err("%s: min iops rate %u not met\n",
                                                td->o.name, rate_iops);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: min iops rate %u not met,"
                                                " got %lu\n", td->o.name,
                                                        rate_iops_min, rate);
                                        return true;
                                }
                        }
                }
        }

        td->rate_bytes[ddir] = bytes;
        td->rate_blocks[ddir] = iops;
        memcpy(&td->lastrate[ddir], now, sizeof(*now));
        return false;
}

static bool check_min_rate(struct thread_data *td, struct timeval *now)
{
        bool ret = false;

        if (td->bytes_done[DDIR_READ])
                ret |= __check_min_rate(td, now, DDIR_READ);
        if (td->bytes_done[DDIR_WRITE])
                ret |= __check_min_rate(td, now, DDIR_WRITE);
        if (td->bytes_done[DDIR_TRIM])
                ret |= __check_min_rate(td, now, DDIR_TRIM);

        return ret;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
        int r;

        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0);
        if (r < 0)
                return;

        /*
         * now cancel remaining active events
         */
        if (td->io_ops->cancel) {
                struct io_u *io_u;
                int i;

                io_u_qiter(&td->io_u_all, io_u, i) {
                        if (io_u->flags & IO_U_F_FLIGHT) {
                                r = td->io_ops->cancel(td, io_u);
                                if (!r)
                                        put_io_u(td, io_u);
                        }
                }
        }

        if (td->cur_depth)
                r = io_u_queued_complete(td, td->cur_depth);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
{
        struct io_u *io_u = __get_io_u(td);
        int ret;

        if (!io_u)
                return true;

        io_u->ddir = DDIR_SYNC;
        io_u->file = f;

        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
                return true;
        }

requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
                td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return true;
        } else if (ret == FIO_Q_QUEUED) {
                if (td_io_commit(td))
                        return true;
                if (io_u_queued_complete(td, 1) < 0)
                        return true;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
                        td_verror(td, io_u->error, "td_io_queue");
                        return true;
                }

                if (io_u_sync_complete(td, io_u) < 0)
                        return true;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
                        return true;
                goto requeue;
        }

        return false;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
        int ret;

        if (fio_file_open(f))
                return fio_io_sync(td, f);

        if (td_io_open_file(td, f))
                return 1;

        ret = fio_io_sync(td, f);
        td_io_close_file(td, f);
        return ret;
}

static inline void __update_tv_cache(struct thread_data *td)
{
        fio_gettime(&td->tv_cache, NULL);
}

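/*
 * The cached time in td->tv_cache is only refreshed from the clock once
 * every (tv_cache_mask + 1) calls, to cut down on the cost of
 * timekeeping in the hot IO path.
 */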
static inline void update_tv_cache(struct thread_data *td)
{
        if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
                __update_tv_cache(td);
}

static inline bool runtime_exceeded(struct thread_data *td, struct timeval *t)
{
        if (in_ramp_time(td))
                return false;
        if (!td->o.timeout)
                return false;
        if (utime_since(&td->epoch, t) >= td->o.timeout)
                return true;

        return false;
}

/*
 * We need to update the runtime consistently in ms, but keep a running
 * tally of the current elapsed time in microseconds for sub millisecond
 * updates.
 */
static inline void update_runtime(struct thread_data *td,
                                  unsigned long long *elapsed_us,
                                  const enum fio_ddir ddir)
{
        if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
                return;

        td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
        elapsed_us[ddir] += utime_since_now(&td->start);
        td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
}

static bool break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
                                int *retptr)
{
        int ret = *retptr;

        if (ret < 0 || td->error) {
                int err = td->error;
                enum error_type_bit eb;

                if (ret < 0)
                        err = -ret;

                eb = td_error_type(ddir, err);
                if (!(td->o.continue_on_error & (1 << eb)))
                        return true;

                if (td_non_fatal_error(td, eb, err)) {
                        /*
                         * Continue with the I/Os in case of
                         * a non fatal error.
                         */
                        update_error_count(td, err);
                        td_clear_error(td);
                        *retptr = 0;
                        return false;
                } else if (td->o.fill_device && err == ENOSPC) {
                        /*
                         * We expect to hit this error if
                         * fill_device option is set.
                         */
                        td_clear_error(td);
                        fio_mark_td_terminate(td);
                        return true;
                } else {
                        /*
                         * Stop the I/O in case of a fatal
                         * error.
                         */
                        update_error_count(td, err);
                        return true;
                }
        }

        return false;
}

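/*
 * If an rusage update was requested (td->update_rusage), refresh the
 * stats and signal the waiter on the rusage semaphore.
 */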
static void check_update_rusage(struct thread_data *td)
{
        if (td->update_rusage) {
                td->update_rusage = 0;
                update_rusage_stat(td);
                fio_mutex_up(td->rusage_sem);
        }
}
static int wait_for_completions(struct thread_data *td, struct timeval *time)
{
        const int full = queue_full(td);
        int min_evts = 0;
        int ret;

        if (td->flags & TD_F_REGROW_LOGS) {
                ret = io_u_quiesce(td);
                regrow_logs(td);
                return ret;
        }

        /*
         * if the queue is full, we MUST reap at least 1 event
         */
        min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
        if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
                min_evts = 1;

        if (time && (__should_check_rate(td, DDIR_READ) ||
            __should_check_rate(td, DDIR_WRITE) ||
            __should_check_rate(td, DDIR_TRIM)))
                fio_gettime(time, NULL);

        do {
                ret = io_u_queued_complete(td, min_evts);
                if (ret < 0)
                        break;
        } while (full && (td->cur_depth > td->o.iodepth_low));

        return ret;
}

int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
                   enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
                   struct timeval *comp_time)
{
        int ret2;

        switch (*ret) {
        case FIO_Q_COMPLETED:
                if (io_u->error) {
                        *ret = -io_u->error;
                        clear_io_u(td, io_u);
                } else if (io_u->resid) {
                        int bytes = io_u->xfer_buflen - io_u->resid;
                        struct fio_file *f = io_u->file;

                        if (bytes_issued)
                                *bytes_issued += bytes;

                        if (!from_verify)
                                trim_io_piece(td, io_u);

                        /*
                         * zero read, fail
                         */
                        if (!bytes) {
                                if (!from_verify)
                                        unlog_io_piece(td, io_u);
                                td_verror(td, EIO, "full resid");
                                put_io_u(td, io_u);
                                break;
                        }

                        io_u->xfer_buflen = io_u->resid;
                        io_u->xfer_buf += bytes;
                        io_u->offset += bytes;

                        if (ddir_rw(io_u->ddir))
                                td->ts.short_io_u[io_u->ddir]++;

                        f = io_u->file;
                        if (io_u->offset == f->real_file_size)
                                goto sync_done;

                        requeue_io_u(td, &io_u);
                } else {
sync_done:
                        if (comp_time && (__should_check_rate(td, DDIR_READ) ||
                            __should_check_rate(td, DDIR_WRITE) ||
                            __should_check_rate(td, DDIR_TRIM)))
                                fio_gettime(comp_time, NULL);

                        *ret = io_u_sync_complete(td, io_u);
                        if (*ret < 0)
                                break;
                }

                if (td->flags & TD_F_REGROW_LOGS)
                        regrow_logs(td);

                /*
                 * when doing I/O (not when verifying),
                 * check for any errors that are to be ignored
                 */
                if (!from_verify)
                        break;

                return 0;
        case FIO_Q_QUEUED:
                /*
                 * if the engine doesn't have a commit hook,
                 * the io_u is really queued. if it does have such
                 * a hook, it has to call io_u_queued() itself.
                 */
                if (td->io_ops->commit == NULL)
                        io_u_queued(td, io_u);
                if (bytes_issued)
                        *bytes_issued += io_u->xfer_buflen;
                break;
        case FIO_Q_BUSY:
                if (!from_verify)
                        unlog_io_piece(td, io_u);
                requeue_io_u(td, &io_u);
                ret2 = td_io_commit(td);
                if (ret2 < 0)
                        *ret = ret2;
                break;
        default:
                assert(*ret < 0);
                td_verror(td, -(*ret), "td_io_queue");
                break;
        }

        if (break_on_this_error(td, ddir, ret))
                return 1;

        return 0;
}

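/*
 * With no completion batching bounds set, we poll for completions
 * rather than waiting on a minimum batch size.
 */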
static inline bool io_in_polling(struct thread_data *td)
{
        return !td->o.iodepth_batch_complete_min &&
                   !td->o.iodepth_batch_complete_max;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
        unsigned int i;

        dprint(FD_VERIFY, "starting loop\n");

        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
                if (!fio_file_open(f))
                        continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
                        break;
        }

        check_update_rusage(td);

        if (td->error)
                return;

        /*
         * verify_state needs to be reset before verification
         * proceeds so that expected random seeds match actual
         * random seeds in headers. The main loop will reset
         * all random number generators if randrepeat is set.
         */
        if (!td->o.rand_repeatable)
                td_fill_verify_state_seed(td);

        td_set_runstate(td, TD_VERIFYING);

        io_u = NULL;
        while (!td->terminate) {
                enum fio_ddir ddir;
                int full;

                update_tv_cache(td);
                check_update_rusage(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (!td->o.experimental_verify) {
                        io_u = __get_io_u(td);
                        if (!io_u)
                                break;

                        if (get_next_verify(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td_io_prep(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }
                } else {
                        if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
                                break;

                        while ((io_u = get_io_u(td)) != NULL) {
                                if (IS_ERR(io_u)) {
                                        io_u = NULL;
                                        ret = FIO_Q_BUSY;
                                        goto reap;
                                }

                                /*
                                 * We are only interested in the places where
                                 * we wrote or trimmed IOs. Turn those into
                                 * reads for verification purposes.
                                 */
                                if (io_u->ddir == DDIR_READ) {
                                        /*
                                         * Pretend we issued it for rwmix
                                         * accounting
                                         */
                                        td->io_issues[DDIR_READ]++;
                                        put_io_u(td, io_u);
                                        continue;
                                } else if (io_u->ddir == DDIR_TRIM) {
                                        io_u->ddir = DDIR_READ;
                                        io_u_set(io_u, IO_U_F_TRIMMED);
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
                                        break;
                                } else {
                                        put_io_u(td, io_u);
                                        continue;
                                }
                        }

                        if (!io_u)
                                break;
                }

                if (verify_state_should_stop(td, io_u)) {
                        put_io_u(td, io_u);
                        break;
                }

                if (td->o.verify_async)
                        io_u->end_io = verify_io_u_async;
                else
                        io_u->end_io = verify_io_u;

                ddir = io_u->ddir;
                if (!td->o.disable_slat)
                        fio_gettime(&io_u->start_time, NULL);

                ret = td_io_queue(td, io_u);

                if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL))
                        break;

                /*
                 * if we can queue more, do so. but check if there are
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || io_in_polling(td))
                        ret = wait_for_completions(td, NULL);

                if (ret < 0)
                        break;
        }

        check_update_rusage(td);

        if (!td->error) {
                min_events = td->cur_depth;

                if (min_events)
                        ret = io_u_queued_complete(td, min_events);
        } else
                cleanup_pending_aio(td);

        td_set_runstate(td, TD_RUNNING);

        dprint(FD_VERIFY, "exiting loop\n");
}

static bool exceeds_number_ios(struct thread_data *td)
{
        unsigned long long number_ios;

        if (!td->o.number_ios)
                return false;

        number_ios = ddir_rw_sum(td->io_blocks);
        number_ios += td->io_u_queued + td->io_u_in_flight;

        return number_ios >= (td->o.number_ios * td->loops);
}

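/*
 * Note the issue/complete distinction: io_issue_bytes counts bytes
 * handed to the IO engine, while this_io_bytes (used in the _complete_
 * variant below) counts bytes whose IO has actually finished.
 */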
static bool io_issue_bytes_exceeded(struct thread_data *td)
{
        unsigned long long bytes, limit;

        if (td_rw(td))
                bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = td->io_issue_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = td->io_issue_bytes[DDIR_READ];
        else
                bytes = td->io_issue_bytes[DDIR_TRIM];

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
}

static bool io_complete_bytes_exceeded(struct thread_data *td)
{
        unsigned long long bytes, limit;

        if (td_rw(td))
                bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = td->this_io_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = td->this_io_bytes[DDIR_READ];
        else
                bytes = td->this_io_bytes[DDIR_TRIM];

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
}

/*
 * used to calculate the next io time for rate control
 */
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
        uint64_t secs, remainder, bps, bytes, iops;

        assert(!(td->flags & TD_F_CHILD));
        bytes = td->rate_io_issue_bytes[ddir];
        bps = td->rate_bps[ddir];

        if (td->o.rate_process == RATE_PROCESS_POISSON) {
                uint64_t val;
                iops = bps / td->o.bs[ddir];
                val = (int64_t) (1000000 / iops) *
                                -logf(__rand_0_1(&td->poisson_state));
                if (val) {
                        dprint(FD_RATE, "poisson rate iops=%llu\n",
                                        (unsigned long long) 1000000 / val);
                }
                td->last_usec += val;
                return td->last_usec;
        } else if (bps) {
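                /*
                 * Linear rate: compute the usec offset from job start by
                 * which 'bytes' should have been issued at 'bps' bytes/sec.
                 */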
                secs = bytes / bps;
                remainder = bytes % bps;
                return remainder * 1000000 / bps + secs * 1000000;
        }

        return 0;
}

/*
 * Main IO worker function. It retrieves io_u's to process, queues and
 * reaps them, checking for rate and errors along the way.
 *
 * Returns the number of bytes written and trimmed via bytes_done.
 */
static void do_io(struct thread_data *td, uint64_t *bytes_done)
{
        unsigned int i;
        int ret = 0;
        uint64_t total_bytes, bytes_issued = 0;

        for (i = 0; i < DDIR_RWDIR_CNT; i++)
                bytes_done[i] = td->bytes_done[i];

        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
        else
                td_set_runstate(td, TD_RUNNING);

        lat_target_init(td);

        total_bytes = td->o.size;
        /*
         * Allow random overwrite workloads to write up to io_limit
         * before starting verification phase as 'size' doesn't apply.
         */
        if (td_write(td) && td_random(td) && td->o.norandommap)
                total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
        /*
         * If verify_backlog is enabled, we'll run the verify in this
         * handler as well. For that case, we may need up to twice the
         * amount of bytes.
         */
        if (td->o.verify != VERIFY_NONE &&
           (td_write(td) && td->o.verify_backlog))
                total_bytes += td->o.size;

        /*
         * In trimwrite mode, each byte is trimmed and then written, so
         * allow total_bytes to be twice as big.
         */
        if (td_trimwrite(td))
                total_bytes += td->total_io_size;

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timeval comp_time;
                struct io_u *io_u;
                int full;
                enum fio_ddir ddir;

                check_update_rusage(td);

                if (td->terminate || td->done)
                        break;

                update_tv_cache(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                /*
                 * Break if we exceeded the bytes. The exception is time
                 * based runs, but we still need to break out of the loop
                 * for those to run verification, if enabled.
                 */
                if (bytes_issued >= total_bytes &&
                    (!td->o.time_based ||
                     (td->o.time_based && td->o.verify != VERIFY_NONE)))
                        break;

                io_u = get_io_u(td);
                if (IS_ERR_OR_NULL(io_u)) {
                        int err = PTR_ERR(io_u);

                        io_u = NULL;
                        if (err == -EBUSY) {
                                ret = FIO_Q_BUSY;
                                goto reap;
                        }
                        if (td->o.latency_target)
                                goto reap;
                        break;
                }

                ddir = io_u->ddir;

                /*
                 * Add verification end_io handler if:
                 *      - Asked to verify (!td_rw(td))
                 *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

                        if (!td->o.verify_pattern_bytes) {
                                io_u->rand_seed = __rand(&td->verify_state);
                                if (sizeof(int) != sizeof(long *))
                                        io_u->rand_seed *= __rand(&td->verify_state);
                        }

                        if (verify_state_should_stop(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
                                io_u->end_io = verify_io_u;
                        td_set_runstate(td, TD_VERIFYING);
                } else if (in_ramp_time(td))
                        td_set_runstate(td, TD_RAMP);
                else
                        td_set_runstate(td, TD_RUNNING);

                /*
                 * Always log IO before it's issued, so we know the specific
                 * order of it. The logged unit will track when the IO has
                 * completed.
                 */
                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        const unsigned long blen = io_u->xfer_buflen;
                        const enum fio_ddir ddir = acct_ddir(io_u);

                        if (td->error)
                                break;

                        workqueue_enqueue(&td->io_wq, &io_u->work);
                        ret = FIO_Q_QUEUED;

                        if (ddir_rw(ddir)) {
                                td->io_issues[ddir]++;
                                td->io_issue_bytes[ddir] += blen;
                                td->rate_io_issue_bytes[ddir] += blen;
                        }

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                } else {
                        ret = td_io_queue(td, io_u);

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                        if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
                                break;

                        /*
                         * See if we need to complete some commands. Note that
                         * we can get BUSY even without IO queued, if the
                         * system is resource starved.
                         */
reap:
                        full = queue_full(td) ||
                                (ret == FIO_Q_BUSY && td->cur_depth);
                        if (full || io_in_polling(td))
                                ret = wait_for_completions(td, &comp_time);
                }
                if (ret < 0)
                        break;
                if (!ddir_rw_sum(td->bytes_done) &&
                    !(td->io_ops->flags & FIO_NOIO))
                        continue;

                if (!in_ramp_time(td) && should_check_rate(td)) {
                        if (check_min_rate(td, &comp_time)) {
                                if (exitall_on_terminate || td->o.exitall_error)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
                                break;
                        }
                }
                if (!in_ramp_time(td) && td->o.latency_target)
                        lat_target_check(td);

                if (td->o.thinktime) {
                        unsigned long long b;

                        b = ddir_rw_sum(td->io_blocks);
                        if (!(b % td->o.thinktime_blocks)) {
                                int left;

                                io_u_quiesce(td);

                                if (td->o.thinktime_spin)
                                        usec_spin(td->o.thinktime_spin);

                                left = td->o.thinktime - td->o.thinktime_spin;
                                if (left)
                                        usec_sleep(td, left);
                        }
                }
        }

        check_update_rusage(td);

        if (td->trim_entries)
                log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

        if (td->o.fill_device && td->error == ENOSPC) {
                td->error = 0;
                fio_mark_td_terminate(td);
        }
        if (!td->error) {
                struct fio_file *f;

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        workqueue_flush(&td->io_wq);
                        i = 0;
                } else
                        i = td->cur_depth;

                if (i) {
                        ret = io_u_queued_complete(td, i);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }

                if (should_fsync(td) && td->o.end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);

                        for_each_file(td, f, i) {
                                if (!fio_file_fsync(td, f))
                                        continue;

                                log_err("fio: end_fsync failed for file %s\n",
                                                                f->file_name);
                        }
                }
        } else
                cleanup_pending_aio(td);

        /*
         * stop job if we failed doing any IO
         */
        if (!ddir_rw_sum(td->this_io_bytes))
                td->done = 1;

        for (i = 0; i < DDIR_RWDIR_CNT; i++)
                bytes_done[i] = td->bytes_done[i] - bytes_done[i];
}

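/*
 * last_write_comp buffers are allocated in file order, so the first
 * file without one marks the end of what needs freeing.
 */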
static void free_file_completion_logging(struct thread_data *td)
{
        struct fio_file *f;
        unsigned int i;

        for_each_file(td, f, i) {
                if (!f->last_write_comp)
                        break;
                sfree(f->last_write_comp);
        }
}

static int init_file_completion_logging(struct thread_data *td,
                                        unsigned int depth)
{
        struct fio_file *f;
        unsigned int i;

        if (td->o.verify == VERIFY_NONE || !td->o.verify_state_save)
                return 0;

        for_each_file(td, f, i) {
                f->last_write_comp = scalloc(depth, sizeof(uint64_t));
                if (!f->last_write_comp)
                        goto cleanup;
        }

        return 0;

cleanup:
        free_file_completion_logging(td);
        log_err("fio: failed to alloc write comp data\n");
        return 1;
}

static void cleanup_io_u(struct thread_data *td)
{
        struct io_u *io_u;

        while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

                if (td->io_ops->io_u_free)
                        td->io_ops->io_u_free(td, io_u);

                fio_memfree(io_u, sizeof(*io_u));
        }

        free_io_mem(td);

        io_u_rexit(&td->io_u_requeues);
        io_u_qexit(&td->io_u_freelist);
        io_u_qexit(&td->io_u_all);

        free_file_completion_logging(td);
}

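/*
 * Allocate the io_u structures and, unless this is a no-transfer job,
 * one contiguous data buffer that all io_u's slice into in max_bs
 * sized chunks.
 */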
static int init_io_u(struct thread_data *td)
{
        struct io_u *io_u;
        unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        int data_xfer = 1, err;
        char *p;

        max_units = td->o.iodepth;
        max_bs = td_max_bs(td);
        min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;

        if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
                data_xfer = 0;

        err = 0;
        err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
        err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
        err += io_u_qinit(&td->io_u_all, td->o.iodepth);

        if (err) {
                log_err("fio: failed setting up IO queues\n");
                return 1;
        }

        /*
         * if we may later need to do address alignment, then add any
         * possible adjustment here so that we don't cause a buffer
         * overflow later. this adjustment may be too much if we get
         * lucky and the allocator gives us an aligned address.
         */
        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                td->orig_buffer_size += page_mask + td->o.mem_align;

        if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
                unsigned long bs;

                bs = td->orig_buffer_size + td->o.hugepage_size - 1;
                td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
        }

        if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
                log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
                return 1;
        }

        if (data_xfer && allocate_io_mem(td))
                return 1;

        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
        else
                p = td->orig_buffer;

        cl_align = os_cache_line_size();

        for (i = 0; i < max_units; i++) {
                void *ptr;

                if (td->terminate)
                        return 1;

                ptr = fio_memalign(cl_align, sizeof(*io_u));
                if (!ptr) {
                        log_err("fio: unable to allocate aligned memory\n");
                        break;
                }

                io_u = ptr;
                memset(io_u, 0, sizeof(*io_u));
                INIT_FLIST_HEAD(&io_u->verify_list);
                dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

                if (data_xfer) {
                        io_u->buf = p;
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

                        if (td_write(td))
                                io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
                                 * going to be doing writes.
                                 */
                                fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
                        }
                }

                io_u->index = i;
                io_u->flags = IO_U_F_FREE;
                io_u_qpush(&td->io_u_freelist, io_u);

                /*
                 * io_u never leaves this stack, used for iteration of all
                 * io_u buffers.
                 */
                io_u_qpush(&td->io_u_all, io_u);

                if (td->io_ops->io_u_init) {
                        int ret = td->io_ops->io_u_init(td, io_u);

                        if (ret) {
                                log_err("fio: failed to init engine data: %d\n", ret);
                                return 1;
                        }
                }

                p += max_bs;
        }

        if (init_file_completion_logging(td, max_units))
                return 1;

        return 0;
}

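/*
 * Switch the block device backing this job to the requested IO
 * scheduler via sysfs, then read the file back to verify that the
 * switch took effect.
 */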
static int switch_ioscheduler(struct thread_data *td)
{
        char tmp[256], tmp2[128];
        FILE *f;
        int ret;

        if (td->io_ops->flags & FIO_DISKLESSIO)
                return 0;

        sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

        f = fopen(tmp, "r+");
        if (!f) {
                if (errno == ENOENT) {
                        log_err("fio: os or kernel doesn't support IO scheduler"
                                " switching\n");
                        return 0;
                }
                td_verror(td, errno, "fopen iosched");
                return 1;
        }

        /*
         * Set io scheduler.
         */
        ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
                td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }

        rewind(f);

        /*
         * Read back and check that the selected scheduler is now the default.
         */
        memset(tmp, 0, sizeof(tmp));
        ret = fread(tmp, sizeof(tmp), 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
        /*
         * either a list of io schedulers or "none\n" is expected.
         */
        tmp[strlen(tmp) - 1] = '\0';

        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
                td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}

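/*
 * Decide whether the job should do another loop: time based jobs keep
 * running, explicit loop counts are decremented, and otherwise the
 * byte and IO-count limits decide.
 */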
static bool keep_running(struct thread_data *td)
{
        unsigned long long limit;

        if (td->done)
                return false;
        if (td->o.time_based)
                return true;
        if (td->o.loops) {
                td->o.loops--;
                return true;
        }
        if (exceeds_number_ios(td))
                return false;

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
                uint64_t diff;

                /*
                 * If the difference is less than the minimum IO size, we
                 * are done.
                 */
                diff = limit - ddir_rw_sum(td->io_bytes);
                if (diff < td_max_bs(td))
                        return false;

                if (fio_files_done(td) && !td->o.io_limit)
                        return false;

                return true;
        }

        return false;
}

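/*
 * The command output is redirected to a per-job file. For example, with
 * string "echo hi", a job name of "job1" and mode "prerun", the shell
 * command run is: echo hi &> job1.prerun.txt
 */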
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
        size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
        int ret;
        char *str;

        str = malloc(newlen);
        sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

        log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
        ret = system(str);
        if (ret == -1)
                log_err("fio: exec of cmd <%s> failed\n", str);

        free(str);
        return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
        td_set_runstate(td, TD_RUNNING);

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
                struct io_u *io_u;
                int ret;

                if (td->terminate || td->done)
                        break;

                io_u = get_io_u(td);
                if (!io_u)
                        break;

                io_u_set(io_u, IO_U_F_FLIGHT);
                io_u->error = 0;
                io_u->resid = 0;
                if (ddir_rw(acct_ddir(io_u)))
                        td->io_issues[acct_ddir(io_u)]++;
                if (ddir_rw(io_u->ddir)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }

                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                ret = io_u_sync_complete(td, io_u);
                (void) ret;
        }

        return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}

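/* Arguments handed to thread_main() for both thread and process based jobs. */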
struct fork_data {
        struct thread_data *td;
        struct sk_out *sk_out;
};

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
        struct fork_data *fd = data;
        unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
        struct thread_data *td = fd->td;
        struct thread_options *o = &td->o;
        struct sk_out *sk_out = fd->sk_out;
        int clear_state;
        int ret;

        sk_out_assign(sk_out);
        free(fd);

        if (!o->use_thread) {
                setsid();
                td->pid = getpid();
        } else
                td->pid = gettid();

        fio_local_clock_init(o->use_thread);

        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

        if (is_backend)
                fio_server_send_start(td);

        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        INIT_FLIST_HEAD(&td->next_rand_list);
        td->io_hist_tree = RB_ROOT;

        ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
        if (ret) {
                td_verror(td, ret, "mutex_cond_init_pshared");
                goto err;
        }
        ret = cond_init_pshared(&td->verify_cond);
        if (ret) {
                td_verror(td, ret, "cond_init_pshared");
1474                 goto err;
1475         }
1476
1477         td_set_runstate(td, TD_INITIALIZED);
1478         dprint(FD_MUTEX, "up startup_mutex\n");
1479         fio_mutex_up(startup_mutex);
1480         dprint(FD_MUTEX, "wait on td->mutex\n");
1481         fio_mutex_down(td->mutex);
1482         dprint(FD_MUTEX, "done waiting on td->mutex\n");
1483
1484         /*
1485          * A new gid requires privilege, so we need to do this before setting
1486          * the uid.
1487          */
1488         if (o->gid != -1U && setgid(o->gid)) {
1489                 td_verror(td, errno, "setgid");
1490                 goto err;
1491         }
1492         if (o->uid != -1U && setuid(o->uid)) {
1493                 td_verror(td, errno, "setuid");
1494                 goto err;
1495         }
1496
1497         /*
1498          * Do this early, we don't want the compress threads to be limited
1499          * to the same CPUs as the IO workers. So do this before we set
1500          * any potential CPU affinity
1501          */
1502         if (iolog_compress_init(td, sk_out))
1503                 goto err;
1504
1505         /*
1506          * If we have a gettimeofday() thread, make sure we exclude that
1507          * thread from this job
1508          */
1509         if (o->gtod_cpu)
1510                 fio_cpu_clear(&o->cpumask, o->gtod_cpu);
1511
1512         /*
1513          * Set affinity first, in case it has an impact on the memory
1514          * allocations.
1515          */
1516         if (fio_option_is_set(o, cpumask)) {
1517                 if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
1518                         ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
1519                         if (!ret) {
1520                                 log_err("fio: no CPUs set\n");
1521                                 log_err("fio: Try increasing number of available CPUs\n");
1522                                 td_verror(td, EINVAL, "cpus_split");
1523                                 goto err;
1524                         }
1525                 }
1526                 ret = fio_setaffinity(td->pid, o->cpumask);
1527                 if (ret == -1) {
1528                         td_verror(td, errno, "cpu_set_affinity");
1529                         goto err;
1530                 }
1531         }
1532
1533 #ifdef CONFIG_LIBNUMA
1534         /* numa node setup */
1535         if (fio_option_is_set(o, numa_cpunodes) ||
1536             fio_option_is_set(o, numa_memnodes)) {
1537                 struct bitmask *mask;
1538
1539                 if (numa_available() < 0) {
1540                         td_verror(td, errno, "Does not support NUMA API\n");
1541                         goto err;
1542                 }
1543
1544                 if (fio_option_is_set(o, numa_cpunodes)) {
1545                         mask = numa_parse_nodestring(o->numa_cpunodes);
                             if (!mask) {
                                     /* a bad node string yields a NULL mask */
                                     td_verror(td, EINVAL, "numa_parse_nodestring");
                                     goto err;
                             }
1546                         ret = numa_run_on_node_mask(mask);
1547                         numa_free_nodemask(mask);
1548                         if (ret == -1) {
1549                                 td_verror(td, errno,
1550                                         "numa_run_on_node_mask failed");
1551                                 goto err;
1552                         }
1553                 }
1554
1555                 if (fio_option_is_set(o, numa_memnodes)) {
1556                         mask = NULL;
1557                         if (o->numa_memnodes)
1558                                 mask = numa_parse_nodestring(o->numa_memnodes);
1559
1560                         switch (o->numa_mem_mode) {
1561                         case MPOL_INTERLEAVE:
1562                                 numa_set_interleave_mask(mask);
1563                                 break;
1564                         case MPOL_BIND:
1565                                 numa_set_membind(mask);
1566                                 break;
1567                         case MPOL_LOCAL:
1568                                 numa_set_localalloc();
1569                                 break;
1570                         case MPOL_PREFERRED:
1571                                 numa_set_preferred(o->numa_mem_prefer_node);
1572                                 break;
1573                         case MPOL_DEFAULT:
1574                         default:
1575                                 break;
1576                         }
1577
1578                         if (mask)
1579                                 numa_free_nodemask(mask);
1580
1581                 }
1582         }
1583 #endif
1584
1585         if (fio_pin_memory(td))
1586                 goto err;
1587
1588         /*
1589          * May alter parameters that init_io_u() will use, so we need to
1590          * do this first.
1591          */
1592         if (init_iolog(td))
1593                 goto err;
1594
1595         if (init_io_u(td))
1596                 goto err;
1597
1598         if (o->verify_async && verify_async_init(td))
1599                 goto err;
1600
1601         if (fio_option_is_set(o, ioprio) ||
1602             fio_option_is_set(o, ioprio_class)) {
1603                 ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
1604                 if (ret == -1) {
1605                         td_verror(td, errno, "ioprio_set");
1606                         goto err;
1607                 }
1608         }
1609
1610         if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
1611                 goto err;
1612
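             /*
              * nice() may legitimately return -1 (the new nice value), so
              * clear errno first and only treat -1 as a failure when errno
              * was actually set.
              */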
1613         errno = 0;
1614         if (nice(o->nice) == -1 && errno != 0) {
1615                 td_verror(td, errno, "nice");
1616                 goto err;
1617         }
1618
1619         if (o->ioscheduler && switch_ioscheduler(td))
1620                 goto err;
1621
1622         if (!o->create_serialize && setup_files(td))
1623                 goto err;
1624
1625         if (td_io_init(td))
1626                 goto err;
1627
1628         if (init_random_map(td))
1629                 goto err;
1630
1631         if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
1632                 goto err;
1633
1634         if (o->pre_read) {
1635                 if (pre_read_files(td) < 0)
1636                         goto err;
1637         }
1638
1639         fio_verify_init(td);
1640
1641         if (rate_submit_init(td, sk_out))
1642                 goto err;
1643
1644         fio_gettime(&td->epoch, NULL);
1645         fio_getrusage(&td->ru_start);
1646         memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
1647         memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
1648
1649         if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
1650                         o->ratemin[DDIR_TRIM]) {
1651                 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
1652                                         sizeof(td->bw_sample_time));
1653                 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
1654                                         sizeof(td->bw_sample_time));
1655                 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
1656                                         sizeof(td->bw_sample_time));
1657         }
1658
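             /*
              * Main job loop: each pass runs the workload and, if enabled,
              * a verify pass afterwards. keep_running() decides whether
              * another pass is due (loops=, time_based, remaining size).
              */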
1659         clear_state = 0;
1660         while (keep_running(td)) {
1661                 uint64_t verify_bytes;
1662
1663                 fio_gettime(&td->start, NULL);
1664                 memcpy(&td->tv_cache, &td->start, sizeof(td->start));
1665
1666                 if (clear_state)
1667                         clear_io_state(td, 0);
1668
1669                 prune_io_piece_log(td);
1670
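                     /*
                      * With verify_only, skip the real IO: do_dry_run()
                      * simulates the workload to work out how many bytes
                      * the verify pass below needs to cover.
                      */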
1671                 if (td->o.verify_only && (td_write(td) || td_rw(td)))
1672                         verify_bytes = do_dry_run(td);
1673                 else {
1674                         uint64_t bytes_done[DDIR_RWDIR_CNT];
1675
1676                         do_io(td, bytes_done);
1677
1678                         if (!ddir_rw_sum(bytes_done)) {
1679                                 fio_mark_td_terminate(td);
1680                                 verify_bytes = 0;
1681                         } else {
1682                                 verify_bytes = bytes_done[DDIR_WRITE] +
1683                                                 bytes_done[DDIR_TRIM];
1684                         }
1685                 }
1686
1687                 clear_state = 1;
1688
1689                 /*
1690                  * Make sure we've successfully updated the rusage stats
1691                  * before waiting on the stat mutex. Otherwise we could have
1692                  * the stat thread holding stat mutex and waiting for
1693                  * the rusage_sem, which would never get upped because
1694                  * this thread is waiting for the stat mutex.
1695                  */
1696                 check_update_rusage(td);
1697
1698                 fio_mutex_down(stat_mutex);
1699                 if (td_read(td) && td->io_bytes[DDIR_READ])
1700                         update_runtime(td, elapsed_us, DDIR_READ);
1701                 if (td_write(td) && td->io_bytes[DDIR_WRITE])
1702                         update_runtime(td, elapsed_us, DDIR_WRITE);
1703                 if (td_trim(td) && td->io_bytes[DDIR_TRIM])
1704                         update_runtime(td, elapsed_us, DDIR_TRIM);
1705                 fio_gettime(&td->start, NULL);
1706                 fio_mutex_up(stat_mutex);
1707
1708                 if (td->error || td->terminate)
1709                         break;
1710
1711                 if (!o->do_verify ||
1712                     o->verify == VERIFY_NONE ||
1713                     (td->io_ops->flags & FIO_UNIDIR))
1714                         continue;
1715
1716                 clear_io_state(td, 0);
1717
1718                 fio_gettime(&td->start, NULL);
1719
1720                 do_verify(td, verify_bytes);
1721
1722                 /*
1723                  * See comment further up for why this is done here.
1724                  */
1725                 check_update_rusage(td);
1726
1727                 fio_mutex_down(stat_mutex);
1728                 update_runtime(td, elapsed_us, DDIR_READ);
1729                 fio_gettime(&td->start, NULL);
1730                 fio_mutex_up(stat_mutex);
1731
1732                 if (td->error || td->terminate)
1733                         break;
1734         }
1735
1736         td_set_runstate(td, TD_FINISHING);
1737
1738         update_rusage_stat(td);
1739         td->ts.total_run_time = mtime_since_now(&td->epoch);
1740         td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1741         td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1742         td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1743
1744         if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
1745             (td->o.verify != VERIFY_NONE && td_write(td)))
1746                 verify_save_state(td->thread_number);
1747
1748         fio_unpin_memory(td);
1749
1750         td_writeout_logs(td, true);
1751
1752         iolog_compress_exit(td);
1753         rate_submit_exit(td);
1754
1755         if (o->exec_postrun)
1756                 exec_string(o, o->exec_postrun, (const char *)"postrun");
1757
1758         if (exitall_on_terminate || (o->exitall_error && td->error))
1759                 fio_terminate_threads(td->groupid);
1760
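             /*
              * Common exit path for both success and error: every cleanup
              * step below must tolerate partially initialized state.
              */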
1761 err:
1762         if (td->error)
1763                 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
1764                                                         td->verror);
1765
1766         if (o->verify_async)
1767                 verify_async_exit(td);
1768
1769         close_and_free_files(td);
1770         cleanup_io_u(td);
1771         close_ioengine(td);
1772         cgroup_shutdown(td, &cgroup_mnt);
1773         verify_free_state(td);
1774
1775         if (td->zone_state_index) {
1776                 int i;
1777
1778                 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1779                         free(td->zone_state_index[i]);
1780                 free(td->zone_state_index);
1781                 td->zone_state_index = NULL;
1782         }
1783
1784         if (fio_option_is_set(o, cpumask)) {
1785                 ret = fio_cpuset_exit(&o->cpumask);
1786                 if (ret)
1787                         td_verror(td, ret, "fio_cpuset_exit");
1788         }
1789
1790         /*
1791          * do this very late, it will log file closing as well
1792          */
1793         if (o->write_iolog_file)
1794                 write_iolog_close(td);
1795
1796         fio_mutex_remove(td->mutex);
1797         td->mutex = NULL;
1798
1799         td_set_runstate(td, TD_EXITED);
1800
1801         /*
1802          * Do this last after setting our runstate to exited, so we
1803          * know that the stat thread is signaled.
1804          */
1805         check_update_rusage(td);
1806
1807         sk_out_drop();
1808         return (void *) (uintptr_t) td->error;
1809 }
1810
1811
1812 /*
1813  * We cannot pass the td data into a forked process, so attach the td and
1814  * pass it to the thread worker.
1815  */
1816 static int fork_main(struct sk_out *sk_out, int shmid, int offset)
1817 {
1818         struct fork_data *fd;
1819         void *data, *ret;
1820
1821 #if !defined(__hpux) && !defined(CONFIG_NO_SHM)
1822         data = shmat(shmid, NULL, 0);
1823         if (data == (void *) -1) {
1824                 int __err = errno;
1825
1826                 perror("shmat");
1827                 return __err;
1828         }
1829 #else
1830         /*
1831          * HP-UX (and no-shm builds) are assumed to inherit the mapping
              * across fork(), so the parent's pointer is reused directly.
1832          */
1833         data = threads;
1834 #endif
1835
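             /*
              * Locate this job's thread_data slot inside the shared memory
              * area and hand it off to the regular thread entry point.
              */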
1836         fd = calloc(1, sizeof(*fd));
1837         fd->td = data + offset * sizeof(struct thread_data);
1838         fd->sk_out = sk_out;
1839         ret = thread_main(fd);
1840         shmdt(data);
1841         return (int) (uintptr_t) ret;
1842 }
1843
1844 static void dump_td_info(struct thread_data *td)
1845 {
1846         log_err("fio: job '%s' (state=%d) hasn't exited in %lu seconds, it "
1847                 "appears to be stuck. Doing forceful exit of this job.\n",
1848                         td->o.name, td->runstate,
1849                         (unsigned long) time_since_now(&td->terminate_time));
1850 }
1851
1852 /*
1853  * Run over the job map and reap the threads that have exited, if any.
1854  */
1855 static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
1856                          unsigned int *m_rate)
1857 {
1858         struct thread_data *td;
1859         unsigned int cputhreads, realthreads, pending;
1860         int i, status, ret;
1861
1862         /*
1863          * reap exited threads (TD_EXITED -> TD_REAPED)
1864          */
1865         realthreads = pending = cputhreads = 0;
1866         for_each_td(td, i) {
1867                 int flags = 0;
1868
1869                 /*
1870                  * ->io_ops is NULL for a thread that has closed its
1871                  * io engine
1872                  */
1873                 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
1874                         cputhreads++;
1875                 else
1876                         realthreads++;
1877
1878                 if (!td->pid) {
1879                         pending++;
1880                         continue;
1881                 }
1882                 if (td->runstate == TD_REAPED)
1883                         continue;
1884                 if (td->o.use_thread) {
1885                         if (td->runstate == TD_EXITED) {
1886                                 td_set_runstate(td, TD_REAPED);
1887                                 goto reaped;
1888                         }
1889                         continue;
1890                 }
1891
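                     /*
                      * Poll with WNOHANG while the process may still be
                      * running; once it has reached TD_EXITED, a blocking
                      * waitpid() is safe and reaps it right away.
                      */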
1892                 flags = WNOHANG;
1893                 if (td->runstate == TD_EXITED)
1894                         flags = 0;
1895
1896                 /*
1897                  * check if someone quit or got killed in an unusual way
1898                  */
1899                 ret = waitpid(td->pid, &status, flags);
1900                 if (ret < 0) {
1901                         if (errno == ECHILD) {
1902                                 log_err("fio: pid=%d disappeared %d\n",
1903                                                 (int) td->pid, td->runstate);
1904                                 td->sig = ECHILD;
1905                                 td_set_runstate(td, TD_REAPED);
1906                                 goto reaped;
1907                         }
1908                         perror("waitpid");
1909                 } else if (ret == td->pid) {
1910                         if (WIFSIGNALED(status)) {
1911                                 int sig = WTERMSIG(status);
1912
1913                                 if (sig != SIGTERM && sig != SIGUSR2)
1914                                         log_err("fio: pid=%d, got signal=%d\n",
1915                                                         (int) td->pid, sig);
1916                                 td->sig = sig;
1917                                 td_set_runstate(td, TD_REAPED);
1918                                 goto reaped;
1919                         }
1920                         if (WIFEXITED(status)) {
1921                                 if (WEXITSTATUS(status) && !td->error)
1922                                         td->error = WEXITSTATUS(status);
1923
1924                                 td_set_runstate(td, TD_REAPED);
1925                                 goto reaped;
1926                         }
1927                 }
1928
1929                 /*
1930                  * If the job is stuck, do a forceful timeout of it and
1931                  * move on.
1932                  */
1933                 if (td->terminate &&
1934                     td->runstate < TD_FSYNCING &&
1935                     time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
1936                         dump_td_info(td);
1937                         td_set_runstate(td, TD_REAPED);
1938                         goto reaped;
1939                 }
1940
1941                 /*
1942                  * thread is not dead, continue
1943                  */
1944                 pending++;
1945                 continue;
1946 reaped:
1947                 (*nr_running)--;
1948                 (*m_rate) -= ddir_rw_sum(td->o.ratemin);
1949                 (*t_rate) -= ddir_rw_sum(td->o.rate);
1950                 if (!td->pid)
1951                         pending--;
1952
1953                 if (td->error)
1954                         exit_value++;
1955
1956                 done_secs += mtime_since_now(&td->epoch) / 1000;
1957                 profile_td_exit(td);
1958         }
1959
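             /*
              * If only cpuio "burn" jobs remain and no jobs are pending,
              * there is no real IO left to run, so stop everything.
              */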
1960         if (*nr_running == cputhreads && !pending && realthreads)
1961                 fio_terminate_threads(TERMINATE_ALL);
1962 }
1963
1964 static bool __check_trigger_file(void)
1965 {
1966         struct stat sb;
1967
1968         if (!trigger_file)
1969                 return false;
1970
1971         if (stat(trigger_file, &sb))
1972                 return false;
1973
1974         if (unlink(trigger_file) < 0)
1975                 log_err("fio: failed to unlink %s: %s\n", trigger_file,
1976                                                         strerror(errno));
1977
1978         return true;
1979 }
1980
1981 static bool trigger_timedout(void)
1982 {
1983         if (trigger_timeout)
1984                 return time_since_genesis() >= trigger_timeout;
1985
1986         return false;
1987 }
1988
1989 void exec_trigger(const char *cmd)
1990 {
1991         int ret;
1992
1993         if (!cmd)
1994                 return;
1995
1996         ret = system(cmd);
1997         if (ret == -1)
1998                 log_err("fio: failed executing %s trigger\n", cmd);
1999 }
2000
2001 void check_trigger_file(void)
2002 {
2003         if (__check_trigger_file() || trigger_timedout()) {
2004                 if (nr_clients)
2005                         fio_clients_send_trigger(trigger_remote_cmd);
2006                 else {
2007                         verify_save_state(IO_LIST_ALL);
2008                         fio_terminate_threads(TERMINATE_ALL);
2009                         exec_trigger(trigger_cmd);
2010                 }
2011         }
2012 }
2013
2014 static int fio_verify_load_state(struct thread_data *td)
2015 {
2016         int ret;
2017
2018         if (!td->o.verify_state)
2019                 return 0;
2020
2021         if (is_backend) {
2022                 void *data;
2023
2024                 ret = fio_server_get_verify_state(td->o.name,
2025                                         td->thread_number - 1, &data);
2026                 if (!ret)
2027                         verify_assign_state(td, data);
2028         } else
2029                 ret = verify_load_state(td, "local");
2030
2031         return ret;
2032 }
2033
2034 static void do_usleep(unsigned int usecs)
2035 {
2036         check_for_running_stats();
2037         check_trigger_file();
2038         usleep(usecs);
2039 }
2040
2041 static bool check_mount_writes(struct thread_data *td)
2042 {
2043         struct fio_file *f;
2044         unsigned int i;
2045
2046         if (!td_write(td) || td->o.allow_mounted_write)
2047                 return false;
2048
2049         for_each_file(td, f, i) {
2050                 if (f->filetype != FIO_TYPE_BD)
2051                         continue;
2052                 if (device_is_mounted(f->file_name))
2053                         goto mounted;
2054         }
2055
2056         return false;
2057 mounted:
2058         log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
2059         return true;
2060 }
2061
2062 static bool waitee_running(struct thread_data *me)
2063 {
2064         const char *waitee = me->o.wait_for;
2065         const char *self = me->o.name;
2066         struct thread_data *td;
2067         int i;
2068
2069         if (!waitee)
2070                 return false;
2071
2072         for_each_td(td, i) {
2073                 if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee))
2074                         continue;
2075
2076                 if (td->runstate < TD_EXITED) {
2077                         dprint(FD_PROCESS, "%s fenced by %s(%s)\n",
2078                                         self, td->o.name,
2079                                         runstate_to_name(td->runstate));
2080                         return true;
2081                 }
2082         }
2083
2084         dprint(FD_PROCESS, "%s: %s completed, can run\n", self, waitee);
2085         return false;
2086 }
2087
2088 /*
2089  * Main function for kicking off and reaping jobs, as needed.
2090  */
2091 static void run_threads(struct sk_out *sk_out)
2092 {
2093         struct thread_data *td;
2094         unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
2095         uint64_t spent;
2096
2097         if (fio_gtod_offload && fio_start_gtod_thread())
2098                 return;
2099
2100         fio_idle_prof_init();
2101
2102         set_sig_handlers();
2103
2104         nr_thread = nr_process = 0;
2105         for_each_td(td, i) {
2106                 if (check_mount_writes(td))
2107                         return;
2108                 if (td->o.use_thread)
2109                         nr_thread++;
2110                 else
2111                         nr_process++;
2112         }
2113
2114         if (output_format & FIO_OUTPUT_NORMAL) {
2115                 log_info("Starting ");
2116                 if (nr_thread)
2117                         log_info("%d thread%s", nr_thread,
2118                                                 nr_thread > 1 ? "s" : "");
2119                 if (nr_process) {
2120                         if (nr_thread)
2121                                 log_info(" and ");
2122                         log_info("%d process%s", nr_process,
2123                                                 nr_process > 1 ? "es" : "");
2124                 }
2125                 log_info("\n");
2126                 log_info_flush();
2127         }
2128
2129         todo = thread_number;
2130         nr_running = 0;
2131         nr_started = 0;
2132         m_rate = t_rate = 0;
2133
2134         for_each_td(td, i) {
2135                 print_status_init(td->thread_number - 1);
2136
2137                 if (!td->o.create_serialize)
2138                         continue;
2139
2140                 if (fio_verify_load_state(td))
2141                         goto reap;
2142
2143                 /*
2144                          * Do file setup here so it happens sequentially;
2145                          * we don't want X number of threads getting their
2146                          * client data interspersed on disk.
2147                  */
2148                 if (setup_files(td)) {
2149 reap:
2150                         exit_value++;
2151                         if (td->error)
2152                                 log_err("fio: pid=%d, err=%d/%s\n",
2153                                         (int) td->pid, td->error, td->verror);
2154                         td_set_runstate(td, TD_REAPED);
2155                         todo--;
2156                 } else {
2157                         struct fio_file *f;
2158                         unsigned int j;
2159
2160                         /*
2161                          * For sharing to work, each job must always open
2162                          * its own files, so close them here if we opened
2163                          * them for creation.
2164                          */
2165                         for_each_file(td, f, j) {
2166                                 if (fio_file_open(f))
2167                                         td_io_close_file(td, f);
2168                         }
2169                 }
2170         }
2171
2172         /* start idle threads before io threads start to run */
2173         fio_idle_prof_start();
2174
2175         set_genesis_time();
2176
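             /*
              * Job start loop: create a batch of jobs (honoring stonewall,
              * start_delay and wait_for), wait for each to reach
              * TD_INITIALIZED, then release the whole batch at once so the
              * jobs start as close to simultaneously as possible.
              */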
2177         while (todo) {
2178                 struct thread_data *map[REAL_MAX_JOBS];
2179                 struct timeval this_start;
2180                 int this_jobs = 0, left;
2181
2182                 /*
2183                  * create threads (TD_NOT_CREATED -> TD_CREATED)
2184                  */
2185                 for_each_td(td, i) {
2186                         if (td->runstate != TD_NOT_CREATED)
2187                                 continue;
2188
2189                         /*
2190                          * never got a chance to start; killed by another
2191                          * thread for some reason
2192                          */
2193                         if (td->terminate) {
2194                                 todo--;
2195                                 continue;
2196                         }
2197
2198                         if (td->o.start_delay) {
2199                                 spent = utime_since_genesis();
2200
2201                                 if (td->o.start_delay > spent)
2202                                         continue;
2203                         }
2204
2205                         if (td->o.stonewall && (nr_started || nr_running)) {
2206                                 dprint(FD_PROCESS, "%s: stonewall wait\n",
2207                                                         td->o.name);
2208                                 break;
2209                         }
2210
2211                         if (waitee_running(td)) {
2212                                 dprint(FD_PROCESS, "%s: waiting for %s\n",
2213                                                 td->o.name, td->o.wait_for);
2214                                 continue;
2215                         }
2216
2217                         init_disk_util(td);
2218
2219                         td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
2220                         td->update_rusage = 0;
2221
2222                         /*
2223                          * Set state to created. Thread will transition
2224                          * to TD_INITIALIZED when it's done setting up.
2225                          */
2226                         td_set_runstate(td, TD_CREATED);
2227                         map[this_jobs++] = td;
2228                         nr_started++;
2229
2230                         if (td->o.use_thread) {
2231                                 struct fork_data *fd;
2232                                 int ret;
2233
2234                                 fd = calloc(1, sizeof(*fd));
2235                                 fd->td = td;
2236                                 fd->sk_out = sk_out;
2237
2238                                 dprint(FD_PROCESS, "will pthread_create\n");
2239                                 ret = pthread_create(&td->thread, NULL,
2240                                                         thread_main, fd);
2241                                 if (ret) {
2242                                         log_err("pthread_create: %s\n",
2243                                                         strerror(ret));
2244                                         free(fd);
2245                                         nr_started--;
2246                                         break;
2247                                 }
2248                                 ret = pthread_detach(td->thread);
2249                                 if (ret)
2250                                         log_err("pthread_detach: %s",
2251                                                         strerror(ret));
2252                         } else {
2253                                 pid_t pid;
2254                                 dprint(FD_PROCESS, "will fork\n");
2255                                 pid = fork();
2256                                 if (!pid) {
2257                                         int ret = fork_main(sk_out, shm_id, i);
2258
2259                                         _exit(ret);
2260                                 } else if (i == fio_debug_jobno)
2261                                         *fio_debug_jobp = pid;
2262                         }
2263                         dprint(FD_MUTEX, "wait on startup_mutex\n");
2264                         if (fio_mutex_down_timeout(startup_mutex, 10000)) {
2265                                 log_err("fio: job startup hung? exiting.\n");
2266                                 fio_terminate_threads(TERMINATE_ALL);
2267                                 fio_abort = 1;
2268                                 nr_started--;
2269                                 break;
2270                         }
2271                         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
2272                 }
2273
2274                 /*
2275                  * Wait for the started threads to transition to
2276                  * TD_INITIALIZED.
2277                  */
2278                 fio_gettime(&this_start, NULL);
2279                 left = this_jobs;
2280                 while (left && !fio_abort) {
2281                         if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
2282                                 break;
2283
2284                         do_usleep(100000);
2285
2286                         for (i = 0; i < this_jobs; i++) {
2287                                 td = map[i];
2288                                 if (!td)
2289                                         continue;
2290                                 if (td->runstate == TD_INITIALIZED) {
2291                                         map[i] = NULL;
2292                                         left--;
2293                                 } else if (td->runstate >= TD_EXITED) {
2294                                         map[i] = NULL;
2295                                         left--;
2296                                         todo--;
2297                                         nr_running++; /* work-around: balances the decrement in reap_threads() */
2298                                 }
2299                         }
2300                 }
2301
2302                 if (left) {
2303                         log_err("fio: %d job%s failed to start\n", left,
2304                                         left > 1 ? "s" : "");
2305                         for (i = 0; i < this_jobs; i++) {
2306                                 td = map[i];
2307                                 if (!td)
2308                                         continue;
2309                                 kill(td->pid, SIGTERM);
2310                         }
2311                         break;
2312                 }
2313
2314                 /*
2315                  * start created threads (TD_INITIALIZED -> TD_RUNNING).
2316                  */
2317                 for_each_td(td, i) {
2318                         if (td->runstate != TD_INITIALIZED)
2319                                 continue;
2320
2321                         if (in_ramp_time(td))
2322                                 td_set_runstate(td, TD_RAMP);
2323                         else
2324                                 td_set_runstate(td, TD_RUNNING);
2325                         nr_running++;
2326                         nr_started--;
2327                         m_rate += ddir_rw_sum(td->o.ratemin);
2328                         t_rate += ddir_rw_sum(td->o.rate);
2329                         todo--;
2330                         fio_mutex_up(td->mutex);
2331                 }
2332
2333                 reap_threads(&nr_running, &t_rate, &m_rate);
2334
2335                 if (todo)
2336                         do_usleep(100000);
2337         }
2338
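             /*
              * Everything has been started; just reap jobs as they finish.
              */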
2339         while (nr_running) {
2340                 reap_threads(&nr_running, &t_rate, &m_rate);
2341                 do_usleep(10000);
2342         }
2343
2344         fio_idle_prof_stop();
2345
2346         update_io_ticks();
2347 }
2348
2349 static void free_disk_util(void)
2350 {
2351         disk_util_prune_entries();
2352         helper_thread_destroy();
2353 }
2354
2355 int fio_backend(struct sk_out *sk_out)
2356 {
2357         struct thread_data *td;
2358         int i;
2359
2360         if (exec_profile) {
2361                 if (load_profile(exec_profile))
2362                         return 1;
2363                 free(exec_profile);
2364                 exec_profile = NULL;
2365         }
2366         if (!thread_number)
2367                 return 0;
2368
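             /*
              * Aggregate per-direction bandwidth logs; they are flushed
              * and freed once all jobs have completed.
              */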
2369         if (write_bw_log) {
2370                 struct log_params p = {
2371                         .log_type = IO_LOG_TYPE_BW,
2372                 };
2373
2374                 setup_log(&agg_io_log[DDIR_READ], &p, "agg-read_bw.log");
2375                 setup_log(&agg_io_log[DDIR_WRITE], &p, "agg-write_bw.log");
2376                 setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
2377         }
2378
2379         startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
2380         if (startup_mutex == NULL)
2381                 return 1;
2382
2383         set_genesis_time();
2384         stat_init();
2385         helper_thread_create(startup_mutex, sk_out);
2386
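             /*
              * The cgroup list lives in shared memory (smalloc) so forked
              * job processes can see and update it too.
              */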
2387         cgroup_list = smalloc(sizeof(*cgroup_list));
2388         INIT_FLIST_HEAD(cgroup_list);
2389
2390         run_threads(sk_out);
2391
2392         helper_thread_exit();
2393
2394         if (!fio_abort) {
2395                 __show_run_stats();
2396                 if (write_bw_log) {
2397                         for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2398                                 struct io_log *log = agg_io_log[i];
2399
2400                                 flush_log(log, false);
2401                                 free_log(log);
2402                         }
2403                 }
2404         }
2405
2406         for_each_td(td, i) {
2407                 fio_options_free(td);
2408                 if (td->rusage_sem) {
2409                         fio_mutex_remove(td->rusage_sem);
2410                         td->rusage_sem = NULL;
2411                 }
2412         }
2413
2414         free_disk_util();
2415         cgroup_kill(cgroup_list);
2416         sfree(cgroup_list);
2417         sfree(cgroup_mnt);
2418
2419         fio_mutex_remove(startup_mutex);
2420         stat_exit();
2421         return exit_value;
2422 }