/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <math.h>

40 #include "fio.h"
41 #ifndef FIO_NO_HAVE_SHM_H
42 #include <sys/shm.h>
43 #endif
44 #include "hash.h"
45 #include "smalloc.h"
46 #include "verify.h"
47 #include "trim.h"
48 #include "diskutil.h"
49 #include "cgroup.h"
50 #include "profile.h"
51 #include "lib/rand.h"
52 #include "lib/memalign.h"
53 #include "server.h"
54 #include "lib/getrusage.h"
55 #include "idletime.h"
56 #include "err.h"
57 #include "workqueue.h"
58 #include "lib/mountcheck.h"
59 #include "rate-submit.h"
60
61 static pthread_t helper_thread;
62 static pthread_mutex_t helper_lock;
63 pthread_cond_t helper_cond;
64 int helper_do_stat = 0;
65
66 static struct fio_mutex *startup_mutex;
67 static struct flist_head *cgroup_list;
68 static char *cgroup_mnt;
69 static int exit_value;
70 static volatile int fio_abort;
71 static unsigned int nr_process = 0;
72 static unsigned int nr_thread = 0;
73
74 struct io_log *agg_io_log[DDIR_RWDIR_CNT];
75
76 int groupid = 0;
77 unsigned int thread_number = 0;
78 unsigned int stat_number = 0;
79 int shm_id = 0;
80 int temp_stall_ts;
81 unsigned long done_secs = 0;
82 volatile int helper_exit = 0;
83
84 #define PAGE_ALIGN(buf) \
85         (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)
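/*
 * Worked example (illustrative numbers): with 4096-byte pages, page_mask
 * is 0xfff, so PAGE_ALIGN rounds a pointer up to the next page boundary:
 *
 *     (0x1234 + 0xfff) & ~0xfff = 0x2000
 *
 * A pointer that is already page aligned is left unchanged.
 */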

#define JOB_START_TIMEOUT       (5 * 1000)

static void sig_int(int sig)
{
        if (threads) {
                if (is_backend)
                        fio_server_got_signal(sig);
                else {
                        log_info("\nfio: terminating on signal %d\n", sig);
                        log_info_flush();
                        exit_value = 128;
                }

                fio_terminate_threads(TERMINATE_ALL);
        }
}

void sig_show_status(int sig)
{
        show_running_run_stats();
}

static void set_sig_handlers(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGBREAK, &act, NULL);
#endif

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_show_status;
        act.sa_flags = SA_RESTART;
        sigaction(SIGUSR1, &act, NULL);

        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
                act.sa_flags = SA_RESTART;
                sigaction(SIGPIPE, &act, NULL);
        }
}
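/*
 * Usage note (illustrative, not part of the code): with the handlers
 * above installed, sending SIGUSR1 to a running fio process dumps the
 * current run stats without stopping the job, e.g. from a shell:
 *
 *     kill -USR1 $(pidof fio)
 */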

/*
 * Check whether the job meets its configured minimum rate. Returns true
 * if the rate has fallen below the minimum, i.e. the job should fail.
 */
static bool __check_min_rate(struct thread_data *td, struct timeval *now,
                             enum fio_ddir ddir)
{
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
        unsigned int ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;

        assert(ddir_rw(ddir));

        if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
                return false;

        /*
         * allow a 2 second settle period in the beginning
         */
        if (mtime_since(&td->start, now) < 2000)
                return false;

        iops += td->this_io_blocks[ddir];
        bytes += td->this_io_bytes[ddir];
        ratemin += td->o.ratemin[ddir];
        rate_iops += td->o.rate_iops[ddir];
        rate_iops_min += td->o.rate_iops_min[ddir];

        /*
         * if rate_bytes or rate_blocks is set, a sample is already running
         */
        if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
                spent = mtime_since(&td->lastrate[ddir], now);
                if (spent < td->o.ratecycle)
                        return false;

                if (td->o.rate[ddir] || td->o.ratemin[ddir]) {
                        /*
                         * check the bandwidth-specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
                                log_err("%s: min rate %u not met\n", td->o.name,
                                                                ratemin);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
                                        log_err("%s: min rate %u not met, got"
                                                " %luKB/sec\n", td->o.name,
                                                        ratemin, rate);
                                        return true;
                                }
                        }
                } else {
                        /*
                         * check the IOPS-specified rate
                         */
                        if (iops < rate_iops) {
                                log_err("%s: min iops rate %u not met\n",
                                                td->o.name, rate_iops);
                                return true;
                        } else {
                                if (spent)
                                        rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: min iops rate %u not met,"
                                                " got %lu\n", td->o.name,
                                                        rate_iops_min, rate);
                                        return true;
                                }
                        }
                }
        }

        td->rate_bytes[ddir] = bytes;
        td->rate_blocks[ddir] = iops;
        memcpy(&td->lastrate[ddir], now, sizeof(*now));
        return false;
}
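/*
 * Worked example (illustrative numbers): the observed rate over a sample
 * window is
 *
 *     rate = (bytes - td->rate_bytes[ddir]) * 1000 / spent
 *
 * with spent in milliseconds. If 2,000,000 units were moved over a
 * 2500 ms window, rate = 2000000 * 1000 / 2500 = 800000 per second;
 * if that falls below ratemin, the check above fails the job.
 */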

static bool check_min_rate(struct thread_data *td, struct timeval *now)
{
        bool ret = false;

        if (td->bytes_done[DDIR_READ])
                ret |= __check_min_rate(td, now, DDIR_READ);
        if (td->bytes_done[DDIR_WRITE])
                ret |= __check_min_rate(td, now, DDIR_WRITE);
        if (td->bytes_done[DDIR_TRIM])
                ret |= __check_min_rate(td, now, DDIR_TRIM);

        return ret;
}

/*
 * When the job exits, we can cancel the in-flight IO if we are using
 * async io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
        int r;

        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0);
        if (r < 0)
                return;

        /*
         * now cancel remaining active events
         */
        if (td->io_ops->cancel) {
                struct io_u *io_u;
                int i;

                io_u_qiter(&td->io_u_all, io_u, i) {
                        if (io_u->flags & IO_U_F_FLIGHT) {
                                r = td->io_ops->cancel(td, io_u);
                                if (!r)
                                        put_io_u(td, io_u);
                        }
                }
        }

        if (td->cur_depth)
                r = io_u_queued_complete(td, td->cur_depth);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
{
        struct io_u *io_u = __get_io_u(td);
        int ret;

        if (!io_u)
                return true;

        io_u->ddir = DDIR_SYNC;
        io_u->file = f;

        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
                return true;
        }

requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
                td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return true;
        } else if (ret == FIO_Q_QUEUED) {
                if (io_u_queued_complete(td, 1) < 0)
                        return true;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
                        td_verror(td, io_u->error, "td_io_queue");
                        return true;
                }

                if (io_u_sync_complete(td, io_u) < 0)
                        return true;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
                        return true;
                goto requeue;
        }

        return false;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
        int ret;

        if (fio_file_open(f))
                return fio_io_sync(td, f);

        if (td_io_open_file(td, f))
                return 1;

        ret = fio_io_sync(td, f);
        td_io_close_file(td, f);
        return ret;
}

static inline void __update_tv_cache(struct thread_data *td)
{
        fio_gettime(&td->tv_cache, NULL);
}

static inline void update_tv_cache(struct thread_data *td)
{
        if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
                __update_tv_cache(td);
}
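/*
 * Illustrative note (assumes tv_cache_mask is a power-of-two minus one):
 * with a mask of 15, the cached timestamp above is only refreshed on
 * every 16th call, when the low bits of tv_cache_nr are all ones. This
 * amortizes the cost of fio_gettime() across many fast-path calls.
 */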

static inline bool runtime_exceeded(struct thread_data *td, struct timeval *t)
{
        if (in_ramp_time(td))
                return false;
        if (!td->o.timeout)
                return false;
        if (utime_since(&td->epoch, t) >= td->o.timeout)
                return true;

        return false;
}

/*
 * We need to update the runtime consistently in ms, but keep a running
 * tally of the current elapsed time in microseconds for sub millisecond
 * updates.
 */
static inline void update_runtime(struct thread_data *td,
                                  unsigned long long *elapsed_us,
                                  const enum fio_ddir ddir)
{
        if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
                return;

        td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
        elapsed_us[ddir] += utime_since_now(&td->start);
        td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
}

static bool break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
                                int *retptr)
{
        int ret = *retptr;

        if (ret < 0 || td->error) {
                int err = td->error;
                enum error_type_bit eb;

                if (ret < 0)
                        err = -ret;

                eb = td_error_type(ddir, err);
                if (!(td->o.continue_on_error & (1 << eb)))
                        return true;

                if (td_non_fatal_error(td, eb, err)) {
                        /*
                         * Continue with the I/Os in case of
                         * a non-fatal error.
                         */
                        update_error_count(td, err);
                        td_clear_error(td);
                        *retptr = 0;
                        return false;
                } else if (td->o.fill_device && err == ENOSPC) {
                        /*
                         * We expect to hit this error if the
                         * fill_device option is set.
                         */
                        td_clear_error(td);
                        fio_mark_td_terminate(td);
                        return true;
                } else {
                        /*
                         * Stop the I/O in case of a fatal
                         * error.
                         */
                        update_error_count(td, err);
                        return true;
                }
        }

        return false;
}

static void check_update_rusage(struct thread_data *td)
{
        if (td->update_rusage) {
                td->update_rusage = 0;
                update_rusage_stat(td);
                fio_mutex_up(td->rusage_sem);
        }
}

static int wait_for_completions(struct thread_data *td, struct timeval *time)
{
        const int full = queue_full(td);
        int min_evts = 0;
        int ret;

        /*
         * if the queue is full, we MUST reap at least 1 event
         */
        min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth);
        if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
                min_evts = 1;

        if (time && (__should_check_rate(td, DDIR_READ) ||
            __should_check_rate(td, DDIR_WRITE) ||
            __should_check_rate(td, DDIR_TRIM)))
                fio_gettime(time, NULL);

        do {
                ret = io_u_queued_complete(td, min_evts);
                if (ret < 0)
                        break;
        } while (full && (td->cur_depth > td->o.iodepth_low));

        return ret;
}

int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret,
                   enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
                   struct timeval *comp_time)
{
        int ret2;

        switch (*ret) {
        case FIO_Q_COMPLETED:
                if (io_u->error) {
                        *ret = -io_u->error;
                        clear_io_u(td, io_u);
                } else if (io_u->resid) {
                        int bytes = io_u->xfer_buflen - io_u->resid;
                        struct fio_file *f = io_u->file;

                        if (bytes_issued)
                                *bytes_issued += bytes;

                        if (!from_verify)
                                trim_io_piece(td, io_u);

                        /*
                         * zero read, fail
                         */
                        if (!bytes) {
                                if (!from_verify)
                                        unlog_io_piece(td, io_u);
                                td_verror(td, EIO, "full resid");
                                put_io_u(td, io_u);
                                break;
                        }

                        io_u->xfer_buflen = io_u->resid;
                        io_u->xfer_buf += bytes;
                        io_u->offset += bytes;

                        if (ddir_rw(io_u->ddir))
                                td->ts.short_io_u[io_u->ddir]++;

                        f = io_u->file;
                        if (io_u->offset == f->real_file_size)
                                goto sync_done;

                        requeue_io_u(td, &io_u);
                } else {
sync_done:
                        if (comp_time && (__should_check_rate(td, DDIR_READ) ||
                            __should_check_rate(td, DDIR_WRITE) ||
                            __should_check_rate(td, DDIR_TRIM)))
                                fio_gettime(comp_time, NULL);

                        *ret = io_u_sync_complete(td, io_u);
                        if (*ret < 0)
                                break;
                }
                return 0;
        case FIO_Q_QUEUED:
                /*
                 * if the engine doesn't have a commit hook,
                 * the io_u is really queued. if it does have such
                 * a hook, it has to call io_u_queued() itself.
                 */
                if (td->io_ops->commit == NULL)
                        io_u_queued(td, io_u);
                if (bytes_issued)
                        *bytes_issued += io_u->xfer_buflen;
                break;
        case FIO_Q_BUSY:
                if (!from_verify)
                        unlog_io_piece(td, io_u);
                requeue_io_u(td, &io_u);
                ret2 = td_io_commit(td);
                if (ret2 < 0)
                        *ret = ret2;
                break;
        default:
                assert(*ret < 0);
                td_verror(td, -(*ret), "td_io_queue");
                break;
        }

        if (break_on_this_error(td, ddir, ret))
                return 1;

        return 0;
}

static inline bool io_in_polling(struct thread_data *td)
{
        return !td->o.iodepth_batch_complete_min &&
                   !td->o.iodepth_batch_complete_max;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
        unsigned int i;

        dprint(FD_VERIFY, "starting loop\n");

        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
                if (!fio_file_open(f))
                        continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
                        break;
        }

        check_update_rusage(td);

        if (td->error)
                return;

        td_set_runstate(td, TD_VERIFYING);

        io_u = NULL;
        while (!td->terminate) {
                enum fio_ddir ddir;
                int full;

                update_tv_cache(td);
                check_update_rusage(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (!td->o.experimental_verify) {
                        io_u = __get_io_u(td);
                        if (!io_u)
                                break;

                        if (get_next_verify(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td_io_prep(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }
                } else {
                        if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
                                break;

                        while ((io_u = get_io_u(td)) != NULL) {
                                if (IS_ERR(io_u)) {
                                        io_u = NULL;
                                        ret = FIO_Q_BUSY;
                                        goto reap;
                                }

                                /*
                                 * We are only interested in the places where
                                 * we wrote or trimmed IOs. Turn those into
                                 * reads for verification purposes.
                                 */
                                if (io_u->ddir == DDIR_READ) {
                                        /*
                                         * Pretend we issued it for rwmix
                                         * accounting
                                         */
                                        td->io_issues[DDIR_READ]++;
                                        put_io_u(td, io_u);
                                        continue;
                                } else if (io_u->ddir == DDIR_TRIM) {
                                        io_u->ddir = DDIR_READ;
                                        io_u_set(io_u, IO_U_F_TRIMMED);
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
                                        break;
                                } else {
                                        put_io_u(td, io_u);
                                        continue;
                                }
                        }

                        if (!io_u)
                                break;
                }

                if (verify_state_should_stop(td, io_u)) {
                        put_io_u(td, io_u);
                        break;
                }

                if (td->o.verify_async)
                        io_u->end_io = verify_io_u_async;
                else
                        io_u->end_io = verify_io_u;

                ddir = io_u->ddir;
                if (!td->o.disable_slat)
                        fio_gettime(&io_u->start_time, NULL);

                ret = td_io_queue(td, io_u);

                if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL))
                        break;

                /*
                 * if we can queue more, do so. but check if there are
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || io_in_polling(td))
                        ret = wait_for_completions(td, NULL);

                if (ret < 0)
                        break;
        }

        check_update_rusage(td);

        if (!td->error) {
                min_events = td->cur_depth;

                if (min_events)
                        ret = io_u_queued_complete(td, min_events);
        } else
                cleanup_pending_aio(td);

        td_set_runstate(td, TD_RUNNING);

        dprint(FD_VERIFY, "exiting loop\n");
}
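/*
 * Illustrative job file (assumed option values, see fio(1)) that
 * exercises the verify path above:
 *
 *     [write-and-verify]
 *     rw=write
 *     bs=4k
 *     size=64m
 *     verify=crc32c
 *
 * fio first writes the data, then this loop reads each block back and
 * checks its checksum against what was written.
 */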

static bool exceeds_number_ios(struct thread_data *td)
{
        unsigned long long number_ios;

        if (!td->o.number_ios)
                return false;

        number_ios = ddir_rw_sum(td->io_blocks);
        number_ios += td->io_u_queued + td->io_u_in_flight;

        return number_ios >= (td->o.number_ios * td->loops);
}

static bool io_issue_bytes_exceeded(struct thread_data *td)
{
        unsigned long long bytes, limit;

        if (td_rw(td))
                bytes = td->io_issue_bytes[DDIR_READ] + td->io_issue_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = td->io_issue_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = td->io_issue_bytes[DDIR_READ];
        else
                bytes = td->io_issue_bytes[DDIR_TRIM];

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
}

static bool io_complete_bytes_exceeded(struct thread_data *td)
{
        unsigned long long bytes, limit;

        if (td_rw(td))
                bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = td->this_io_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = td->this_io_bytes[DDIR_READ];
        else
                bytes = td->this_io_bytes[DDIR_TRIM];

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        limit *= td->loops;
        return bytes >= limit || exceeds_number_ios(td);
}

/*
 * Used to calculate the next I/O time for rate control.
 */
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
        uint64_t secs, remainder, bps, bytes, iops;

        assert(!(td->flags & TD_F_CHILD));
        bytes = td->rate_io_issue_bytes[ddir];
        bps = td->rate_bps[ddir];

        if (td->o.rate_process == RATE_PROCESS_POISSON) {
                uint64_t val;
                iops = bps / td->o.bs[ddir];
                val = (int64_t) (1000000 / iops) *
                                -logf(__rand_0_1(&td->poisson_state));
                if (val) {
                        dprint(FD_RATE, "poisson rate iops=%llu\n",
                                        (unsigned long long) 1000000 / val);
                }
                td->last_usec += val;
                return td->last_usec;
        } else if (bps) {
                secs = bytes / bps;
                remainder = bytes % bps;
                return remainder * 1000000 / bps + secs * 1000000;
        }

        return 0;
}
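/*
 * Worked example for the linear branch (illustrative numbers): with
 * bps = 4194304 and 8388608 bytes already issued,
 *
 *     secs      = 8388608 / 4194304 = 2
 *     remainder = 8388608 % 4194304 = 0
 *     next time = 0 * 1000000 / bps + 2 * 1000000 = 2000000 usec
 *
 * so the next I/O is not due until 2 seconds into the job, keeping the
 * issue rate pinned at the requested bytes/sec. The Poisson branch
 * instead draws exponentially distributed gaps with the same mean.
 */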

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static uint64_t do_io(struct thread_data *td)
{
        unsigned int i;
        int ret = 0;
        uint64_t total_bytes, bytes_issued = 0;

        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
        else
                td_set_runstate(td, TD_RUNNING);

        lat_target_init(td);

        total_bytes = td->o.size;
        /*
         * Allow random overwrite workloads to write up to io_limit
         * before starting verification phase as 'size' doesn't apply.
         */
        if (td_write(td) && td_random(td) && td->o.norandommap)
                total_bytes = max(total_bytes, (uint64_t) td->o.io_limit);
        /*
         * If verify_backlog is enabled, we'll run the verify in this
         * handler as well. For that case, we may need up to twice the
         * amount of bytes.
         */
        if (td->o.verify != VERIFY_NONE &&
           (td_write(td) && td->o.verify_backlog))
                total_bytes += td->o.size;

        /*
         * In trimwrite mode, each byte is trimmed and then written, so
         * allow total_bytes to be twice as big.
         */
        if (td_trimwrite(td))
                total_bytes += td->total_io_size;

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timeval comp_time;
                struct io_u *io_u;
                int full;
                enum fio_ddir ddir;

                check_update_rusage(td);

                if (td->terminate || td->done)
                        break;

                update_tv_cache(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                fio_mark_td_terminate(td);
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (!td->o.time_based && bytes_issued >= total_bytes)
                        break;

                io_u = get_io_u(td);
                if (IS_ERR_OR_NULL(io_u)) {
                        int err = PTR_ERR(io_u);

                        io_u = NULL;
                        if (err == -EBUSY) {
                                ret = FIO_Q_BUSY;
                                goto reap;
                        }
                        if (td->o.latency_target)
                                goto reap;
                        break;
                }

                ddir = io_u->ddir;

                /*
                 * Add verification end_io handler if:
                 *      - Asked to verify (!td_rw(td))
                 *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

                        if (!td->o.verify_pattern_bytes) {
                                io_u->rand_seed = __rand(&td->verify_state);
                                if (sizeof(int) != sizeof(long *))
                                        io_u->rand_seed *= __rand(&td->verify_state);
                        }

                        if (verify_state_should_stop(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
                                io_u->end_io = verify_io_u;
                        td_set_runstate(td, TD_VERIFYING);
                } else if (in_ramp_time(td))
                        td_set_runstate(td, TD_RAMP);
                else
                        td_set_runstate(td, TD_RUNNING);

                /*
                 * Always log IO before it's issued, so we know the specific
                 * order of it. The logged unit will track when the IO has
                 * completed.
                 */
                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        const unsigned long blen = io_u->xfer_buflen;
                        const enum fio_ddir ddir = acct_ddir(io_u);

                        if (td->error)
                                break;

                        workqueue_enqueue(&td->io_wq, &io_u->work);
                        ret = FIO_Q_QUEUED;

                        if (ddir_rw(ddir)) {
                                td->io_issues[ddir]++;
                                td->io_issue_bytes[ddir] += blen;
                                td->rate_io_issue_bytes[ddir] += blen;
                        }

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                } else {
                        ret = td_io_queue(td, io_u);

                        if (should_check_rate(td))
                                td->rate_next_io_time[ddir] = usec_for_io(td, ddir);

                        if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time))
                                break;

                        /*
                         * See if we need to complete some commands. Note that
                         * we can get BUSY even without IO queued, if the
                         * system is resource starved.
                         */
reap:
                        full = queue_full(td) ||
                                (ret == FIO_Q_BUSY && td->cur_depth);
                        if (full || io_in_polling(td))
                                ret = wait_for_completions(td, &comp_time);
                }
                if (ret < 0)
                        break;
                if (!ddir_rw_sum(td->bytes_done) &&
                    !(td->io_ops->flags & FIO_NOIO))
                        continue;

                if (!in_ramp_time(td) && should_check_rate(td)) {
                        if (check_min_rate(td, &comp_time)) {
                                if (exitall_on_terminate || td->o.exitall_error)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
                                break;
                        }
                }
                if (!in_ramp_time(td) && td->o.latency_target)
                        lat_target_check(td);

                if (td->o.thinktime) {
                        unsigned long long b;

                        b = ddir_rw_sum(td->io_blocks);
                        if (!(b % td->o.thinktime_blocks)) {
                                int left;

                                io_u_quiesce(td);

                                if (td->o.thinktime_spin)
                                        usec_spin(td->o.thinktime_spin);

                                left = td->o.thinktime - td->o.thinktime_spin;
                                if (left)
                                        usec_sleep(td, left);
                        }
                }
        }

        check_update_rusage(td);

        if (td->trim_entries)
                log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

        if (td->o.fill_device && td->error == ENOSPC) {
                td->error = 0;
                fio_mark_td_terminate(td);
        }
        if (!td->error) {
                struct fio_file *f;

                if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
                        workqueue_flush(&td->io_wq);
                        i = 0;
                } else
                        i = td->cur_depth;

                if (i) {
                        ret = io_u_queued_complete(td, i);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }

                if (should_fsync(td) && td->o.end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);

                        for_each_file(td, f, i) {
                                if (!fio_file_fsync(td, f))
                                        continue;

                                log_err("fio: end_fsync failed for file %s\n",
                                                                f->file_name);
                        }
                }
        } else
                cleanup_pending_aio(td);

        /*
         * stop job if we failed doing any IO
         */
        if (!ddir_rw_sum(td->this_io_bytes))
                td->done = 1;

        return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}
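/*
 * Worked example for the thinktime logic above (illustrative option
 * values): with thinktime=1000, thinktime_spin=200 and
 * thinktime_blocks=16, the job pauses after every 16th block,
 * busy-spins for 200 usec, then sleeps for the remaining
 * 1000 - 200 = 800 usec.
 */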

static void cleanup_io_u(struct thread_data *td)
{
        struct io_u *io_u;

        while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

                if (td->io_ops->io_u_free)
                        td->io_ops->io_u_free(td, io_u);

                fio_memfree(io_u, sizeof(*io_u));
        }

        free_io_mem(td);

        io_u_rexit(&td->io_u_requeues);
        io_u_qexit(&td->io_u_freelist);
        io_u_qexit(&td->io_u_all);

        if (td->last_write_comp)
                sfree(td->last_write_comp);
}

static int init_io_u(struct thread_data *td)
{
        struct io_u *io_u;
        unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        int data_xfer = 1, err;
        char *p;

        max_units = td->o.iodepth;
        max_bs = td_max_bs(td);
        min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;

        if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
                data_xfer = 0;

        err = 0;
        err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
        err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
        err += io_u_qinit(&td->io_u_all, td->o.iodepth);

        if (err) {
                log_err("fio: failed setting up IO queues\n");
                return 1;
        }

        /*
         * if we may later need to do address alignment, then add any
         * possible adjustment here so that we don't cause a buffer
         * overflow later. this adjustment may be too much if we get
         * lucky and the allocator gives us an aligned address.
         */
        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                td->orig_buffer_size += page_mask + td->o.mem_align;

        if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
                unsigned long bs;

                bs = td->orig_buffer_size + td->o.hugepage_size - 1;
                td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
        }

        if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
                log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
                return 1;
        }

        if (data_xfer && allocate_io_mem(td))
                return 1;

        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
        else
                p = td->orig_buffer;

        cl_align = os_cache_line_size();

        for (i = 0; i < max_units; i++) {
                void *ptr;

                if (td->terminate)
                        return 1;

                ptr = fio_memalign(cl_align, sizeof(*io_u));
                if (!ptr) {
                        log_err("fio: unable to allocate aligned memory\n");
                        break;
                }

                io_u = ptr;
                memset(io_u, 0, sizeof(*io_u));
                INIT_FLIST_HEAD(&io_u->verify_list);
                dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

                if (data_xfer) {
                        io_u->buf = p;
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

                        if (td_write(td))
                                io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
                                 * going to be doing writes.
                                 */
                                fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
                        }
                }

                io_u->index = i;
                io_u->flags = IO_U_F_FREE;
                io_u_qpush(&td->io_u_freelist, io_u);

                /*
                 * io_u never leaves this stack, used for iteration of all
                 * io_u buffers.
                 */
                io_u_qpush(&td->io_u_all, io_u);

                if (td->io_ops->io_u_init) {
                        int ret = td->io_ops->io_u_init(td, io_u);

                        if (ret) {
                                log_err("fio: failed to init engine data: %d\n", ret);
                                return 1;
                        }
                }

                p += max_bs;
        }

        if (td->o.verify != VERIFY_NONE) {
                td->last_write_comp = scalloc(max_units, sizeof(uint64_t));
                if (!td->last_write_comp) {
                        log_err("fio: failed to alloc write comp data\n");
                        return 1;
                }
        }

        return 0;
}
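/*
 * Worked sizing example (illustrative option values): with bs=4k and
 * iodepth=32, orig_buffer_size = 4096 * 32 = 131072 bytes before any
 * alignment slack is added, and one io_u structure is allocated per
 * queue slot, each cache-line aligned.
 */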

static int switch_ioscheduler(struct thread_data *td)
{
        char tmp[256], tmp2[128];
        FILE *f;
        int ret;

        if (td->io_ops->flags & FIO_DISKLESSIO)
                return 0;

        sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

        f = fopen(tmp, "r+");
        if (!f) {
                if (errno == ENOENT) {
                        log_err("fio: os or kernel doesn't support IO scheduler"
                                " switching\n");
                        return 0;
                }
                td_verror(td, errno, "fopen iosched");
                return 1;
        }

        /*
         * Set io scheduler.
         */
        ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
                td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }

        rewind(f);

        /*
         * Read back and check that the selected scheduler is now the default.
         */
        memset(tmp, 0, sizeof(tmp));
        ret = fread(tmp, 1, sizeof(tmp) - 1, f);
        if (ferror(f) || ret <= 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
        /*
         * Either a list of io schedulers or "none\n" is expected; strip
         * the trailing newline.
         */
        tmp[strlen(tmp) - 1] = '\0';

        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
                td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}
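/*
 * Illustrative sysfs contents parsed above (example device and
 * schedulers):
 *
 *     $ cat /sys/block/sda/queue/scheduler
 *     noop deadline [cfq]
 *
 * The active scheduler is the bracketed entry, so after the fwrite we
 * expect to find "[<ioscheduler>]" in the read-back string.
 */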

static bool keep_running(struct thread_data *td)
{
        unsigned long long limit;

        if (td->done)
                return false;
        if (td->o.time_based)
                return true;
        if (td->o.loops) {
                td->o.loops--;
                return true;
        }
        if (exceeds_number_ios(td))
                return false;

        if (td->o.io_limit)
                limit = td->o.io_limit;
        else
                limit = td->o.size;

        if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
                uint64_t diff;

                /*
                 * If the difference is less than the minimum IO size, we
                 * are done.
                 */
                diff = limit - ddir_rw_sum(td->io_bytes);
                if (diff < td_max_bs(td))
                        return false;

                if (fio_files_done(td))
                        return false;

                return true;
        }

        return false;
}

static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
        size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
        int ret;
        char *str;

        str = malloc(newlen);
        sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

        log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
        ret = system(str);
        if (ret == -1)
                log_err("fio: exec of cmd <%s> failed\n", str);

        free(str);
        return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
        td_set_runstate(td, TD_RUNNING);

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) {
                struct io_u *io_u;
                int ret;

                if (td->terminate || td->done)
                        break;

                io_u = get_io_u(td);
                if (!io_u)
                        break;

                io_u_set(io_u, IO_U_F_FLIGHT);
                io_u->error = 0;
                io_u->resid = 0;
                if (ddir_rw(acct_ddir(io_u)))
                        td->io_issues[acct_ddir(io_u)]++;
                if (ddir_rw(io_u->ddir)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }

                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                ret = io_u_sync_complete(td, io_u);
                (void) ret;
        }

        return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}

struct fork_data {
        struct thread_data *td;
        struct sk_out *sk_out;
};

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
        struct fork_data *fd = data;
        unsigned long long elapsed_us[DDIR_RWDIR_CNT] = { 0, };
        struct thread_data *td = fd->td;
        struct thread_options *o = &td->o;
        struct sk_out *sk_out = fd->sk_out;
        pthread_condattr_t attr;
        int clear_state;
        int ret;

        sk_out_assign(sk_out);
        free(fd);

        if (!o->use_thread) {
                setsid();
                td->pid = getpid();
        } else
                td->pid = gettid();

        fio_local_clock_init(o->use_thread);

        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

        if (is_backend)
                fio_server_send_start(td);

        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        INIT_FLIST_HEAD(&td->next_rand_list);
        pthread_mutex_init(&td->io_u_lock, NULL);
        td->io_hist_tree = RB_ROOT;

        pthread_condattr_init(&attr);
        pthread_cond_init(&td->verify_cond, &attr);
        pthread_cond_init(&td->free_cond, &attr);

        td_set_runstate(td, TD_INITIALIZED);
        dprint(FD_MUTEX, "up startup_mutex\n");
        fio_mutex_up(startup_mutex);
        dprint(FD_MUTEX, "wait on td->mutex\n");
        fio_mutex_down(td->mutex);
        dprint(FD_MUTEX, "done waiting on td->mutex\n");

        /*
         * A new gid requires privilege, so we need to do this before setting
         * the uid.
         */
        if (o->gid != -1U && setgid(o->gid)) {
                td_verror(td, errno, "setgid");
                goto err;
        }
        if (o->uid != -1U && setuid(o->uid)) {
                td_verror(td, errno, "setuid");
                goto err;
        }

        /*
         * If we have a gettimeofday() thread, make sure we exclude that
         * thread from this job
         */
        if (o->gtod_cpu)
                fio_cpu_clear(&o->cpumask, o->gtod_cpu);

        /*
         * Set affinity first, in case it has an impact on the memory
         * allocations.
         */
        if (fio_option_is_set(o, cpumask)) {
                if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
                        ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
                        if (!ret) {
                                log_err("fio: no CPUs set\n");
                                log_err("fio: Try increasing number of available CPUs\n");
                                td_verror(td, EINVAL, "cpus_split");
                                goto err;
                        }
                }
                ret = fio_setaffinity(td->pid, o->cpumask);
                if (ret == -1) {
                        td_verror(td, errno, "cpu_set_affinity");
                        goto err;
                }
        }
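        /*
         * Illustrative option use (see fio(1)): cpus_allowed=0-3 with
         * cpus_allowed_policy=split hands each job one CPU from the set,
         * which is the fio_cpus_split() path above; policy=shared binds
         * every job to the whole mask.
         */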

#ifdef CONFIG_LIBNUMA
        /* numa node setup */
        if (fio_option_is_set(o, numa_cpunodes) ||
            fio_option_is_set(o, numa_memnodes)) {
                struct bitmask *mask;

                if (numa_available() < 0) {
                        td_verror(td, errno, "Does not support NUMA API\n");
                        goto err;
                }

                if (fio_option_is_set(o, numa_cpunodes)) {
                        mask = numa_parse_nodestring(o->numa_cpunodes);
                        ret = numa_run_on_node_mask(mask);
                        numa_free_nodemask(mask);
                        if (ret == -1) {
                                td_verror(td, errno,
                                        "numa_run_on_node_mask failed\n");
                                goto err;
                        }
                }

                if (fio_option_is_set(o, numa_memnodes)) {
                        mask = NULL;
                        if (o->numa_memnodes)
                                mask = numa_parse_nodestring(o->numa_memnodes);

                        switch (o->numa_mem_mode) {
                        case MPOL_INTERLEAVE:
                                numa_set_interleave_mask(mask);
                                break;
                        case MPOL_BIND:
                                numa_set_membind(mask);
                                break;
                        case MPOL_LOCAL:
                                numa_set_localalloc();
                                break;
                        case MPOL_PREFERRED:
                                numa_set_preferred(o->numa_mem_prefer_node);
                                break;
                        case MPOL_DEFAULT:
                        default:
                                break;
                        }

                        if (mask)
                                numa_free_nodemask(mask);

                }
        }
#endif
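        /*
         * Illustrative option use (assumed values, see fio(1)):
         * numa_cpunodes=0-1 pins the job's CPUs to NUMA nodes 0 and 1 via
         * numa_run_on_node_mask() above, and numa_mem_policy=bind:0
         * restricts its memory allocations to node 0 via
         * numa_set_membind().
         */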
1508
1509         if (fio_pin_memory(td))
1510                 goto err;
1511
1512         /*
1513          * May alter parameters that init_io_u() will use, so we need to
1514          * do this first.
1515          */
1516         if (init_iolog(td))
1517                 goto err;
1518
1519         if (init_io_u(td))
1520                 goto err;
1521
1522         if (o->verify_async && verify_async_init(td))
1523                 goto err;
1524
1525         if (fio_option_is_set(o, ioprio) ||
1526             fio_option_is_set(o, ioprio_class)) {
1527                 ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
1528                 if (ret == -1) {
1529                         td_verror(td, errno, "ioprio_set");
1530                         goto err;
1531                 }
1532         }
1533
1534         if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
1535                 goto err;
1536
1537         errno = 0;
1538         if (nice(o->nice) == -1 && errno != 0) {
1539                 td_verror(td, errno, "nice");
1540                 goto err;
1541         }
1542
1543         if (o->ioscheduler && switch_ioscheduler(td))
1544                 goto err;
1545
1546         if (!o->create_serialize && setup_files(td))
1547                 goto err;
1548
1549         if (td_io_init(td))
1550                 goto err;
1551
1552         if (init_random_map(td))
1553                 goto err;
1554
1555         if (o->exec_prerun && exec_string(o, o->exec_prerun, "prerun"))
1556                 goto err;
1557
1558         if (o->pre_read) {
1559                 if (pre_read_files(td) < 0)
1560                         goto err;
1561         }
1562
1563         if (iolog_compress_init(td, sk_out))
1564                 goto err;
1565
1566         fio_verify_init(td);
1567
1568         if (rate_submit_init(td, sk_out))
1569                 goto err;
1570
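             /*
              * Record the job start time and the rusage baseline; the
              * bandwidth and IOPS sampling windows both begin at the epoch.
              */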
1571         fio_gettime(&td->epoch, NULL);
1572         fio_getrusage(&td->ru_start);
1573         memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
1574         memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
1575
1576         if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
1577                         o->ratemin[DDIR_TRIM]) {
1578                 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
1579                                         sizeof(td->bw_sample_time));
1580                 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
1581                                         sizeof(td->bw_sample_time));
1582                 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
1583                                         sizeof(td->bw_sample_time));
1584         }
1585
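             /*
              * Main job loop: run the workload, then optionally a verify
              * pass, until keep_running() says we are done. I/O state is
              * cleared between rounds, but not before the first one.
              */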
1586         clear_state = 0;
1587         while (keep_running(td)) {
1588                 uint64_t verify_bytes;
1589
1590                 fio_gettime(&td->start, NULL);
1591                 memcpy(&td->tv_cache, &td->start, sizeof(td->start));
1592
1593                 if (clear_state)
1594                         clear_io_state(td, 0);
1595
1596                 prune_io_piece_log(td);
1597
1598                 if (td->o.verify_only && (td_write(td) || td_rw(td)))
1599                         verify_bytes = do_dry_run(td);
1600                 else {
1601                         verify_bytes = do_io(td);
1602                         if (!verify_bytes)
1603                                 fio_mark_td_terminate(td);
1604                 }
1605
1606                 clear_state = 1;
1607
1608                 /*
1609                  * Make sure we've successfully updated the rusage stats
1610                  * before waiting on the stat mutex. Otherwise we could have
1611                  * the stat thread holding stat mutex and waiting for
1612                  * the rusage_sem, which would never get upped because
1613                  * this thread is waiting for the stat mutex.
1614                  */
1615                 check_update_rusage(td);
1616
1617                 fio_mutex_down(stat_mutex);
1618                 if (td_read(td) && td->io_bytes[DDIR_READ])
1619                         update_runtime(td, elapsed_us, DDIR_READ);
1620                 if (td_write(td) && td->io_bytes[DDIR_WRITE])
1621                         update_runtime(td, elapsed_us, DDIR_WRITE);
1622                 if (td_trim(td) && td->io_bytes[DDIR_TRIM])
1623                         update_runtime(td, elapsed_us, DDIR_TRIM);
1624                 fio_gettime(&td->start, NULL);
1625                 fio_mutex_up(stat_mutex);
1626
1627                 if (td->error || td->terminate)
1628                         break;
1629
1630                 if (!o->do_verify ||
1631                     o->verify == VERIFY_NONE ||
1632                     (td->io_ops->flags & FIO_UNIDIR))
1633                         continue;
1634
1635                 clear_io_state(td, 0);
1636
1637                 fio_gettime(&td->start, NULL);
1638
1639                 do_verify(td, verify_bytes);
1640
1641                 /*
1642                  * See comment further up for why this is done here.
1643                  */
1644                 check_update_rusage(td);
1645
1646                 fio_mutex_down(stat_mutex);
1647                 update_runtime(td, elapsed_us, DDIR_READ);
1648                 fio_gettime(&td->start, NULL);
1649                 fio_mutex_up(stat_mutex);
1650
1651                 if (td->error || td->terminate)
1652                         break;
1653         }
1654
1655         update_rusage_stat(td);
1656         td->ts.total_run_time = mtime_since_now(&td->epoch);
1657         td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1658         td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1659         td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1660
1661         if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
1662             (td->o.verify != VERIFY_NONE && td_write(td)))
1663                 verify_save_state(td->thread_number);
1664
1665         fio_unpin_memory(td);
1666
1667         fio_writeout_logs(td);
1668
1669         iolog_compress_exit(td);
1670         rate_submit_exit(td);
1671
1672         if (o->exec_postrun)
1673                 exec_string(o, o->exec_postrun, "postrun");
1674
1675         if (exitall_on_terminate || (o->exitall_error && td->error))
1676                 fio_terminate_threads(td->groupid);
1677
1678 err:
1679         if (td->error)
1680                 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
1681                                                         td->verror);
1682
1683         if (o->verify_async)
1684                 verify_async_exit(td);
1685
1686         close_and_free_files(td);
1687         cleanup_io_u(td);
1688         close_ioengine(td);
1689         cgroup_shutdown(td, &cgroup_mnt);
1690         verify_free_state(td);
1691
1692         if (fio_option_is_set(o, cpumask)) {
1693                 ret = fio_cpuset_exit(&o->cpumask);
1694                 if (ret)
1695                         td_verror(td, ret, "fio_cpuset_exit");
1696         }
1697
1698         /*
1699          * do this very late, it will log file closing as well
1700          */
1701         if (o->write_iolog_file)
1702                 write_iolog_close(td);
1703
1704         fio_mutex_remove(td->mutex);
1705         td->mutex = NULL;
1706
1707         td_set_runstate(td, TD_EXITED);
1708
1709         /*
1710          * Do this last after setting our runstate to exited, so we
1711          * know that the stat thread is signaled.
1712          */
1713         check_update_rusage(td);
1714
1715         sk_out_drop();
1716         return (void *) (uintptr_t) td->error;
1717 }
1718
1719
1720 /*
1721  * We cannot pass the td data into a forked process, so reattach the shared
1722  * memory segment and hand this job's td within it to the thread worker.
1723  */
1724 static int fork_main(struct sk_out *sk_out, int shmid, int offset)
1725 {
1726         struct fork_data *fd;
1727         void *data, *ret;
1728
1729 #if !defined(__hpux) && !defined(CONFIG_NO_SHM)
1730         data = shmat(shmid, NULL, 0);
1731         if (data == (void *) -1) {
1732                 int __err = errno;
1733
1734                 perror("shmat");
1735                 return __err;
1736         }
1737 #else
1738         /*
1739          * HP-UX inherits shm mappings?
1740          */
1741         data = threads;
1742 #endif
1743
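             /*
              * The thread_data array lives inside the shared memory segment,
              * so index it by job offset to find this worker's td.
              */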
1744         fd = calloc(1, sizeof(*fd));
1745         fd->td = data + offset * sizeof(struct thread_data);
1746         fd->sk_out = sk_out;
1747         ret = thread_main(fd);
1748         shmdt(data);
1749         return (int) (uintptr_t) ret;
1750 }
1751
1752 static void dump_td_info(struct thread_data *td)
1753 {
1754         log_err("fio: job '%s' hasn't exited in %lu seconds, it appears to "
1755                 "be stuck. Doing forceful exit of this job.\n", td->o.name,
1756                         (unsigned long) time_since_now(&td->terminate_time));
1757 }
1758
1759 /*
1760  * Run over the job map and reap the threads that have exited, if any.
1761  */
1762 static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
1763                          unsigned int *m_rate)
1764 {
1765         struct thread_data *td;
1766         unsigned int cputhreads, realthreads, pending;
1767         int i, status, ret;
1768
1769         /*
1770          * reap exited threads (TD_EXITED -> TD_REAPED)
1771          */
1772         realthreads = pending = cputhreads = 0;
1773         for_each_td(td, i) {
1774                 int flags = 0;
1775
1776                 /*
1777                  * ->io_ops is NULL for a thread that has closed its
1778                  * io engine
1779                  */
1780                 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
1781                         cputhreads++;
1782                 else
1783                         realthreads++;
1784
1785                 if (!td->pid) {
1786                         pending++;
1787                         continue;
1788                 }
1789                 if (td->runstate == TD_REAPED)
1790                         continue;
1791                 if (td->o.use_thread) {
1792                         if (td->runstate == TD_EXITED) {
1793                                 td_set_runstate(td, TD_REAPED);
1794                                 goto reaped;
1795                         }
1796                         continue;
1797                 }
1798
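                     /*
                      * Poll with WNOHANG unless the job already marked itself
                      * exited, in which case a blocking waitpid() is safe.
                      */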
1799                 flags = WNOHANG;
1800                 if (td->runstate == TD_EXITED)
1801                         flags = 0;
1802
1803                 /*
1804                  * check if someone quit or got killed in an unusual way
1805                  */
1806                 ret = waitpid(td->pid, &status, flags);
1807                 if (ret < 0) {
1808                         if (errno == ECHILD) {
1809                                 log_err("fio: pid=%d disappeared, runstate=%d\n",
1810                                                 (int) td->pid, td->runstate);
1811                                 td->sig = ECHILD;
1812                                 td_set_runstate(td, TD_REAPED);
1813                                 goto reaped;
1814                         }
1815                         perror("waitpid");
1816                 } else if (ret == td->pid) {
1817                         if (WIFSIGNALED(status)) {
1818                                 int sig = WTERMSIG(status);
1819
1820                                 if (sig != SIGTERM && sig != SIGUSR2)
1821                                         log_err("fio: pid=%d, got signal=%d\n",
1822                                                         (int) td->pid, sig);
1823                                 td->sig = sig;
1824                                 td_set_runstate(td, TD_REAPED);
1825                                 goto reaped;
1826                         }
1827                         if (WIFEXITED(status)) {
1828                                 if (WEXITSTATUS(status) && !td->error)
1829                                         td->error = WEXITSTATUS(status);
1830
1831                                 td_set_runstate(td, TD_REAPED);
1832                                 goto reaped;
1833                         }
1834                 }
1835
1836                 /*
1837                  * If the job is stuck, do a forceful timeout of it and
1838                  * move on.
1839                  */
1840                 if (td->terminate &&
1841                     time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
1842                         dump_td_info(td);
1843                         td_set_runstate(td, TD_REAPED);
1844                         goto reaped;
1845                 }
1846
1847                 /*
1848                  * thread is not dead, continue
1849                  */
1850                 pending++;
1851                 continue;
1852 reaped:
1853                 (*nr_running)--;
1854                 (*m_rate) -= ddir_rw_sum(td->o.ratemin);
1855                 (*t_rate) -= ddir_rw_sum(td->o.rate);
1856                 if (!td->pid)
1857                         pending--;
1858
1859                 if (td->error)
1860                         exit_value++;
1861
1862                 done_secs += mtime_since_now(&td->epoch) / 1000;
1863                 profile_td_exit(td);
1864         }
1865
1866         if (*nr_running == cputhreads && !pending && realthreads)
1867                 fio_terminate_threads(TERMINATE_ALL);
1868 }
1869
1870 static bool __check_trigger_file(void)
1871 {
1872         struct stat sb;
1873
1874         if (!trigger_file)
1875                 return false;
1876
1877         if (stat(trigger_file, &sb))
1878                 return false;
1879
1880         if (unlink(trigger_file) < 0)
1881                 log_err("fio: failed to unlink %s: %s\n", trigger_file,
1882                                                         strerror(errno));
1883
1884         return true;
1885 }
1886
1887 static bool trigger_timedout(void)
1888 {
1889         if (trigger_timeout)
1890                 return time_since_genesis() >= trigger_timeout;
1891
1892         return false;
1893 }
1894
1895 void exec_trigger(const char *cmd)
1896 {
1897         int ret;
1898
1899         if (!cmd)
1900                 return;
1901
1902         ret = system(cmd);
1903         if (ret == -1)
1904                 log_err("fio: failed executing %s trigger\n", cmd);
1905 }
1906
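     /*
      * Fire the trigger if the trigger file appeared or the trigger timeout
      * expired: remote clients are told to run theirs, while a local run
      * saves verify state, stops all jobs and executes the configured command.
      */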
1907 void check_trigger_file(void)
1908 {
1909         if (__check_trigger_file() || trigger_timedout()) {
1910                 if (nr_clients)
1911                         fio_clients_send_trigger(trigger_remote_cmd);
1912                 else {
1913                         verify_save_state(IO_LIST_ALL);
1914                         fio_terminate_threads(TERMINATE_ALL);
1915                         exec_trigger(trigger_cmd);
1916                 }
1917         }
1918 }
1919
1920 static int fio_verify_load_state(struct thread_data *td)
1921 {
1922         int ret;
1923
1924         if (!td->o.verify_state)
1925                 return 0;
1926
1927         if (is_backend) {
1928                 void *data;
1929                 int ver;
1930
1931                 ret = fio_server_get_verify_state(td->o.name,
1932                                         td->thread_number - 1, &data, &ver);
1933                 if (!ret)
1934                         verify_convert_assign_state(td, data, ver);
1935         } else
1936                 ret = verify_load_state(td, "local");
1937
1938         return ret;
1939 }
1940
1941 static void do_usleep(unsigned int usecs)
1942 {
1943         check_for_running_stats();
1944         check_trigger_file();
1945         usleep(usecs);
1946 }
1947
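     /*
      * Refuse to write to a block device that is currently mounted, unless
      * the job explicitly set 'allow_mounted_write'.
      */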
1948 static bool check_mount_writes(struct thread_data *td)
1949 {
1950         struct fio_file *f;
1951         unsigned int i;
1952
1953         if (!td_write(td) || td->o.allow_mounted_write)
1954                 return false;
1955
1956         for_each_file(td, f, i) {
1957                 if (f->filetype != FIO_TYPE_BD)
1958                         continue;
1959                 if (device_is_mounted(f->file_name))
1960                         goto mounted;
1961         }
1962
1963         return false;
1964 mounted:
1965         log_err("fio: %s appears mounted, and 'allow_mounted_write' isn't set. Aborting.\n", f->file_name);
1966         return true;
1967 }
1968
1969 /*
1970  * Main function for kicking off and reaping jobs, as needed.
1971  */
1972 static void run_threads(struct sk_out *sk_out)
1973 {
1974         struct thread_data *td;
1975         unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
1976         uint64_t spent;
1977
1978         if (fio_gtod_offload && fio_start_gtod_thread())
1979                 return;
1980
1981         fio_idle_prof_init();
1982
1983         set_sig_handlers();
1984
1985         nr_thread = nr_process = 0;
1986         for_each_td(td, i) {
1987                 if (check_mount_writes(td))
1988                         return;
1989                 if (td->o.use_thread)
1990                         nr_thread++;
1991                 else
1992                         nr_process++;
1993         }
1994
1995         if (output_format & FIO_OUTPUT_NORMAL) {
1996                 log_info("Starting ");
1997                 if (nr_thread)
1998                         log_info("%d thread%s", nr_thread,
1999                                                 nr_thread > 1 ? "s" : "");
2000                 if (nr_process) {
2001                         if (nr_thread)
2002                                 log_info(" and ");
2003                         log_info("%d process%s", nr_process,
2004                                                 nr_process > 1 ? "es" : "");
2005                 }
2006                 log_info("\n");
2007                 log_info_flush();
2008         }
2009
2010         todo = thread_number;
2011         nr_running = 0;
2012         nr_started = 0;
2013         m_rate = t_rate = 0;
2014
2015         for_each_td(td, i) {
2016                 print_status_init(td->thread_number - 1);
2017
2018                 if (!td->o.create_serialize)
2019                         continue;
2020
2021                 if (fio_verify_load_state(td))
2022                         goto reap;
2023
2024                 /*
2025                  * Do file setup here so it happens sequentially; we
2026                  * don't want X number of threads getting their client
2027                  * data interspersed on disk.
2028                  */
2029                 if (setup_files(td)) {
2030 reap:
2031                         exit_value++;
2032                         if (td->error)
2033                                 log_err("fio: pid=%d, err=%d/%s\n",
2034                                         (int) td->pid, td->error, td->verror);
2035                         td_set_runstate(td, TD_REAPED);
2036                         todo--;
2037                 } else {
2038                         struct fio_file *f;
2039                         unsigned int j;
2040
2041                         /*
2042                          * For sharing to work, each job must always open
2043                          * its own files, so close them if we opened them
2044                          * for creation.
2045                          */
2046                         for_each_file(td, f, j) {
2047                                 if (fio_file_open(f))
2048                                         td_io_close_file(td, f);
2049                         }
2050                 }
2051         }
2052
2053         /* start idle threads before io threads start to run */
2054         fio_idle_prof_start();
2055
2056         set_genesis_time();
2057
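             /*
              * Keep creating and starting jobs until all of them have run;
              * stonewall and start_delay can hold some back, so this may
              * take several passes with reaping in between.
              */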
2058         while (todo) {
2059                 struct thread_data *map[REAL_MAX_JOBS];
2060                 struct timeval this_start;
2061                 int this_jobs = 0, left;
2062
2063                 /*
2064                  * create threads (TD_NOT_CREATED -> TD_CREATED)
2065                  */
2066                 for_each_td(td, i) {
2067                         if (td->runstate != TD_NOT_CREATED)
2068                                 continue;
2069
2070                         /*
2071                          * never got a chance to start, killed by other
2072                          * thread for some reason
2073                          */
2074                         if (td->terminate) {
2075                                 todo--;
2076                                 continue;
2077                         }
2078
2079                         if (td->o.start_delay) {
2080                                 spent = utime_since_genesis();
2081
2082                                 if (td->o.start_delay > spent)
2083                                         continue;
2084                         }
2085
2086                         if (td->o.stonewall && (nr_started || nr_running)) {
2087                                 dprint(FD_PROCESS, "%s: stonewall wait\n",
2088                                                         td->o.name);
2089                                 break;
2090                         }
2091
2092                         init_disk_util(td);
2093
2094                         td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
2095                         td->update_rusage = 0;
2096
2097                         /*
2098                          * Set state to created. Thread will transition
2099                          * to TD_INITIALIZED when it's done setting up.
2100                          */
2101                         td_set_runstate(td, TD_CREATED);
2102                         map[this_jobs++] = td;
2103                         nr_started++;
2104
2105                         if (td->o.use_thread) {
2106                                 struct fork_data *fd;
2107                                 int ret;
2108
2109                                 fd = calloc(1, sizeof(*fd));
2110                                 fd->td = td;
2111                                 fd->sk_out = sk_out;
2112
2113                                 dprint(FD_PROCESS, "will pthread_create\n");
2114                                 ret = pthread_create(&td->thread, NULL,
2115                                                         thread_main, fd);
2116                                 if (ret) {
2117                                         log_err("pthread_create: %s\n",
2118                                                         strerror(ret));
2119                                         free(fd);
2120                                         nr_started--;
2121                                         break;
2122                                 }
2123                                 ret = pthread_detach(td->thread);
2124                                 if (ret)
2125                                         log_err("pthread_detach: %s\n",
2126                                                         strerror(ret));
2127                         } else {
2128                                 pid_t pid;
2129                                 dprint(FD_PROCESS, "will fork\n");
2130                                 pid = fork();
2131                                 if (!pid) {
2132                                         int ret = fork_main(sk_out, shm_id, i);
2133
2134                                         _exit(ret);
2135                                 } else if (i == fio_debug_jobno)
2136                                         *fio_debug_jobp = pid;
2137                         }
2138                         dprint(FD_MUTEX, "wait on startup_mutex\n");
2139                         if (fio_mutex_down_timeout(startup_mutex, 10000)) {
2140                                 log_err("fio: job startup hung? exiting.\n");
2141                                 fio_terminate_threads(TERMINATE_ALL);
2142                                 fio_abort = 1;
2143                                 nr_started--;
2144                                 break;
2145                         }
2146                         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
2147                 }
2148
2149                 /*
2150                  * Wait for the started threads to transition to
2151                  * TD_INITIALIZED.
2152                  */
2153                 fio_gettime(&this_start, NULL);
2154                 left = this_jobs;
2155                 while (left && !fio_abort) {
2156                         if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
2157                                 break;
2158
2159                         do_usleep(100000);
2160
2161                         for (i = 0; i < this_jobs; i++) {
2162                                 td = map[i];
2163                                 if (!td)
2164                                         continue;
2165                                 if (td->runstate == TD_INITIALIZED) {
2166                                         map[i] = NULL;
2167                                         left--;
2168                                 } else if (td->runstate >= TD_EXITED) {
2169                                         map[i] = NULL;
2170                                         left--;
2171                                         todo--;
2172                                         nr_running++; /* work-around... */
2173                                 }
2174                         }
2175                 }
2176
2177                 if (left) {
2178                         log_err("fio: %d job%s failed to start\n", left,
2179                                         left > 1 ? "s" : "");
2180                         for (i = 0; i < this_jobs; i++) {
2181                                 td = map[i];
2182                                 if (!td)
2183                                         continue;
2184                                 kill(td->pid, SIGTERM);
2185                         }
2186                         break;
2187                 }
2188
2189                 /*
2190                  * start created threads (TD_INITIALIZED -> TD_RUNNING).
2191                  */
2192                 for_each_td(td, i) {
2193                         if (td->runstate != TD_INITIALIZED)
2194                                 continue;
2195
2196                         if (in_ramp_time(td))
2197                                 td_set_runstate(td, TD_RAMP);
2198                         else
2199                                 td_set_runstate(td, TD_RUNNING);
2200                         nr_running++;
2201                         nr_started--;
2202                         m_rate += ddir_rw_sum(td->o.ratemin);
2203                         t_rate += ddir_rw_sum(td->o.rate);
2204                         todo--;
2205                         fio_mutex_up(td->mutex);
2206                 }
2207
2208                 reap_threads(&nr_running, &t_rate, &m_rate);
2209
2210                 if (todo)
2211                         do_usleep(100000);
2212         }
2213
2214         while (nr_running) {
2215                 reap_threads(&nr_running, &t_rate, &m_rate);
2216                 do_usleep(10000);
2217         }
2218
2219         fio_idle_prof_stop();
2220
2221         update_io_ticks();
2222 }
2223
2224 static void wait_for_helper_thread_exit(void)
2225 {
2226         void *ret;
2227
2228         helper_exit = 1;
2229         pthread_cond_signal(&helper_cond);
2230         pthread_join(helper_thread, &ret);
2231 }
2232
2233 static void free_disk_util(void)
2234 {
2235         disk_util_prune_entries();
2236
2237         pthread_cond_destroy(&helper_cond);
2238 }
2239
2240 static void *helper_thread_main(void *data)
2241 {
2242         struct sk_out *sk_out = data;
2243         int ret = 0;
2244
2245         sk_out_assign(sk_out);
2246
2247         fio_mutex_up(startup_mutex);
2248
2249         while (!ret) {
2250                 uint64_t sec = DISK_UTIL_MSEC / 1000;
2251                 uint64_t nsec = (DISK_UTIL_MSEC % 1000) * 1000000;
2252                 struct timespec ts;
2253                 struct timeval tv;
2254
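                     /*
                      * Convert the relative DISK_UTIL_MSEC period into the
                      * absolute deadline that pthread_cond_timedwait()
                      * expects, carrying nanosecond overflow into seconds.
                      */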
2255                 gettimeofday(&tv, NULL);
2256                 ts.tv_sec = tv.tv_sec + sec;
2257                 ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
2258
2259                 if (ts.tv_nsec >= 1000000000ULL) {
2260                         ts.tv_nsec -= 1000000000ULL;
2261                         ts.tv_sec++;
2262                 }
2263
                     /* POSIX requires helper_lock to be held across the wait */
                     pthread_mutex_lock(&helper_lock);
2264                 pthread_cond_timedwait(&helper_cond, &helper_lock, &ts);
                     pthread_mutex_unlock(&helper_lock);
2265
2266                 ret = update_io_ticks();
2267
2268                 if (helper_do_stat) {
2269                         helper_do_stat = 0;
2270                         __show_running_run_stats();
2271                 }
2272
2273                 if (!is_backend)
2274                         print_thread_status();
2275         }
2276
2277         sk_out_drop();
2278         return NULL;
2279 }
2280
2281 static int create_helper_thread(struct sk_out *sk_out)
2282 {
2283         int ret;
2284
2285         setup_disk_util();
2286
2287         pthread_cond_init(&helper_cond, NULL);
2288         pthread_mutex_init(&helper_lock, NULL);
2289
2290         ret = pthread_create(&helper_thread, NULL, helper_thread_main, sk_out);
2291         if (ret) {
2292                 log_err("Can't create helper thread: %s\n", strerror(ret));
2293                 return 1;
2294         }
2295
2296         dprint(FD_MUTEX, "wait on startup_mutex\n");
2297         fio_mutex_down(startup_mutex);
2298         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
2299         return 0;
2300 }
2301
2302 int fio_backend(struct sk_out *sk_out)
2303 {
2304         struct thread_data *td;
2305         int i;
2306
2307         if (exec_profile) {
2308                 if (load_profile(exec_profile))
2309                         return 1;
2310                 free(exec_profile);
2311                 exec_profile = NULL;
2312         }
2313         if (!thread_number)
2314                 return 0;
2315
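             /*
              * With bandwidth logging enabled, set up one aggregate log per
              * data direction before any job starts feeding it samples.
              */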
2316         if (write_bw_log) {
2317                 struct log_params p = {
2318                         .log_type = IO_LOG_TYPE_BW,
2319                 };
2320
2321                 setup_log(&agg_io_log[DDIR_READ], &p, "agg-read_bw.log");
2322                 setup_log(&agg_io_log[DDIR_WRITE], &p, "agg-write_bw.log");
2323                 setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
2324         }
2325
2326         startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
2327         if (startup_mutex == NULL)
2328                 return 1;
2329
2330         set_genesis_time();
2331         stat_init();
2332         create_helper_thread(sk_out);
2333
2334         cgroup_list = smalloc(sizeof(*cgroup_list));
2335         INIT_FLIST_HEAD(cgroup_list);
2336
2337         run_threads(sk_out);
2338
2339         wait_for_helper_thread_exit();
2340
2341         if (!fio_abort) {
2342                 __show_run_stats();
2343                 if (write_bw_log) {
2344                         for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2345                                 struct io_log *log = agg_io_log[i];
2346
2347                                 flush_log(log, 0);
2348                                 free_log(log);
2349                         }
2350                 }
2351         }
2352
2353         for_each_td(td, i) {
2354                 fio_options_free(td);
2355                 if (td->rusage_sem) {
2356                         fio_mutex_remove(td->rusage_sem);
2357                         td->rusage_sem = NULL;
2358                 }
2359         }
2360
2361         free_disk_util();
2362         cgroup_kill(cgroup_list);
2363         sfree(cgroup_list);
2364         sfree(cgroup_mnt);
2365
2366         fio_mutex_remove(startup_mutex);
2367         stat_exit();
2368         return exit_value;
2369 }