Fix clat percentile display
[fio.git] / fio.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"

unsigned long page_mask;
unsigned long page_size;

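/*
 * PAGE_ALIGN rounds a buffer address up to the next page boundary:
 * with a 4096 byte page (page_mask 0xfff), a buffer at 0x1003 is
 * aligned up to 0x2000.
 */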
#define PAGE_ALIGN(buf) \
        (char *) (((unsigned long) (buf) + page_mask) & ~page_mask)

int groupid = 0;
int thread_number = 0;
int nr_process = 0;
int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;

static struct fio_mutex *startup_mutex;
static struct fio_mutex *writeout_mutex;
static volatile int fio_abort;
static int exit_value;
static pthread_t gtod_thread;
static pthread_t disk_util_thread;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;

unsigned long arch_flags = 0;

struct io_log *agg_io_log[2];

#define JOB_START_TIMEOUT       (5 * 1000)

static const char *fio_os_strings[os_nr] = {
        "Invalid",
        "Linux",
        "AIX",
        "FreeBSD",
        "HP-UX",
        "OSX",
        "NetBSD",
        "Solaris",
        "Windows"
};

static const char *fio_arch_strings[arch_nr] = {
        "Invalid",
        "x86-64",
        "x86",
        "ppc",
        "ia64",
        "s390",
        "alpha",
        "sparc",
        "sparc64",
        "arm",
        "sh",
        "hppa",
        "generic"
};

const char *fio_get_os_string(int nr)
{
        if (nr < os_nr)
                return fio_os_strings[nr];

        return NULL;
}

const char *fio_get_arch_string(int nr)
{
        if (nr < arch_nr)
                return fio_arch_strings[nr];

        return NULL;
}

void td_set_runstate(struct thread_data *td, int runstate)
{
        if (td->runstate == runstate)
                return;

        dprint(FD_PROCESS, "pid=%d: runstate %d -> %d\n", (int) td->pid,
                                                td->runstate, runstate);
        td->runstate = runstate;
}

void fio_terminate_threads(int group_id)
{
        struct thread_data *td;
        int i;

        dprint(FD_PROCESS, "terminate group_id=%d\n", group_id);

        for_each_td(td, i) {
                if (group_id == TERMINATE_ALL || group_id == td->groupid) {
                        dprint(FD_PROCESS, "setting terminate on %s/%d\n",
                                                td->o.name, (int) td->pid);
                        td->terminate = 1;
                        td->o.start_delay = 0;

                        /*
                         * if the thread is running, just let it exit
                         */
                        if (!td->pid)
                                continue;
                        else if (td->runstate < TD_RAMP)
                                kill(td->pid, SIGTERM);
                        else {
                                struct ioengine_ops *ops = td->io_ops;

                                if (ops && (ops->flags & FIO_SIGTERM))
                                        kill(td->pid, SIGTERM);
                        }
                }
        }
}

static void sig_int(int sig)
{
        if (threads) {
                if (is_backend)
                        fio_server_got_signal(sig);
                else {
                        log_info("\nfio: terminating on signal %d\n", sig);
                        fflush(stdout);
                        exit_value = 128;
                }

                fio_terminate_threads(TERMINATE_ALL);
        }
}

static void *disk_thread_main(void *data)
{
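        /*
         * Detached helper that wakes every DISK_UTIL_MSEC to refresh the
         * disk utilization counters and, unless we run as a backend,
         * repaint the status line.
         */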
        fio_mutex_up(startup_mutex);

        while (threads) {
                usleep(DISK_UTIL_MSEC * 1000);
                if (!threads)
                        break;
                update_io_ticks();

                if (!is_backend)
                        print_thread_status();
        }

        return NULL;
}

static int create_disk_util_thread(void)
{
        int ret;

        ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
        if (ret) {
                log_err("Can't create disk util thread: %s\n", strerror(ret));
                return 1;
        }

        ret = pthread_detach(disk_util_thread);
        if (ret) {
                log_err("Can't detach disk util thread: %s\n", strerror(ret));
                return 1;
        }

        dprint(FD_MUTEX, "wait on startup_mutex\n");
        fio_mutex_down(startup_mutex);
        dprint(FD_MUTEX, "done waiting on startup_mutex\n");
        return 0;
}

static void set_sig_handlers(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);

        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
                act.sa_flags = SA_RESTART;
                sigaction(SIGPIPE, &act, NULL);
        }
}

/*
 * Check if we are above the minimum rate given.
 */
static int __check_min_rate(struct thread_data *td, struct timeval *now,
                            enum fio_ddir ddir)
{
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
        unsigned int ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;

        assert(ddir_rw(ddir));

        if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
                return 0;

        /*
         * allow a 2 second settle period in the beginning
         */
        if (mtime_since(&td->start, now) < 2000)
                return 0;

        iops += td->this_io_blocks[ddir];
        bytes += td->this_io_bytes[ddir];
        ratemin += td->o.ratemin[ddir];
        rate_iops += td->o.rate_iops[ddir];
        rate_iops_min += td->o.rate_iops_min[ddir];

        /*
         * if rate blocks is set, sample is running
         */
        if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
                spent = mtime_since(&td->lastrate[ddir], now);
                if (spent < td->o.ratecycle)
                        return 0;

                if (td->o.rate[ddir]) {
                        /*
                         * check bandwidth specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
                                log_err("%s: min rate %u not met\n", td->o.name,
                                                                ratemin);
                                return 1;
                        } else {
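                                /*
                                 * mtime_since() reports milliseconds, so
                                 * scaling the byte delta by 1000 yields a
                                 * per-second rate over the sample window.
                                 */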
                                rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
                                        log_err("%s: min rate %u not met, got"
                                                " %luKB/sec\n", td->o.name,
                                                        ratemin, rate);
                                        return 1;
                                }
                        }
                } else {
                        /*
                         * check iops specified rate
                         */
                        if (iops < rate_iops) {
                                log_err("%s: min iops rate %u not met\n",
                                                td->o.name, rate_iops);
                                return 1;
                        } else {
                                rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: min iops rate %u not met,"
                                                " got %lu\n", td->o.name,
                                                        rate_iops_min, rate);
                                        return 1;
                                }
                        }
                }
        }

        td->rate_bytes[ddir] = bytes;
        td->rate_blocks[ddir] = iops;
        memcpy(&td->lastrate[ddir], now, sizeof(*now));
        return 0;
}

static int check_min_rate(struct thread_data *td, struct timeval *now,
                          unsigned long *bytes_done)
{
        int ret = 0;

        if (bytes_done[0])
                ret |= __check_min_rate(td, now, 0);
        if (bytes_done[1])
                ret |= __check_min_rate(td, now, 1);

        return ret;
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
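        /* o.timeout is in seconds, mtime_since() reports milliseconds */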
        if (!td->o.timeout)
                return 0;
        if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
                return 1;

        return 0;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
        struct flist_head *entry, *n;
        struct io_u *io_u;
        int r;

        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0, NULL);
        if (r < 0)
                return;

        /*
         * now cancel remaining active events
         */
        if (td->io_ops->cancel) {
                flist_for_each_safe(entry, n, &td->io_u_busylist) {
                        io_u = flist_entry(entry, struct io_u, list);

                        /*
                         * if the io_u isn't in flight, then that generally
                         * means someone leaked an io_u. complain but fix
                         * it up, so we don't stall here.
                         */
                        if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
                                log_err("fio: non-busy IO on busy list\n");
                                put_io_u(td, io_u);
                        } else {
                                r = td->io_ops->cancel(td, io_u);
                                if (!r)
                                        put_io_u(td, io_u);
                        }
                }
        }

        if (td->cur_depth)
                r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
        struct io_u *io_u = __get_io_u(td);
        int ret;

        if (!io_u)
                return 1;

        io_u->ddir = DDIR_SYNC;
        io_u->file = f;

        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
                return 1;
        }

requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
                td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return 1;
        } else if (ret == FIO_Q_QUEUED) {
                if (io_u_queued_complete(td, 1, NULL) < 0)
                        return 1;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
                        td_verror(td, io_u->error, "td_io_queue");
                        return 1;
                }

                if (io_u_sync_complete(td, io_u, NULL) < 0)
                        return 1;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
                        return 1;
                goto requeue;
        }

        return 0;
}

static inline void __update_tv_cache(struct thread_data *td)
{
        fio_gettime(&td->tv_cache, NULL);
}

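/*
 * Only refresh the cached time once every (tv_cache_mask + 1) calls;
 * in between, callers reuse td->tv_cache so hot paths don't hammer
 * fio_gettime().
 */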
static inline void update_tv_cache(struct thread_data *td)
{
        if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
                __update_tv_cache(td);
}

static int break_on_this_error(struct thread_data *td, int *retptr)
{
        int ret = *retptr;

        if (ret < 0 || td->error) {
                int err;

                if (!td->o.continue_on_error)
                        return 1;

                if (ret < 0)
                        err = -ret;
                else
                        err = td->error;

                if (td_non_fatal_error(err)) {
                        /*
                         * Continue with the I/Os in case of
                         * a non fatal error.
                         */
                        update_error_count(td, err);
                        td_clear_error(td);
                        *retptr = 0;
                        return 0;
                } else if (td->o.fill_device && err == ENOSPC) {
                        /*
                         * We expect to hit this error if
                         * fill_device option is set.
                         */
                        td_clear_error(td);
                        td->terminate = 1;
                        return 1;
                } else {
                        /*
                         * Stop the I/O in case of a fatal
                         * error.
                         */
                        update_error_count(td, err);
                        return 1;
                }
        }

        return 0;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
        unsigned int i;

        dprint(FD_VERIFY, "starting loop\n");

        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
                if (!fio_file_open(f))
                        continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
                        break;
        }

        if (td->error)
                return;

        td_set_runstate(td, TD_VERIFYING);

        io_u = NULL;
        while (!td->terminate) {
                int ret2, full;

                update_tv_cache(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                td->terminate = 1;
                                break;
                        }
                }

                io_u = __get_io_u(td);
                if (!io_u)
                        break;

                if (get_next_verify(td, io_u)) {
                        put_io_u(td, io_u);
                        break;
                }

                if (td_io_prep(td, io_u)) {
                        put_io_u(td, io_u);
                        break;
                }

                if (td->o.verify_async)
                        io_u->end_io = verify_io_u_async;
                else
                        io_u->end_io = verify_io_u;

                ret = td_io_queue(td, io_u);
                switch (ret) {
                case FIO_Q_COMPLETED:
                        if (io_u->error) {
                                ret = -io_u->error;
                                clear_io_u(td, io_u);
                        } else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;

                                /*
                                 * zero read, fail
                                 */
                                if (!bytes) {
                                        td_verror(td, EIO, "full resid");
                                        put_io_u(td, io_u);
                                        break;
                                }

                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                io_u->offset += bytes;

                                if (ddir_rw(io_u->ddir))
                                        td->ts.short_io_u[io_u->ddir]++;

                                f = io_u->file;
                                if (io_u->offset == f->real_file_size)
                                        goto sync_done;

                                requeue_io_u(td, &io_u);
                        } else {
sync_done:
                                ret = io_u_sync_complete(td, io_u, NULL);
                                if (ret < 0)
                                        break;
                        }
                        continue;
                case FIO_Q_QUEUED:
                        break;
                case FIO_Q_BUSY:
                        requeue_io_u(td, &io_u);
                        ret2 = td_io_commit(td);
                        if (ret2 < 0)
                                ret = ret2;
                        break;
                default:
                        assert(ret < 0);
                        td_verror(td, -ret, "td_io_queue");
                        break;
                }

                if (break_on_this_error(td, &ret))
                        break;

                /*
                 * if we can queue more, do so. but check if there are
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
                                         td->cur_depth);
                        if (full && !min_events && td->o.iodepth_batch_complete != 0)
                                min_events = 1;

                        do {
                                /*
                                 * Reap required number of io units, if any,
                                 * and do the verification on them through
                                 * the callback handler
                                 */
                                if (io_u_queued_complete(td, min_events, NULL) < 0) {
                                        ret = -1;
                                        break;
                                }
                        } while (full && (td->cur_depth > td->o.iodepth_low));
                }
                if (ret < 0)
                        break;
        }

        if (!td->error) {
                min_events = td->cur_depth;

                if (min_events)
                        ret = io_u_queued_complete(td, min_events, NULL);
        } else
                cleanup_pending_aio(td);

        td_set_runstate(td, TD_RUNNING);

        dprint(FD_VERIFY, "exiting loop\n");
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
        unsigned int i;
        int ret = 0;

        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
        else
                td_set_runstate(td, TD_RUNNING);

        while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) ||
                ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
                struct timeval comp_time;
                unsigned long bytes_done[2] = { 0, 0 };
                int min_evts = 0;
                struct io_u *io_u;
                int ret2, full;

                if (td->terminate)
                        break;

                update_tv_cache(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                td->terminate = 1;
                                break;
                        }
                }

                io_u = get_io_u(td);
                if (!io_u)
                        break;

                /*
                 * Add verification end_io handler, if asked to verify
                 * a previously written file.
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    !td_rw(td)) {
                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
                                io_u->end_io = verify_io_u;
                        td_set_runstate(td, TD_VERIFYING);
                } else if (in_ramp_time(td))
                        td_set_runstate(td, TD_RAMP);
                else
                        td_set_runstate(td, TD_RUNNING);

                ret = td_io_queue(td, io_u);
                switch (ret) {
                case FIO_Q_COMPLETED:
                        if (io_u->error) {
                                ret = -io_u->error;
                                clear_io_u(td, io_u);
                        } else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;
                                struct fio_file *f = io_u->file;

                                /*
                                 * zero read, fail
                                 */
                                if (!bytes) {
                                        td_verror(td, EIO, "full resid");
                                        put_io_u(td, io_u);
                                        break;
                                }

                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                io_u->offset += bytes;

                                if (ddir_rw(io_u->ddir))
                                        td->ts.short_io_u[io_u->ddir]++;

                                if (io_u->offset == f->real_file_size)
                                        goto sync_done;

                                requeue_io_u(td, &io_u);
                        } else {
sync_done:
                                if (__should_check_rate(td, 0) ||
                                    __should_check_rate(td, 1))
                                        fio_gettime(&comp_time, NULL);

                                ret = io_u_sync_complete(td, io_u, bytes_done);
                                if (ret < 0)
                                        break;
                        }
                        break;
                case FIO_Q_QUEUED:
                        /*
                         * if the engine doesn't have a commit hook,
                         * the io_u is really queued. if it does have such
                         * a hook, it has to call io_u_queued() itself.
                         */
                        if (td->io_ops->commit == NULL)
                                io_u_queued(td, io_u);
                        break;
                case FIO_Q_BUSY:
                        requeue_io_u(td, &io_u);
                        ret2 = td_io_commit(td);
                        if (ret2 < 0)
                                ret = ret2;
                        break;
                default:
                        assert(ret < 0);
                        put_io_u(td, io_u);
                        break;
                }

                if (break_on_this_error(td, &ret))
                        break;

                /*
                 * See if we need to complete some commands. Note that we
                 * can get BUSY even without IO queued, if the system is
                 * resource starved.
                 */
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || !td->o.iodepth_batch_complete) {
                        min_evts = min(td->o.iodepth_batch_complete,
                                        td->cur_depth);
                        if (full && !min_evts && td->o.iodepth_batch_complete != 0)
                                min_evts = 1;

                        if (__should_check_rate(td, 0) ||
                            __should_check_rate(td, 1))
                                fio_gettime(&comp_time, NULL);

                        do {
                                ret = io_u_queued_complete(td, min_evts, bytes_done);
                                if (ret < 0)
                                        break;

                        } while (full && (td->cur_depth > td->o.iodepth_low));
                }

                if (ret < 0)
                        break;
                if (!(bytes_done[0] + bytes_done[1]))
                        continue;

                if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
                        if (check_min_rate(td, &comp_time, bytes_done)) {
                                if (exitall_on_terminate)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
                                break;
                        }
                }

                if (td->o.thinktime) {
                        unsigned long long b;

                        b = td->io_blocks[0] + td->io_blocks[1];
                        if (!(b % td->o.thinktime_blocks)) {
                                int left;

                                if (td->o.thinktime_spin)
                                        usec_spin(td->o.thinktime_spin);

                                left = td->o.thinktime - td->o.thinktime_spin;
                                if (left)
                                        usec_sleep(td, left);
                        }
                }
        }

        if (td->trim_entries)
                log_err("trim entries %lu\n", td->trim_entries);

        if (td->o.fill_device && td->error == ENOSPC) {
                td->error = 0;
                td->terminate = 1;
        }
        if (!td->error) {
                struct fio_file *f;

                i = td->cur_depth;
                if (i) {
                        ret = io_u_queued_complete(td, i, NULL);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }

                if (should_fsync(td) && td->o.end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);

                        for_each_file(td, f, i) {
                                if (!fio_file_open(f))
                                        continue;
                                fio_io_sync(td, f);
                        }
                }
        } else
                cleanup_pending_aio(td);

        /*
         * stop job if we failed doing any IO
         */
        if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
                td->done = 1;
}

static void cleanup_io_u(struct thread_data *td)
{
        struct flist_head *entry, *n;
        struct io_u *io_u;

        flist_for_each_safe(entry, n, &td->io_u_freelist) {
                io_u = flist_entry(entry, struct io_u, list);

                flist_del(&io_u->list);
                fio_memfree(io_u, sizeof(*io_u));
        }

        free_io_mem(td);
}

static int init_io_u(struct thread_data *td)
{
        struct io_u *io_u;
        unsigned int max_bs;
        int cl_align, i, max_units;
        char *p;

        max_units = td->o.iodepth;
        max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;

        if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
                unsigned long bs;

                bs = td->orig_buffer_size + td->o.hugepage_size - 1;
                td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
        }
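        /*
         * Example: max_bs = 128k at iodepth = 16 asks for a 2MB pool; for
         * huge pages the masking above rounds that up to the next multiple
         * of the (power-of-2) hugepage_size.
         */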

        if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
                log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
                return 1;
        }

        if (allocate_io_mem(td))
                return 1;

        if (td->o.odirect || td->o.mem_align ||
            (td->io_ops->flags & FIO_RAWIO))
                p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
        else
                p = td->orig_buffer;

        cl_align = os_cache_line_size();

        for (i = 0; i < max_units; i++) {
                void *ptr;

                if (td->terminate)
                        return 1;

                ptr = fio_memalign(cl_align, sizeof(*io_u));
                if (!ptr) {
                        log_err("fio: unable to allocate aligned memory\n");
                        break;
                }

                io_u = ptr;
                memset(io_u, 0, sizeof(*io_u));
                INIT_FLIST_HEAD(&io_u->list);
                dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

                if (!(td->io_ops->flags & FIO_NOIO)) {
                        io_u->buf = p + max_bs * i;
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

                        if (td_write(td))
                                io_u_fill_buffer(td, io_u, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
                                 * going to be doing writes.
                                 */
                                fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
                        }
                }

                io_u->index = i;
                io_u->flags = IO_U_F_FREE;
                flist_add(&io_u->list, &td->io_u_freelist);
        }

        return 0;
}

static int switch_ioscheduler(struct thread_data *td)
{
        char tmp[256], tmp2[128];
        FILE *f;
        int ret;

        if (td->io_ops->flags & FIO_DISKLESSIO)
                return 0;

        sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

        f = fopen(tmp, "r+");
        if (!f) {
                if (errno == ENOENT) {
                        log_err("fio: os or kernel doesn't support IO scheduler"
                                " switching\n");
                        return 0;
                }
                td_verror(td, errno, "fopen iosched");
                return 1;
        }

        /*
         * Set io scheduler.
         */
        ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
                td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }

        rewind(f);

        /*
         * Read back and check that the selected scheduler is now the default.
         */
        ret = fread(tmp, 1, sizeof(tmp) - 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
        /*
         * NUL terminate what we read back, strstr() below expects a
         * proper string.
         */
        tmp[ret] = '\0';

        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
                td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}

static int keep_running(struct thread_data *td)
{
        unsigned long long io_done;

        if (td->done)
                return 0;
        if (td->o.time_based)
                return 1;
        if (td->o.loops) {
                td->o.loops--;
                return 1;
        }

        io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE]
                        + td->io_skip_bytes;
        if (io_done < td->o.size)
                return 1;

        return 0;
}

static void reset_io_counters(struct thread_data *td)
{
        td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
        td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
        td->stat_io_blocks[0] = td->stat_io_blocks[1] = 0;
        td->this_io_blocks[0] = td->this_io_blocks[1] = 0;
        td->zone_bytes = 0;
        td->rate_bytes[0] = td->rate_bytes[1] = 0;
        td->rate_blocks[0] = td->rate_blocks[1] = 0;

        td->last_was_sync = 0;

        /*
         * reset file done count if we are to start over
         */
        if (td->o.time_based || td->o.loops)
                td->nr_done_files = 0;
}

void reset_all_stats(struct thread_data *td)
{
        struct timeval tv;
        int i;

        reset_io_counters(td);

        for (i = 0; i < 2; i++) {
                td->io_bytes[i] = 0;
                td->io_blocks[i] = 0;
                td->io_issues[i] = 0;
                td->ts.total_io_u[i] = 0;
        }

        fio_gettime(&tv, NULL);
        td->ts.runtime[0] = 0;
        td->ts.runtime[1] = 0;
        memcpy(&td->epoch, &tv, sizeof(tv));
        memcpy(&td->start, &tv, sizeof(tv));
}

static void clear_io_state(struct thread_data *td)
{
        struct fio_file *f;
        unsigned int i;

        reset_io_counters(td);

        close_files(td);
        for_each_file(td, f, i)
                fio_file_clear_done(f);

        /*
         * Set the same seed to get repeatable runs
         */
        td_fill_rand_seeds(td);
}

static int exec_string(const char *string)
{
        int ret, newlen = strlen(string) + strlen("sh -c \"\"") + 1;
        char *str;

        str = malloc(newlen);
        sprintf(str, "sh -c \"%s\"", string);

        ret = system(str);
        if (ret == -1)
                log_err("fio: exec of cmd <%s> failed\n", str);

        free(str);
        return ret;
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
        unsigned long long elapsed;
        struct thread_data *td = data;
        pthread_condattr_t attr;
        int clear_state;

        if (!td->o.use_thread) {
                setsid();
                td->pid = getpid();
        } else
                td->pid = gettid();

        dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

        INIT_FLIST_HEAD(&td->io_u_freelist);
        INIT_FLIST_HEAD(&td->io_u_busylist);
        INIT_FLIST_HEAD(&td->io_u_requeues);
        INIT_FLIST_HEAD(&td->io_log_list);
        INIT_FLIST_HEAD(&td->io_hist_list);
        INIT_FLIST_HEAD(&td->verify_list);
        INIT_FLIST_HEAD(&td->trim_list);
        pthread_mutex_init(&td->io_u_lock, NULL);
        td->io_hist_tree = RB_ROOT;

        pthread_condattr_init(&attr);
        pthread_cond_init(&td->verify_cond, &attr);
        pthread_cond_init(&td->free_cond, &attr);

        td_set_runstate(td, TD_INITIALIZED);
        dprint(FD_MUTEX, "up startup_mutex\n");
        fio_mutex_up(startup_mutex);
        dprint(FD_MUTEX, "wait on td->mutex\n");
        fio_mutex_down(td->mutex);
        dprint(FD_MUTEX, "done waiting on td->mutex\n");

        /*
         * the ->mutex mutex is now no longer used, close it to avoid
         * eating a file descriptor
         */
        fio_mutex_remove(td->mutex);

        /*
         * A new gid requires privilege, so we need to do this before setting
         * the uid.
         */
        if (td->o.gid != -1U && setgid(td->o.gid)) {
                td_verror(td, errno, "setgid");
                goto err;
        }
        if (td->o.uid != -1U && setuid(td->o.uid)) {
                td_verror(td, errno, "setuid");
                goto err;
        }

        /*
         * If we have a gettimeofday() thread, make sure we exclude that
         * thread from this job
         */
        if (td->o.gtod_cpu)
                fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);

        /*
         * Set affinity first, in case it has an impact on the memory
         * allocations.
         */
        if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
                td_verror(td, errno, "cpu_set_affinity");
                goto err;
        }

        /*
         * May alter parameters that init_io_u() will use, so we need to
         * do this first.
         */
        if (init_iolog(td))
                goto err;

        if (init_io_u(td))
                goto err;

        if (td->o.verify_async && verify_async_init(td))
                goto err;

        if (td->ioprio_set) {
                if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
                        td_verror(td, errno, "ioprio_set");
                        goto err;
                }
        }

        if (td->o.cgroup_weight && cgroup_setup(td, cgroup_list, &cgroup_mnt))
                goto err;

        if (nice(td->o.nice) == -1) {
                td_verror(td, errno, "nice");
                goto err;
        }

        if (td->o.ioscheduler && switch_ioscheduler(td))
                goto err;

        if (!td->o.create_serialize && setup_files(td))
                goto err;

        if (td_io_init(td))
                goto err;

        if (init_random_map(td))
                goto err;

        if (td->o.exec_prerun) {
                if (exec_string(td->o.exec_prerun))
                        goto err;
        }

        if (td->o.pre_read) {
                if (pre_read_files(td) < 0)
                        goto err;
        }

        fio_gettime(&td->epoch, NULL);
        getrusage(RUSAGE_SELF, &td->ru_start);

        clear_state = 0;
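
        /*
         * Main job loop: each pass runs the workload (do_io) and, when
         * verification is enabled, reads the data back (do_verify).
         * time_based and loops jobs repeat until keep_running() says stop.
         */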
        while (keep_running(td)) {
                fio_gettime(&td->start, NULL);
                memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
                memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
                memcpy(&td->tv_cache, &td->start, sizeof(td->start));

                if (td->o.ratemin[0] || td->o.ratemin[1])
                        memcpy(&td->lastrate, &td->bw_sample_time,
                                                        sizeof(td->lastrate));

                if (clear_state)
                        clear_io_state(td);

                prune_io_piece_log(td);

                do_io(td);

                clear_state = 1;

                if (td_read(td) && td->io_bytes[DDIR_READ]) {
                        elapsed = utime_since_now(&td->start);
                        td->ts.runtime[DDIR_READ] += elapsed;
                }
                if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
                        elapsed = utime_since_now(&td->start);
                        td->ts.runtime[DDIR_WRITE] += elapsed;
                }

                if (td->error || td->terminate)
                        break;

                if (!td->o.do_verify ||
                    td->o.verify == VERIFY_NONE ||
                    (td->io_ops->flags & FIO_UNIDIR))
                        continue;

                clear_io_state(td);

                fio_gettime(&td->start, NULL);

                do_verify(td);

                td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);

                if (td->error || td->terminate)
                        break;
        }

        update_rusage_stat(td);
        td->ts.runtime[0] = (td->ts.runtime[0] + 999) / 1000;
        td->ts.runtime[1] = (td->ts.runtime[1] + 999) / 1000;
        td->ts.total_run_time = mtime_since_now(&td->epoch);
        td->ts.io_bytes[0] = td->io_bytes[0];
        td->ts.io_bytes[1] = td->io_bytes[1];

        fio_mutex_down(writeout_mutex);
        if (td->bw_log) {
                if (td->o.bw_log_file) {
                        finish_log_named(td, td->bw_log,
                                                td->o.bw_log_file, "bw");
                } else
                        finish_log(td, td->bw_log, "bw");
        }
        if (td->lat_log) {
                if (td->o.lat_log_file) {
                        finish_log_named(td, td->lat_log,
                                                td->o.lat_log_file, "lat");
                } else
                        finish_log(td, td->lat_log, "lat");
        }
        if (td->slat_log) {
                if (td->o.lat_log_file) {
                        finish_log_named(td, td->slat_log,
                                                td->o.lat_log_file, "slat");
                } else
                        finish_log(td, td->slat_log, "slat");
        }
        if (td->clat_log) {
                if (td->o.lat_log_file) {
                        finish_log_named(td, td->clat_log,
                                                td->o.lat_log_file, "clat");
                } else
                        finish_log(td, td->clat_log, "clat");
        }
        if (td->iops_log) {
                if (td->o.iops_log_file) {
                        finish_log_named(td, td->iops_log,
                                                td->o.iops_log_file, "iops");
                } else
                        finish_log(td, td->iops_log, "iops");
        }

        fio_mutex_up(writeout_mutex);
        if (td->o.exec_postrun)
                exec_string(td->o.exec_postrun);

        if (exitall_on_terminate)
                fio_terminate_threads(td->groupid);

err:
        if (td->error)
                log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
                                                        td->verror);

        if (td->o.verify_async)
                verify_async_exit(td);

        close_and_free_files(td);
        close_ioengine(td);
        cleanup_io_u(td);
        cgroup_shutdown(td, &cgroup_mnt);

        if (td->o.cpumask_set) {
                int ret = fio_cpuset_exit(&td->o.cpumask);

                td_verror(td, ret, "fio_cpuset_exit");
        }

        /*
         * do this very late, it will log file closing as well
         */
        if (td->o.write_iolog_file)
                write_iolog_close(td);

        options_mem_free(td);
        td_set_runstate(td, TD_EXITED);
        return (void *) (unsigned long) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
        struct thread_data *td;
        void *data, *ret;

#ifndef __hpux
        data = shmat(shmid, NULL, 0);
        if (data == (void *) -1) {
                int __err = errno;

                perror("shmat");
                return __err;
        }
#else
        /*
         * HP-UX inherits shm mappings?
         */
        data = threads;
#endif

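        /*
         * The shm segment holds the entire thread_data array; offset is
         * this job's index into it.
         */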
        td = data + offset * sizeof(struct thread_data);
        ret = thread_main(td);
        shmdt(data);
        return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
        struct thread_data *td;
        int i, cputhreads, realthreads, pending, status, ret;

        /*
         * reap exited threads (TD_EXITED -> TD_REAPED)
         */
        realthreads = pending = cputhreads = 0;
        for_each_td(td, i) {
                int flags = 0;

                /*
                 * ->io_ops is NULL for a thread that has closed its
                 * io engine
                 */
                if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
                        cputhreads++;
                else
                        realthreads++;

                if (!td->pid) {
                        pending++;
                        continue;
                }
                if (td->runstate == TD_REAPED)
                        continue;
                if (td->o.use_thread) {
                        if (td->runstate == TD_EXITED) {
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        continue;
                }

                flags = WNOHANG;
                if (td->runstate == TD_EXITED)
                        flags = 0;

                /*
                 * check if someone quit or got killed in an unusual way
                 */
                ret = waitpid(td->pid, &status, flags);
                if (ret < 0) {
                        if (errno == ECHILD) {
                                log_err("fio: pid=%d disappeared %d\n",
                                                (int) td->pid, td->runstate);
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        perror("waitpid");
                } else if (ret == td->pid) {
                        if (WIFSIGNALED(status)) {
                                int sig = WTERMSIG(status);

                                if (sig != SIGTERM)
                                        log_err("fio: pid=%d, got signal=%d\n",
                                                        (int) td->pid, sig);
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        if (WIFEXITED(status)) {
                                if (WEXITSTATUS(status) && !td->error)
                                        td->error = WEXITSTATUS(status);

                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                }

                /*
                 * thread is not dead, continue
                 */
                pending++;
                continue;
reaped:
                (*nr_running)--;
                (*m_rate) -= (td->o.ratemin[0] + td->o.ratemin[1]);
                (*t_rate) -= (td->o.rate[0] + td->o.rate[1]);
                if (!td->pid)
                        pending--;

                if (td->error)
                        exit_value++;

                done_secs += mtime_since_now(&td->epoch) / 1000;
        }

        if (*nr_running == cputhreads && !pending && realthreads)
                fio_terminate_threads(TERMINATE_ALL);
}

static void *gtod_thread_main(void *data)
{
        fio_mutex_up(startup_mutex);

        /*
         * As long as we have jobs around, update the clock. It would be nice
         * to have some way of NOT hammering that CPU with gettimeofday(),
         * but I'm not sure what to use outside of a simple CPU nop to relax
         * it - we don't want to lose precision.
         */
        while (threads) {
                fio_gtod_update();
                nop;
        }

        return NULL;
}

static int fio_start_gtod_thread(void)
{
        pthread_attr_t attr;
        int ret;

        pthread_attr_init(&attr);
        pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
        ret = pthread_create(&gtod_thread, &attr, gtod_thread_main, NULL);
        pthread_attr_destroy(&attr);
        if (ret) {
                log_err("Can't create gtod thread: %s\n", strerror(ret));
                return 1;
        }

        ret = pthread_detach(gtod_thread);
        if (ret) {
                log_err("Can't detach gtod thread: %s\n", strerror(ret));
                return 1;
        }

        dprint(FD_MUTEX, "wait on startup_mutex\n");
        fio_mutex_down(startup_mutex);
        dprint(FD_MUTEX, "done waiting on startup_mutex\n");
        return 0;
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
        struct thread_data *td;
        unsigned long spent;
        int i, todo, nr_running, m_rate, t_rate, nr_started;

        if (fio_pin_memory())
                return;

        if (fio_gtod_offload && fio_start_gtod_thread())
                return;

        set_sig_handlers();

        if (!terse_output) {
                log_info("Starting ");
                if (nr_thread)
                        log_info("%d thread%s", nr_thread,
                                                nr_thread > 1 ? "s" : "");
                if (nr_process) {
                        if (nr_thread)
                                log_info(" and ");
1556                         log_info("%d process%s", nr_process,
1557                                                 nr_process > 1 ? "es" : "");
1558                 }
1559                 log_info("\n");
1560                 fflush(stdout);
1561         }
1562
1563         todo = thread_number;
1564         nr_running = 0;
1565         nr_started = 0;
1566         m_rate = t_rate = 0;
1567
1568         for_each_td(td, i) {
1569                 print_status_init(td->thread_number - 1);
1570
1571                 if (!td->o.create_serialize)
1572                         continue;
1573
1574                 /*
1575                  * do file setup here so it happens sequentially,
1576                  * we don't want X number of threads getting their
1577                  * client data interspersed on disk
1578                  */
1579                 if (setup_files(td)) {
1580                         exit_value++;
1581                         if (td->error)
1582                                 log_err("fio: pid=%d, err=%d/%s\n",
1583                                         (int) td->pid, td->error, td->verror);
1584                         td_set_runstate(td, TD_REAPED);
1585                         todo--;
1586                 } else {
1587                         struct fio_file *f;
1588                         unsigned int j;
1589
1590                         /*
1591                          * for sharing to work, each job must always open
1592                          * its own files, so close any files we opened
1593                          * here just for creation
1594                          */
1595                         for_each_file(td, f, j) {
1596                                 if (fio_file_open(f))
1597                                         td_io_close_file(td, f);
1598                         }
1599                 }
1600         }
1601
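             /*
              * Set the reference time that job start delays are measured
              * against (see the mtime_since_genesis() check below).
              */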
1602         set_genesis_time();
1603
1604         while (todo) {
1605                 struct thread_data *map[REAL_MAX_JOBS];
1606                 struct timeval this_start;
1607                 int this_jobs = 0, left;
1608
1609                 /*
1610                  * create threads (TD_NOT_CREATED -> TD_CREATED)
1611                  */
1612                 for_each_td(td, i) {
1613                         if (td->runstate != TD_NOT_CREATED)
1614                                 continue;
1615
1616                         /*
1617                          * never got a chance to start; killed by another
1618                          * thread for some reason
1619                          */
1620                         if (td->terminate) {
1621                                 todo--;
1622                                 continue;
1623                         }
1624
1625                         if (td->o.start_delay) {
1626                                 spent = mtime_since_genesis();
1627
1628                                 if (td->o.start_delay * 1000 > spent)
1629                                         continue;
1630                         }
1631
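                             /*
                              * stonewall serializes the job sequence: don't
                              * start this job while any earlier job is still
                              * starting up or running.
                              */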
1632                         if (td->o.stonewall && (nr_started || nr_running)) {
1633                                 dprint(FD_PROCESS, "%s: stonewall wait\n",
1634                                                         td->o.name);
1635                                 break;
1636                         }
1637
1638                         init_disk_util(td);
1639
1640                         /*
1641                          * Set state to created. Thread will transition
1642                          * to TD_INITIALIZED when it's done setting up.
1643                          */
1644                         td_set_runstate(td, TD_CREATED);
1645                         map[this_jobs++] = td;
1646                         nr_started++;
1647
1648                         if (td->o.use_thread) {
1649                                 int ret;
1650
1651                                 dprint(FD_PROCESS, "will pthread_create\n");
1652                                 ret = pthread_create(&td->thread, NULL,
1653                                                         thread_main, td);
1654                                 if (ret) {
1655                                         log_err("pthread_create: %s\n",
1656                                                         strerror(ret));
1657                                         nr_started--;
1658                                         break;
1659                                 }
1660                                 ret = pthread_detach(td->thread);
1661                                 if (ret)
1662                                         log_err("pthread_detach: %s\n",
1663                                                         strerror(ret));
1664                         } else {
1665                                 pid_t pid;
1666                                 dprint(FD_PROCESS, "will fork\n");
1667                                 pid = fork();
1668                                 if (!pid) {
1669                                         int ret = fork_main(shm_id, i);
1670
1671                                         _exit(ret);
1672                                 } else if (i == fio_debug_jobno)
1673                                         *fio_debug_jobp = pid;
1674                         }
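                             /*
                              * Every new job ups startup_mutex once it has
                              * finished its basic setup; wait for that here,
                              * and bail out if startup appears to be hung.
                              */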
1675                         dprint(FD_MUTEX, "wait on startup_mutex\n");
1676                         if (fio_mutex_down_timeout(startup_mutex, 10)) {
1677                                 log_err("fio: job startup hung? exiting.\n");
1678                                 fio_terminate_threads(TERMINATE_ALL);
1679                                 fio_abort = 1;
1680                                 nr_started--;
1681                                 break;
1682                         }
1683                         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
1684                 }
1685
1686                 /*
1687                  * Wait for the started threads to transition to
1688                  * TD_INITIALIZED.
1689                  */
1690                 fio_gettime(&this_start, NULL);
1691                 left = this_jobs;
1692                 while (left && !fio_abort) {
1693                         if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
1694                                 break;
1695
1696                         usleep(100000);
1697
1698                         for (i = 0; i < this_jobs; i++) {
1699                                 td = map[i];
1700                                 if (!td)
1701                                         continue;
1702                                 if (td->runstate == TD_INITIALIZED) {
1703                                         map[i] = NULL;
1704                                         left--;
1705                                 } else if (td->runstate >= TD_EXITED) {
1706                                         map[i] = NULL;
1707                                         left--;
1708                                         todo--;
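                                             /*
                                              * The job exited before it ever
                                              * reached TD_RUNNING; bump
                                              * nr_running so the
                                              * reap_threads() call below
                                              * still accounts for it.
                                              */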
1709                                         nr_running++; /* work-around... */
1710                                 }
1711                         }
1712                 }
1713
1714                 if (left) {
1715                         log_err("fio: %d jobs failed to start\n", left);
1716                         for (i = 0; i < this_jobs; i++) {
1717                                 td = map[i];
1718                                 if (!td)
1719                                         continue;
1720                                 kill(td->pid, SIGTERM);
1721                         }
1722                         break;
1723                 }
1724
1725                 /*
1726                  * start created threads (TD_INITIALIZED -> TD_RUNNING).
1727                  */
1728                 for_each_td(td, i) {
1729                         if (td->runstate != TD_INITIALIZED)
1730                                 continue;
1731
1732                         if (in_ramp_time(td))
1733                                 td_set_runstate(td, TD_RAMP);
1734                         else
1735                                 td_set_runstate(td, TD_RUNNING);
1736                         nr_running++;
1737                         nr_started--;
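                             /*
                              * Add this job's minimum and target rates to the
                              * running totals; reap_threads() subtracts them
                              * again when the job exits.
                              */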
1738                         m_rate += td->o.ratemin[0] + td->o.ratemin[1];
1739                         t_rate += td->o.rate[0] + td->o.rate[1];
1740                         todo--;
1741                         fio_mutex_up(td->mutex);
1742                 }
1743
1744                 reap_threads(&nr_running, &t_rate, &m_rate);
1745
1746                 if (todo) {
1747                         if (is_backend)
1748                                 fio_server_idle_loop();
1749                         else
1750                                 usleep(100000);
1751                 }
1752         }
1753
1754         while (nr_running) {
1755                 reap_threads(&nr_running, &t_rate, &m_rate);
1756
1757                 if (is_backend)
1758                         fio_server_idle_loop();
1759                 else
1760                         usleep(10000);
1761         }
1762
1763         update_io_ticks();
1764         fio_unpin_memory();
1765 }
1766
1767 int exec_run(void)
1768 {
1769         if (nr_clients)
1770                 return fio_handle_clients();
1771         if (exec_profile && load_profile(exec_profile))
1772                 return 1;
1773
1774         if (!thread_number)
1775                 return 0;
1776
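             /*
              * The aggregate bandwidth logs collect read and write bandwidth
              * across all jobs; they are flushed to disk once the run ends.
              */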
1777         if (write_bw_log) {
1778                 setup_log(&agg_io_log[DDIR_READ]);
1779                 setup_log(&agg_io_log[DDIR_WRITE]);
1780         }
1781
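             /*
              * startup_mutex starts out locked, so the first down() on it
              * blocks until a job ups it; writeout_mutex starts unlocked
              * and serializes the jobs' end-of-run output.
              */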
1782         startup_mutex = fio_mutex_init(0);
1783         if (startup_mutex == NULL)
1784                 return 1;
1785         writeout_mutex = fio_mutex_init(1);
1786         if (writeout_mutex == NULL)
1787                 return 1;
1788
1789         set_genesis_time();
1790         create_disk_util_thread();
1791
1792         cgroup_list = smalloc(sizeof(*cgroup_list));
1793         INIT_FLIST_HEAD(cgroup_list);
1794
1795         run_threads();
1796
1797         if (!fio_abort) {
1798                 show_run_stats();
1799                 if (write_bw_log) {
1800                         __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
1801                         __finish_log(agg_io_log[DDIR_WRITE],
1802                                         "agg-write_bw.log");
1803                 }
1804         }
1805
1806         cgroup_kill(cgroup_list);
1807         sfree(cgroup_list);
1808         sfree(cgroup_mnt);
1809
1810         fio_mutex_remove(startup_mutex);
1811         fio_mutex_remove(writeout_mutex);
1812         return exit_value;
1813 }
1814
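     /*
      * Reset global state so that a subsequent run starts from a clean
      * slate, e.g. when the backend services consecutive job sets.
      */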
1815 void reset_fio_state(void)
1816 {
1817         groupid = 0;
1818         thread_number = 0;
1819         nr_process = 0;
1820         nr_thread = 0;
1821         done_secs = 0;
1822 }
1823
1824 static int endian_check(void)
1825 {
1826         union {
1827                 uint8_t c[8];
1828                 uint64_t v;
1829         } u;
1830         int le = 0, be = 0;
1831
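             /*
              * Store a known byte in a 64-bit word and check which end it
              * lands on: low byte first means little endian, high byte
              * first means big endian. The runtime result must match the
              * compile-time FIO_*_ENDIAN setting.
              */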
1832         u.v = 0x12;
1833         if (u.c[7] == 0x12)
1834                 be = 1;
1835         else if (u.c[0] == 0x12)
1836                 le = 1;
1837
1838 #if defined(FIO_LITTLE_ENDIAN)
1839         if (be)
1840                 return 1;
1841 #elif defined(FIO_BIG_ENDIAN)
1842         if (le)
1843                 return 1;
1844 #else
1845         return 1;
1846 #endif
1847
1848         if (!le && !be)
1849                 return 1;
1850
1851         return 0;
1852 }
1853
1854 int main(int argc, char *argv[], char *envp[])
1855 {
1856         long ps;
1857
1858         if (endian_check()) {
1859                 log_err("fio: endianness settings appear wrong.\n");
1860                 log_err("fio: please report this to fio@vger.kernel.org\n");
1861                 return 1;
1862         }
1863
1864         arch_init(envp);
1865
1866         sinit();
1867
1868         /*
1869          * We need the locale for number printing; if it isn't set, just
1870          * go with the US format.
1871          */
1872         if (!getenv("LC_NUMERIC"))
1873                 setlocale(LC_NUMERIC, "en_US");
1874
1875         ps = sysconf(_SC_PAGESIZE);
1876         if (ps < 0) {
1877                 log_err("Failed to get page size\n");
1878                 return 1;
1879         }
1880
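             /*
              * PAGE_ALIGN() relies on the page size being a power of two,
              * so a simple mask rounds buffers up to a page boundary.
              */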
1881         page_size = ps;
1882         page_mask = ps - 1;
1883
1884         fio_keywords_init();
1885
1886         if (parse_options(argc, argv))
1887                 return 1;
1888
1889         return exec_run();
1890 }