bdf9fd188dde338bcffe13a4eb43df938993fbf6
[fio.git] / fio.c
1 /*
2  * fio - the flexible io tester
3  *
4  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
6  *
7  * The license below covers all files distributed with fio unless otherwise
8  * noted in the file itself.
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License version 2 as
12  *  published by the Free Software Foundation.
13  *
14  *  This program is distributed in the hope that it will be useful,
15  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  *  GNU General Public License for more details.
18  *
19  *  You should have received a copy of the GNU General Public License
20  *  along with this program; if not, write to the Free Software
21  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  *
23  */
24 #include <unistd.h>
25 #include <fcntl.h>
26 #include <string.h>
27 #include <signal.h>
28 #include <time.h>
29 #include <locale.h>
30 #include <assert.h>
31 #include <sys/stat.h>
32 #include <sys/wait.h>
33 #include <sys/ipc.h>
34 #include <sys/shm.h>
35 #include <sys/mman.h>
36
37 #include "fio.h"
38 #include "hash.h"
39 #include "smalloc.h"
40 #include "verify.h"
41 #include "diskutil.h"
42
43 unsigned long page_mask;
44 unsigned long page_size;
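/*
 * ALIGN() rounds a buffer pointer up to the next page boundary. page_mask is
 * page_size - 1 (set in main()), so e.g. with 4 KiB pages a pointer of 0x1234
 * becomes (0x1234 + 0xfff) & ~0xfff = 0x2000. It is used to page-align the IO
 * buffer when O_DIRECT is requested.
 */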
45 #define ALIGN(buf)      \
46         (char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
47
48 int groupid = 0;
49 int thread_number = 0;
50 int nr_process = 0;
51 int nr_thread = 0;
52 int shm_id = 0;
53 int temp_stall_ts;
54 unsigned long done_secs = 0;
55
56 static struct fio_mutex *startup_mutex;
57 static struct fio_mutex *writeout_mutex;
58 static volatile int fio_abort;
59 static int exit_value;
60 static struct itimerval itimer;
61 static pthread_t gtod_thread;
62
63 struct io_log *agg_io_log[2];
64
65 #define TERMINATE_ALL           (-1)
66 #define JOB_START_TIMEOUT       (5 * 1000)
67
68 void td_set_runstate(struct thread_data *td, int runstate)
69 {
70         if (td->runstate == runstate)
71                 return;
72
73         dprint(FD_PROCESS, "pid=%d: runstate %d -> %d\n", (int) td->pid,
74                                                 td->runstate, runstate);
75         td->runstate = runstate;
76 }
77
78 static void terminate_threads(int group_id)
79 {
80         struct thread_data *td;
81         int i;
82
83         dprint(FD_PROCESS, "terminate group_id=%d\n", group_id);
84
85         for_each_td(td, i) {
86                 if (group_id == TERMINATE_ALL || group_id == td->groupid) {
87                         dprint(FD_PROCESS, "setting terminate on %s/%d\n",
88                                                 td->o.name, (int) td->pid);
89                         td->terminate = 1;
90                         td->o.start_delay = 0;
91
92                         /*
93                          * if the thread is running, just let it exit
94                          */
95                         if (td->runstate < TD_RUNNING)
96                                 kill(td->pid, SIGQUIT);
97                         else {
98                                 struct ioengine_ops *ops = td->io_ops;
99
100                                 if (ops && (ops->flags & FIO_SIGQUIT))
101                                         kill(td->pid, SIGQUIT);
102                         }
103                 }
104         }
105 }
106
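/*
 * Periodic status updates: status_timer_arm() arms a one-shot ITIMER_REAL
 * that fires after DISK_UTIL_MSEC milliseconds. The SIGALRM handler below
 * refreshes the disk utilization stats and the status line and then re-arms
 * the timer, so updates continue only while jobs exist (threads != NULL).
 */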
107 static void status_timer_arm(void)
108 {
109         itimer.it_value.tv_sec = 0;
110         itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
111         setitimer(ITIMER_REAL, &itimer, NULL);
112 }
113
114 static void sig_alrm(int fio_unused sig)
115 {
116         if (threads) {
117                 update_io_ticks();
118                 print_thread_status();
119                 status_timer_arm();
120         }
121 }
122
123 /*
124  * Happens on thread runs with ctrl-c: the SIGQUIT we send from terminate_threads() is delivered to our own process, so ignore it
125  */
126 static void sig_quit(int sig)
127 {
128 }
129
130 static void sig_int(int sig)
131 {
132         if (threads) {
133                 printf("\nfio: terminating on signal %d\n", sig);
134                 fflush(stdout);
135                 terminate_threads(TERMINATE_ALL);
136         }
137 }
138
139 static void sig_ill(int fio_unused sig)
140 {
141         if (!threads)
142                 return;
143
144         log_err("fio: illegal instruction. your cpu does not support "
145                 "the sse4.2 instruction for crc32c\n");
146         terminate_threads(TERMINATE_ALL);
147         exit(4);
148 }
149
150 static void set_sig_handlers(void)
151 {
152         struct sigaction act;
153
154         memset(&act, 0, sizeof(act));
155         act.sa_handler = sig_alrm;
156         act.sa_flags = SA_RESTART;
157         sigaction(SIGALRM, &act, NULL);
158
159         memset(&act, 0, sizeof(act));
160         act.sa_handler = sig_int;
161         act.sa_flags = SA_RESTART;
162         sigaction(SIGINT, &act, NULL);
163
164         memset(&act, 0, sizeof(act));
165         act.sa_handler = sig_ill;
166         act.sa_flags = SA_RESTART;
167         sigaction(SIGILL, &act, NULL);
168
169         memset(&act, 0, sizeof(act));
170         act.sa_handler = sig_quit;
171         act.sa_flags = SA_RESTART;
172         sigaction(SIGQUIT, &act, NULL);
173 }
174
175 static inline int should_check_rate(struct thread_data *td)
176 {
177         struct thread_options *o = &td->o;
178
179         /*
180          * If some rate setting was given, we need to check it
181          */
182         if (o->rate || o->ratemin || o->rate_iops || o->rate_iops_min)
183                 return 1;
184
185         return 0;
186 }
187
188 /*
189  * Check if we are above the minimum rate given.
190  */
191 static int check_min_rate(struct thread_data *td, struct timeval *now)
192 {
193         unsigned long long bytes = 0;
194         unsigned long iops = 0;
195         unsigned long spent;
196         unsigned long rate;
197
198         /*
199          * allow a 2 second settle period in the beginning
200          */
201         if (mtime_since(&td->start, now) < 2000)
202                 return 0;
203
204         if (td_read(td)) {
205                 iops += td->io_blocks[DDIR_READ];
206                 bytes += td->this_io_bytes[DDIR_READ];
207         }
208         if (td_write(td)) {
209                 iops += td->io_blocks[DDIR_WRITE];
210                 bytes += td->this_io_bytes[DDIR_WRITE];
211         }
212
213         /*
214          * if rate blocks is set, sample is running
215          */
216         if (td->rate_bytes || td->rate_blocks) {
217                 spent = mtime_since(&td->lastrate, now);
218                 if (spent < td->o.ratecycle)
219                         return 0;
220
221                 if (td->o.rate) {
222                         /*
223                          * check bandwidth specified rate
224                          */
225                         if (bytes < td->rate_bytes) {
226                                 log_err("%s: min rate %u not met\n", td->o.name,
227                                                                 td->o.ratemin);
228                                 return 1;
229                         } else {
230                                 rate = (bytes - td->rate_bytes) / spent;
231                                 if (rate < td->o.ratemin ||
232                                     bytes < td->rate_bytes) {
233                                         log_err("%s: min rate %u not met, got"
234                                                 " %luKiB/sec\n", td->o.name,
235                                                         td->o.ratemin, rate);
236                                         return 1;
237                                 }
238                         }
239                 } else {
240                         /*
241                          * checks iops specified rate
242                          */
243                         if (iops < td->o.rate_iops) {
244                                 log_err("%s: min iops rate %u not met\n",
245                                                 td->o.name, td->o.rate_iops);
246                                 return 1;
247                         } else {
248                                 rate = (iops - td->rate_blocks) / spent;
249                                 if (rate < td->o.rate_iops_min ||
250                                     iops < td->rate_blocks) {
251                                         log_err("%s: min iops rate %u not met,"
252                                                 " got %lu\n", td->o.name,
253                                                 td->o.rate_iops_min, rate);
254                                         return 1;
255                                 }
256                         }
257                 }
258         }
259
260         td->rate_bytes = bytes;
261         td->rate_blocks = iops;
262         memcpy(&td->lastrate, now, sizeof(*now));
263         return 0;
264 }
265
266 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
267 {
268         if (!td->o.timeout)
269                 return 0;
270         if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
271                 return 1;
272
273         return 0;
274 }
275
276 /*
277  * When job exits, we can cancel the in-flight IO if we are using async
278  * io. Attempt to do so.
279  */
280 static void cleanup_pending_aio(struct thread_data *td)
281 {
282         struct flist_head *entry, *n;
283         struct io_u *io_u;
284         int r;
285
286         /*
287          * get immediately available events, if any
288          */
289         r = io_u_queued_complete(td, 0);
290         if (r < 0)
291                 return;
292
293         /*
294          * now cancel remaining active events
295          */
296         if (td->io_ops->cancel) {
297                 flist_for_each_safe(entry, n, &td->io_u_busylist) {
298                         io_u = flist_entry(entry, struct io_u, list);
299
300                         /*
301                          * if the io_u isn't in flight, then that generally
302                          * means someone leaked an io_u. complain but fix
303                          * it up, so we don't stall here.
304                          */
305                         if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
306                                 log_err("fio: non-busy IO on busy list\n");
307                                 put_io_u(td, io_u);
308                         } else {
309                                 r = td->io_ops->cancel(td, io_u);
310                                 if (!r)
311                                         put_io_u(td, io_u);
312                         }
313                 }
314         }
315
316         if (td->cur_depth)
317                 r = io_u_queued_complete(td, td->cur_depth);
318 }
319
320 /*
321  * Helper to handle the final sync of a file. Works just like the normal
322  * io path, just does everything sync.
323  */
324 static int fio_io_sync(struct thread_data *td, struct fio_file *f)
325 {
326         struct io_u *io_u = __get_io_u(td);
327         int ret;
328
329         if (!io_u)
330                 return 1;
331
332         io_u->ddir = DDIR_SYNC;
333         io_u->file = f;
334
335         if (td_io_prep(td, io_u)) {
336                 put_io_u(td, io_u);
337                 return 1;
338         }
339
340 requeue:
341         ret = td_io_queue(td, io_u);
342         if (ret < 0) {
343                 td_verror(td, io_u->error, "td_io_queue");
344                 put_io_u(td, io_u);
345                 return 1;
346         } else if (ret == FIO_Q_QUEUED) {
347                 if (io_u_queued_complete(td, 1) < 0)
348                         return 1;
349         } else if (ret == FIO_Q_COMPLETED) {
350                 if (io_u->error) {
351                         td_verror(td, io_u->error, "td_io_queue");
352                         return 1;
353                 }
354
355                 if (io_u_sync_complete(td, io_u) < 0)
356                         return 1;
357         } else if (ret == FIO_Q_BUSY) {
358                 if (td_io_commit(td))
359                         return 1;
360                 goto requeue;
361         }
362
363         return 0;
364 }
365
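/*
 * Cheap time source for the hot IO path: callers read td->tv_cache instead
 * of calling gettimeofday() directly. The cache is only refreshed when the
 * low bits of the call counter hit tv_cache_mask, i.e. once every
 * tv_cache_mask + 1 calls (assuming the mask is of the 2^n - 1 form set up
 * elsewhere).
 */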
366 static inline void update_tv_cache(struct thread_data *td)
367 {
368         if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
369                 fio_gettime(&td->tv_cache, NULL);
370 }
371
372 /*
373  * The main verify engine. Runs over the writes we previously submitted,
374  * reads the blocks back in, and checks the crc/md5 of the data.
375  */
376 static void do_verify(struct thread_data *td)
377 {
378         struct fio_file *f;
379         struct io_u *io_u;
380         int ret, min_events;
381         unsigned int i;
382
383         /*
384          * sync io first and invalidate cache, to make sure we really
385          * read from disk.
386          */
387         for_each_file(td, f, i) {
388                 if (!fio_file_open(f))
389                         continue;
390                 if (fio_io_sync(td, f))
391                         break;
392                 if (file_invalidate_cache(td, f))
393                         break;
394         }
395
396         if (td->error)
397                 return;
398
399         td_set_runstate(td, TD_VERIFYING);
400
401         io_u = NULL;
402         while (!td->terminate) {
403                 int ret2, full;
404
405                 io_u = __get_io_u(td);
406                 if (!io_u)
407                         break;
408
409                 update_tv_cache(td);
410
411                 if (runtime_exceeded(td, &td->tv_cache)) {
412                         put_io_u(td, io_u);
413                         td->terminate = 1;
414                         break;
415                 }
416
417                 if (get_next_verify(td, io_u)) {
418                         put_io_u(td, io_u);
419                         break;
420                 }
421
422                 if (td_io_prep(td, io_u)) {
423                         put_io_u(td, io_u);
424                         break;
425                 }
426
427                 io_u->end_io = verify_io_u;
428
429                 ret = td_io_queue(td, io_u);
430                 switch (ret) {
431                 case FIO_Q_COMPLETED:
432                         if (io_u->error)
433                                 ret = -io_u->error;
434                         else if (io_u->resid) {
435                                 int bytes = io_u->xfer_buflen - io_u->resid;
436                                 struct fio_file *f = io_u->file;
437
438                                 /*
439                                  * zero read, fail
440                                  */
441                                 if (!bytes) {
442                                         td_verror(td, EIO, "full resid");
443                                         put_io_u(td, io_u);
444                                         break;
445                                 }
446
447                                 io_u->xfer_buflen = io_u->resid;
448                                 io_u->xfer_buf += bytes;
449                                 io_u->offset += bytes;
450
451                                 td->ts.short_io_u[io_u->ddir]++;
452
453                                 if (io_u->offset == f->real_file_size)
454                                         goto sync_done;
455
456                                 requeue_io_u(td, &io_u);
457                         } else {
458 sync_done:
459                                 ret = io_u_sync_complete(td, io_u);
460                                 if (ret < 0)
461                                         break;
462                         }
463                         continue;
464                 case FIO_Q_QUEUED:
465                         break;
466                 case FIO_Q_BUSY:
467                         requeue_io_u(td, &io_u);
468                         ret2 = td_io_commit(td);
469                         if (ret2 < 0)
470                                 ret = ret2;
471                         break;
472                 default:
473                         assert(ret < 0);
474                         td_verror(td, -ret, "td_io_queue");
475                         break;
476                 }
477
478                 if (ret < 0 || td->error)
479                         break;
480
481                 /*
482                  * if we can queue more, do so. but check if there are
483                  * completed io_u's first.
484                  */
485                 full = queue_full(td) || ret == FIO_Q_BUSY;
486                 if (full || !td->o.iodepth_batch_complete) {
487                         min_events = td->o.iodepth_batch_complete;
488                         if (full && !min_events)
489                                 min_events = 1;
490
491                         do {
492                                 /*
493                                  * Reap required number of io units, if any,
494                                  * and do the verification on them through
495                                  * the callback handler
496                                  */
497                                 if (io_u_queued_complete(td, min_events) < 0) {
498                                         ret = -1;
499                                         break;
500                                 }
501                         } while (full && (td->cur_depth > td->o.iodepth_low));
502                 }
503                 if (ret < 0)
504                         break;
505         }
506
507         if (!td->error) {
508                 min_events = td->cur_depth;
509
510                 if (min_events)
511                         ret = io_u_queued_complete(td, min_events);
512         } else
513                 cleanup_pending_aio(td);
514
515         td_set_runstate(td, TD_RUNNING);
516 }
517
518 /*
519  * Main IO worker function. It retrieves io_u's to process and queues
520  * and reaps them, checking for rate and errors along the way.
521  */
522 static void do_io(struct thread_data *td)
523 {
524         unsigned long usec;
525         unsigned int i;
526         int ret = 0;
527
528         if (in_ramp_time(td))
529                 td_set_runstate(td, TD_RAMP);
530         else
531                 td_set_runstate(td, TD_RUNNING);
532
533         while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
534                 struct timeval comp_time;
535                 long bytes_done = 0;
536                 int min_evts = 0;
537                 struct io_u *io_u;
538                 int ret2, full;
539
540                 if (td->terminate)
541                         break;
542
543                 io_u = get_io_u(td);
544                 if (!io_u)
545                         break;
546
547                 update_tv_cache(td);
548
549                 if (runtime_exceeded(td, &td->tv_cache)) {
550                         put_io_u(td, io_u);
551                         td->terminate = 1;
552                         break;
553                 }
554
555                 /*
556                  * Add verification end_io handler, if asked to verify
557                  * a previously written file.
558                  */
559                 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ) {
560                         io_u->end_io = verify_io_u;
561                         td_set_runstate(td, TD_VERIFYING);
562                 } else if (in_ramp_time(td))
563                         td_set_runstate(td, TD_RAMP);
564                 else
565                         td_set_runstate(td, TD_RUNNING);
566
567                 ret = td_io_queue(td, io_u);
568                 switch (ret) {
569                 case FIO_Q_COMPLETED:
570                         if (io_u->error)
571                                 ret = -io_u->error;
572                         else if (io_u->resid) {
573                                 int bytes = io_u->xfer_buflen - io_u->resid;
574                                 struct fio_file *f = io_u->file;
575
576                                 /*
577                                  * zero read, fail
578                                  */
579                                 if (!bytes) {
580                                         td_verror(td, EIO, "full resid");
581                                         put_io_u(td, io_u);
582                                         break;
583                                 }
584
585                                 io_u->xfer_buflen = io_u->resid;
586                                 io_u->xfer_buf += bytes;
587                                 io_u->offset += bytes;
588
589                                 td->ts.short_io_u[io_u->ddir]++;
590
591                                 if (io_u->offset == f->real_file_size)
592                                         goto sync_done;
593
594                                 requeue_io_u(td, &io_u);
595                         } else {
596 sync_done:
597                                 if (should_check_rate(td))
598                                         fio_gettime(&comp_time, NULL);
599
600                                 bytes_done = io_u_sync_complete(td, io_u);
601                                 if (bytes_done < 0)
602                                         ret = bytes_done;
603                         }
604                         break;
605                 case FIO_Q_QUEUED:
606                         /*
607                          * if the engine doesn't have a commit hook,
608                          * the io_u is really queued. if it does have such
609                          * a hook, it has to call io_u_queued() itself.
610                          */
611                         if (td->io_ops->commit == NULL)
612                                 io_u_queued(td, io_u);
613                         break;
614                 case FIO_Q_BUSY:
615                         requeue_io_u(td, &io_u);
616                         ret2 = td_io_commit(td);
617                         if (ret2 < 0)
618                                 ret = ret2;
619                         break;
620                 default:
621                         assert(ret < 0);
622                         put_io_u(td, io_u);
623                         break;
624                 }
625
626                 if (ret < 0 || td->error)
627                         break;
628
629                 /*
630                  * See if we need to complete some commands
631                  */
632                 full = queue_full(td) || ret == FIO_Q_BUSY;
633                 if (full || !td->o.iodepth_batch_complete) {
634                         min_evts = td->o.iodepth_batch_complete;
635                         if (full && !min_evts)
636                                 min_evts = 1;
637
638                         if (should_check_rate(td))
639                                 fio_gettime(&comp_time, NULL);
640
641                         do {
642                                 ret = io_u_queued_complete(td, min_evts);
643                                 if (ret <= 0)
644                                         break;
645
646                                 bytes_done += ret;
647                         } while (full && (td->cur_depth > td->o.iodepth_low));
648                 }
649
650                 if (ret < 0)
651                         break;
652                 if (!bytes_done)
653                         continue;
654
655                 /*
656                  * the rate is batched for now, it should work for batches
657                  * of completions except the very first one which may look
658                  * a little bursty
659                  */
660                 if (!in_ramp_time(td) && should_check_rate(td)) {
661                         usec = utime_since(&td->tv_cache, &comp_time);
662
663                         rate_throttle(td, usec, bytes_done);
664
665                         if (check_min_rate(td, &comp_time)) {
666                                 if (exitall_on_terminate)
667                                         terminate_threads(td->groupid);
668                                 td_verror(td, EIO, "check_min_rate");
669                                 break;
670                         }
671                 }
672
673                 if (td->o.thinktime) {
674                         unsigned long long b;
675
676                         b = td->io_blocks[0] + td->io_blocks[1];
677                         if (!(b % td->o.thinktime_blocks)) {
678                                 int left;
679
680                                 if (td->o.thinktime_spin)
681                                         usec_spin(td->o.thinktime_spin);
682
683                                 left = td->o.thinktime - td->o.thinktime_spin;
684                                 if (left)
685                                         usec_sleep(td, left);
686                         }
687                 }
688         }
689
690         if (td->o.fill_device && td->error == ENOSPC) {
691                 td->error = 0;
692                 td->terminate = 1;
693         }
694         if (!td->error) {
695                 struct fio_file *f;
696
697                 i = td->cur_depth;
698                 if (i)
699                         ret = io_u_queued_complete(td, i);
700
701                 if (should_fsync(td) && td->o.end_fsync) {
702                         td_set_runstate(td, TD_FSYNCING);
703
704                         for_each_file(td, f, i) {
705                                 if (!fio_file_open(f))
706                                         continue;
707                                 fio_io_sync(td, f);
708                         }
709                 }
710         } else
711                 cleanup_pending_aio(td);
712
713         /*
714          * stop job if we failed doing any IO
715          */
716         if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
717                 td->done = 1;
718 }
719
720 static void cleanup_io_u(struct thread_data *td)
721 {
722         struct flist_head *entry, *n;
723         struct io_u *io_u;
724
725         flist_for_each_safe(entry, n, &td->io_u_freelist) {
726                 io_u = flist_entry(entry, struct io_u, list);
727
728                 flist_del(&io_u->list);
729                 free(io_u);
730         }
731
732         free_io_mem(td);
733 }
734
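/*
 * init_io_u() sets up the io_u pool: it sizes the shared IO buffer as
 * max_bs * iodepth (rounded up to the hugepage size for the hugepage memory
 * types), page-aligns it for O_DIRECT, then allocates cache line aligned
 * io_u structures, points each at its slice of the buffer, and pre-fills
 * write buffers unless refill_buffers asks for per-IO refills.
 */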
735 static int init_io_u(struct thread_data *td)
736 {
737         struct io_u *io_u;
738         unsigned int max_bs;
739         int cl_align, i, max_units;
740         char *p;
741
742         max_units = td->o.iodepth;
743         max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
744         td->orig_buffer_size = (unsigned long long) max_bs
745                                         * (unsigned long long) max_units;
746
747         if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
748                 unsigned long bs;
749
750                 bs = td->orig_buffer_size + td->o.hugepage_size - 1;
751                 td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
752         }
753
754         if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
755                 log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
756                 return 1;
757         }
758
759         if (allocate_io_mem(td))
760                 return 1;
761
762         if (td->o.odirect)
763                 p = ALIGN(td->orig_buffer);
764         else
765                 p = td->orig_buffer;
766
767         cl_align = os_cache_line_size();
768
769         for (i = 0; i < max_units; i++) {
770                 void *ptr;
771
772                 if (td->terminate)
773                         return 1;
774
775                 if (posix_memalign(&ptr, cl_align, sizeof(*io_u))) {
776                         log_err("fio: posix_memalign=%s\n", strerror(errno));
777                         break;
778                 }
779
780                 io_u = ptr;
781                 memset(io_u, 0, sizeof(*io_u));
782                 INIT_FLIST_HEAD(&io_u->list);
783
784                 if (!(td->io_ops->flags & FIO_NOIO)) {
785                         io_u->buf = p + max_bs * i;
786
787                         if (td_write(td) && !td->o.refill_buffers)
788                                 io_u_fill_buffer(td, io_u, max_bs);
789                 }
790
791                 io_u->index = i;
792                 io_u->flags = IO_U_F_FREE;
793                 flist_add(&io_u->list, &td->io_u_freelist);
794         }
795
796         return 0;
797 }
798
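/*
 * switch_ioscheduler() writes the requested scheduler name into the device's
 * sysfs queue/scheduler file, then reads the file back and looks for the
 * name in brackets (the kernel's way of marking the active scheduler) to
 * confirm the switch actually took effect.
 */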
799 static int switch_ioscheduler(struct thread_data *td)
800 {
801         char tmp[256], tmp2[128];
802         FILE *f;
803         int ret;
804
805         if (td->io_ops->flags & FIO_DISKLESSIO)
806                 return 0;
807
808         sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
809
810         f = fopen(tmp, "r+");
811         if (!f) {
812                 if (errno == ENOENT) {
813                         log_err("fio: os or kernel doesn't support IO scheduler"
814                                 " switching\n");
815                         return 0;
816                 }
817                 td_verror(td, errno, "fopen iosched");
818                 return 1;
819         }
820
821         /*
822          * Set io scheduler.
823          */
824         ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
825         if (ferror(f) || ret != 1) {
826                 td_verror(td, errno, "fwrite");
827                 fclose(f);
828                 return 1;
829         }
830
831         rewind(f);
832         /*
833          * Read back and check that the selected scheduler is now the default.
834          */
835         memset(tmp, 0, sizeof(tmp));
836         ret = fread(tmp, 1, sizeof(tmp) - 1, f);
837         if (ferror(f) || ret < 0) {
838                 td_verror(td, errno, "fread");
839                 fclose(f);
840                 return 1;
841         }
842
843         sprintf(tmp2, "[%s]", td->o.ioscheduler);
844         if (!strstr(tmp, tmp2)) {
845                 log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
846                 td_verror(td, EINVAL, "iosched_switch");
847                 fclose(f);
848                 return 1;
849         }
850
851         fclose(f);
852         return 0;
853 }
854
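/*
 * Decide whether the main loop in thread_main() should do another pass:
 * time_based jobs always continue (runtime_exceeded() stops them), a loops
 * count is decremented once per pass, and otherwise we run until the
 * completed (plus skipped) bytes reach the requested size.
 */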
855 static int keep_running(struct thread_data *td)
856 {
857         unsigned long long io_done;
858
859         if (td->done)
860                 return 0;
861         if (td->o.time_based)
862                 return 1;
863         if (td->o.loops) {
864                 td->o.loops--;
865                 return 1;
866         }
867
868         io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE]
869                         + td->io_skip_bytes;
870         if (io_done < td->o.size)
871                 return 1;
872
873         return 0;
874 }
875
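/*
 * reset_io_counters() clears the per-pass counters shared by
 * clear_io_state() and reset_all_stats(), and reseeds the random generators
 * so that repeated passes (time_based/loops) issue an identical IO pattern.
 */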
876 static void reset_io_counters(struct thread_data *td)
877 {
878         td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
879         td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
880         td->zone_bytes = 0;
881         td->rate_bytes = 0;
882         td->rate_blocks = 0;
883         td->rw_end_set[0] = td->rw_end_set[1] = 0;
884
885         td->last_was_sync = 0;
886
887         /*
888          * reset file done count if we are to start over
889          */
890         if (td->o.time_based || td->o.loops)
891                 td->nr_done_files = 0;
892
893         /*
894          * Set the same seed to get repeatable runs
895          */
896         td_fill_rand_seeds(td);
897 }
898
899 void reset_all_stats(struct thread_data *td)
900 {
901         struct timeval tv;
902         int i;
903
904         reset_io_counters(td);
905
906         for (i = 0; i < 2; i++) {
907                 td->io_bytes[i] = 0;
908                 td->io_blocks[i] = 0;
909                 td->io_issues[i] = 0;
910                 td->ts.total_io_u[i] = 0;
911         }
912
913         fio_gettime(&tv, NULL);
914         memcpy(&td->epoch, &tv, sizeof(tv));
915         memcpy(&td->start, &tv, sizeof(tv));
916 }
917
918 static void clear_io_state(struct thread_data *td)
919 {
920         struct fio_file *f;
921         unsigned int i;
922
923         reset_io_counters(td);
924
925         close_files(td);
926         for_each_file(td, f, i)
927                 fio_file_clear_done(f);
928 }
929
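/*
 * exec_string() runs a user supplied exec_prerun/exec_postrun command via
 * system(), prefixed with "sh -c ". The buffer below is strlen(string) + 9
 * bytes, enough for the 6 byte prefix, the string and the terminating NUL.
 */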
930 static int exec_string(const char *string)
931 {
932         int ret, newlen = strlen(string) + 1 + 8;
933         char *str;
934
935         str = malloc(newlen);
936         sprintf(str, "sh -c %s", string);
937
938         ret = system(str);
939         if (ret == -1)
940                 log_err("fio: exec of cmd <%s> failed\n", str);
941
942         free(str);
943         return ret;
944 }
945
946 /*
947  * Entry point for the thread based jobs. The process based jobs end up
948  * here as well, after a little setup.
949  */
950 static void *thread_main(void *data)
951 {
952         unsigned long long runtime[2], elapsed;
953         struct thread_data *td = data;
954         int clear_state;
955
956         if (!td->o.use_thread)
957                 setsid();
958
959         td->pid = getpid();
960
961         dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
962
963         INIT_FLIST_HEAD(&td->io_u_freelist);
964         INIT_FLIST_HEAD(&td->io_u_busylist);
965         INIT_FLIST_HEAD(&td->io_u_requeues);
966         INIT_FLIST_HEAD(&td->io_log_list);
967         INIT_FLIST_HEAD(&td->io_hist_list);
968         td->io_hist_tree = RB_ROOT;
969
970         td_set_runstate(td, TD_INITIALIZED);
971         dprint(FD_MUTEX, "up startup_mutex\n");
972         fio_mutex_up(startup_mutex);
973         dprint(FD_MUTEX, "wait on td->mutex\n");
974         fio_mutex_down(td->mutex);
975         dprint(FD_MUTEX, "done waiting on td->mutex\n");
976
977         /*
978          * the ->mutex mutex is now no longer used, close it to avoid
979          * eating a file descriptor
980          */
981         fio_mutex_remove(td->mutex);
982
983         /*
984          * May alter parameters that init_io_u() will use, so we need to
985          * do this first.
986          */
987         if (init_iolog(td))
988                 goto err;
989
990         if (init_io_u(td))
991                 goto err;
992
993         if (td->o.cpumask_set && fio_setaffinity(td) == -1) {
994                 td_verror(td, errno, "cpu_set_affinity");
995                 goto err;
996         }
997
998         /*
999          * If we have a gettimeofday() thread, make sure we exclude that
1000          * thread from this job
1001          */
1002         if (td->o.gtod_cpu) {
1003                 fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
1004                 if (fio_setaffinity(td) == -1) {
1005                         td_verror(td, errno, "cpu_set_affinity");
1006                         goto err;
1007                 }
1008         }
1009
1010         if (td->ioprio_set) {
1011                 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
1012                         td_verror(td, errno, "ioprio_set");
1013                         goto err;
1014                 }
1015         }
1016
1017         if (nice(td->o.nice) == -1) {
1018                 td_verror(td, errno, "nice");
1019                 goto err;
1020         }
1021
1022         if (td->o.ioscheduler && switch_ioscheduler(td))
1023                 goto err;
1024
1025         if (!td->o.create_serialize && setup_files(td))
1026                 goto err;
1027
1028         if (td_io_init(td))
1029                 goto err;
1030
1031         if (init_random_map(td))
1032                 goto err;
1033
1034         if (td->o.exec_prerun) {
1035                 if (exec_string(td->o.exec_prerun))
1036                         goto err;
1037         }
1038
1039         if (td->o.pre_read) {
1040                 if (pre_read_files(td) < 0)
1041                         goto err;
1042         }
1043
1044         fio_gettime(&td->epoch, NULL);
1045         getrusage(RUSAGE_SELF, &td->ts.ru_start);
1046
1047         runtime[0] = runtime[1] = 0;
1048         clear_state = 0;
1049         while (keep_running(td)) {
1050                 fio_gettime(&td->start, NULL);
1051                 memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
1052                 memcpy(&td->tv_cache, &td->start, sizeof(td->start));
1053
1054                 if (td->o.ratemin)
1055                         memcpy(&td->lastrate, &td->ts.stat_sample_time,
1056                                                         sizeof(td->lastrate));
1057
1058                 if (clear_state)
1059                         clear_io_state(td);
1060
1061                 prune_io_piece_log(td);
1062
1063                 do_io(td);
1064
1065                 clear_state = 1;
1066
1067                 if (td_read(td) && td->io_bytes[DDIR_READ]) {
1068                         if (td->rw_end_set[DDIR_READ])
1069                                 elapsed = utime_since(&td->start,
1070                                                       &td->rw_end[DDIR_READ]);
1071                         else
1072                                 elapsed = utime_since_now(&td->start);
1073
1074                         runtime[DDIR_READ] += elapsed;
1075                 }
1076                 if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
1077                         if (td->rw_end_set[DDIR_WRITE])
1078                                 elapsed = utime_since(&td->start,
1079                                                       &td->rw_end[DDIR_WRITE]);
1080                         else
1081                                 elapsed = utime_since_now(&td->start);
1082
1083                         runtime[DDIR_WRITE] += elapsed;
1084                 }
1085
1086                 if (td->error || td->terminate)
1087                         break;
1088
1089                 if (!td->o.do_verify ||
1090                     td->o.verify == VERIFY_NONE ||
1091                     (td->io_ops->flags & FIO_UNIDIR))
1092                         continue;
1093
1094                 clear_io_state(td);
1095
1096                 fio_gettime(&td->start, NULL);
1097
1098                 do_verify(td);
1099
1100                 runtime[DDIR_READ] += utime_since_now(&td->start);
1101
1102                 if (td->error || td->terminate)
1103                         break;
1104         }
1105
1106         update_rusage_stat(td);
1107         td->ts.runtime[0] = (runtime[0] + 999) / 1000;
1108         td->ts.runtime[1] = (runtime[1] + 999) / 1000;
1109         td->ts.total_run_time = mtime_since_now(&td->epoch);
1110         td->ts.io_bytes[0] = td->io_bytes[0];
1111         td->ts.io_bytes[1] = td->io_bytes[1];
1112
1113         fio_mutex_down(writeout_mutex);
1114         if (td->ts.bw_log) {
1115                 if (td->o.bw_log_file) {
1116                         finish_log_named(td, td->ts.bw_log,
1117                                                 td->o.bw_log_file, "bw");
1118                 } else
1119                         finish_log(td, td->ts.bw_log, "bw");
1120         }
1121         if (td->ts.slat_log) {
1122                 if (td->o.lat_log_file) {
1123                         finish_log_named(td, td->ts.slat_log,
1124                                                 td->o.lat_log_file, "slat");
1125                 } else
1126                         finish_log(td, td->ts.slat_log, "slat");
1127         }
1128         if (td->ts.clat_log) {
1129                 if (td->o.lat_log_file) {
1130                         finish_log_named(td, td->ts.clat_log,
1131                                                 td->o.lat_log_file, "clat");
1132                 } else
1133                         finish_log(td, td->ts.clat_log, "clat");
1134         }
1135         fio_mutex_up(writeout_mutex);
1136         if (td->o.exec_postrun)
1137                 exec_string(td->o.exec_postrun);
1138
1139         if (exitall_on_terminate)
1140                 terminate_threads(td->groupid);
1141
1142 err:
1143         if (td->error)
1144                 printf("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
1145                                                         td->verror);
1146         close_and_free_files(td);
1147         close_ioengine(td);
1148         cleanup_io_u(td);
1149
1150         if (td->o.cpumask_set) {
1151                 int ret = fio_cpuset_exit(&td->o.cpumask);
1152
1153                 td_verror(td, ret, "fio_cpuset_exit");
1154         }
1155
1156         /*
1157          * do this very late, it will log file closing as well
1158          */
1159         if (td->o.write_iolog_file)
1160                 write_iolog_close(td);
1161
1162         options_mem_free(td);
1163         td_set_runstate(td, TD_EXITED);
1164         return (void *) (unsigned long) td->error;
1165 }
1166
1167 /*
1168  * We cannot pass the td data into a forked process, so attach the td and
1169  * pass it to the thread worker.
1170  */
1171 static int fork_main(int shmid, int offset)
1172 {
1173         struct thread_data *td;
1174         void *data, *ret;
1175
1176         data = shmat(shmid, NULL, 0);
1177         if (data == (void *) -1) {
1178                 int __err = errno;
1179
1180                 perror("shmat");
1181                 return __err;
1182         }
1183
1184         td = data + offset * sizeof(struct thread_data);
1185         ret = thread_main(td);
1186         shmdt(data);
1187         return (int) (unsigned long) ret;
1188 }
1189
1190 /*
1191  * Run over the job map and reap the threads that have exited, if any.
1192  */
1193 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1194 {
1195         struct thread_data *td;
1196         int i, cputhreads, realthreads, pending, status, ret;
1197
1198         /*
1199          * reap exited threads (TD_EXITED -> TD_REAPED)
1200          */
1201         realthreads = pending = cputhreads = 0;
1202         for_each_td(td, i) {
1203                 int flags = 0;
1204
1205                 /*
1206                  * ->io_ops is NULL for a thread that has closed its
1207                  * io engine
1208                  */
1209                 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
1210                         cputhreads++;
1211                 else
1212                         realthreads++;
1213
1214                 if (!td->pid) {
1215                         pending++;
1216                         continue;
1217                 }
1218                 if (td->runstate == TD_REAPED)
1219                         continue;
1220                 if (td->o.use_thread) {
1221                         if (td->runstate == TD_EXITED) {
1222                                 td_set_runstate(td, TD_REAPED);
1223                                 goto reaped;
1224                         }
1225                         continue;
1226                 }
1227
1228                 flags = WNOHANG;
1229                 if (td->runstate == TD_EXITED)
1230                         flags = 0;
1231
1232                 /*
1233                  * check if someone quit or got killed in an unusual way
1234                  */
1235                 ret = waitpid(td->pid, &status, flags);
1236                 if (ret < 0) {
1237                         if (errno == ECHILD) {
1238                                 log_err("fio: pid=%d disappeared %d\n",
1239                                                 (int) td->pid, td->runstate);
1240                                 td_set_runstate(td, TD_REAPED);
1241                                 goto reaped;
1242                         }
1243                         perror("waitpid");
1244                 } else if (ret == td->pid) {
1245                         if (WIFSIGNALED(status)) {
1246                                 int sig = WTERMSIG(status);
1247
1248                                 if (sig != SIGQUIT)
1249                                         log_err("fio: pid=%d, got signal=%d\n",
1250                                                         (int) td->pid, sig);
1251                                 td_set_runstate(td, TD_REAPED);
1252                                 goto reaped;
1253                         }
1254                         if (WIFEXITED(status)) {
1255                                 if (WEXITSTATUS(status) && !td->error)
1256                                         td->error = WEXITSTATUS(status);
1257
1258                                 td_set_runstate(td, TD_REAPED);
1259                                 goto reaped;
1260                         }
1261                 }
1262
1263                 /*
1264                  * thread is not dead, continue
1265                  */
1266                 pending++;
1267                 continue;
1268 reaped:
1269                 (*nr_running)--;
1270                 (*m_rate) -= td->o.ratemin;
1271                 (*t_rate) -= td->o.rate;
1272                 if (!td->pid)
1273                         pending--;
1274
1275                 if (td->error)
1276                         exit_value++;
1277
1278                 done_secs += mtime_since_now(&td->epoch) / 1000;
1279         }
1280
1281         if (*nr_running == cputhreads && !pending && realthreads)
1282                 terminate_threads(TERMINATE_ALL);
1283 }
1284
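/*
 * Body of the optional gettimeofday offload thread (started from
 * fio_start_gtod_thread() when gtod offload is enabled): it spins,
 * continuously refreshing the shared clock via fio_gtod_update(), for as
 * long as any jobs exist.
 */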
1285 static void *gtod_thread_main(void *data)
1286 {
1287         fio_mutex_up(startup_mutex);
1288
1289         /*
1290          * As long as we have jobs around, update the clock. It would be nice
1291          * to have some way of NOT hammering that CPU with gettimeofday(),
1292          * but I'm not sure what to use outside of a simple CPU nop to relax
1293          * it - we don't want to lose precision.
1294          */
1295         while (threads) {
1296                 fio_gtod_update();
1297                 nop;
1298         }
1299
1300         return NULL;
1301 }
1302
1303 static int fio_start_gtod_thread(void)
1304 {
1305         int ret;
1306
1307         ret = pthread_create(&gtod_thread, NULL, gtod_thread_main, NULL);
1308         if (ret) {
1309                 log_err("Can't create gtod thread: %s\n", strerror(ret));
1310                 return 1;
1311         }
1312
1313         ret = pthread_detach(gtod_thread);
1314         if (ret) {
1315                 log_err("Can't detach gtod thread: %s\n", strerror(ret));
1316                 return 1;
1317         }
1318
1319         dprint(FD_MUTEX, "wait on startup_mutex\n");
1320         fio_mutex_down(startup_mutex);
1321         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
1322         return 0;
1323 }
1324
1325 /*
1326  * Main function for kicking off and reaping jobs, as needed.
1327  */
1328 static void run_threads(void)
1329 {
1330         struct thread_data *td;
1331         unsigned long spent;
1332         int i, todo, nr_running, m_rate, t_rate, nr_started;
1333
1334         if (fio_pin_memory())
1335                 return;
1336
1337         if (fio_gtod_offload && fio_start_gtod_thread())
1338                 return;
1339
1340         if (!terse_output) {
1341                 printf("Starting ");
1342                 if (nr_thread)
1343                         printf("%d thread%s", nr_thread,
1344                                                 nr_thread > 1 ? "s" : "");
1345                 if (nr_process) {
1346                         if (nr_thread)
1347                                 printf(" and ");
1348                         printf("%d process%s", nr_process,
1349                                                 nr_process > 1 ? "es" : "");
1350                 }
1351                 printf("\n");
1352                 fflush(stdout);
1353         }
1354
1355         set_sig_handlers();
1356
1357         todo = thread_number;
1358         nr_running = 0;
1359         nr_started = 0;
1360         m_rate = t_rate = 0;
1361
1362         for_each_td(td, i) {
1363                 print_status_init(td->thread_number - 1);
1364
1365                 if (!td->o.create_serialize) {
1366                         init_disk_util(td);
1367                         continue;
1368                 }
1369
1370                 /*
1371                  * do file setup here so it happens sequentially,
1372                  * we don't want X number of threads getting their
1373                  * client data interspersed on disk
1374                  */
1375                 if (setup_files(td)) {
1376                         exit_value++;
1377                         if (td->error)
1378                                 log_err("fio: pid=%d, err=%d/%s\n",
1379                                         (int) td->pid, td->error, td->verror);
1380                         td_set_runstate(td, TD_REAPED);
1381                         todo--;
1382                 } else {
1383                         struct fio_file *f;
1384                         unsigned int i;
1385
1386                         /*
1387                          * for sharing to work, each job must always open
1388                          * its own files. so close them, if we opened them
1389                          * for creation
1390                          */
1391                         for_each_file(td, f, i)
1392                                 td_io_close_file(td, f);
1393                 }
1394
1395                 init_disk_util(td);
1396         }
1397
1398         set_genesis_time();
1399
1400         while (todo) {
1401                 struct thread_data *map[MAX_JOBS];
1402                 struct timeval this_start;
1403                 int this_jobs = 0, left;
1404
1405                 /*
1406                  * create threads (TD_NOT_CREATED -> TD_CREATED)
1407                  */
1408                 for_each_td(td, i) {
1409                         if (td->runstate != TD_NOT_CREATED)
1410                                 continue;
1411
1412                         /*
1413                          * never got a chance to start, killed by other
1414                          * thread for some reason
1415                          */
1416                         if (td->terminate) {
1417                                 todo--;
1418                                 continue;
1419                         }
1420
1421                         if (td->o.start_delay) {
1422                                 spent = mtime_since_genesis();
1423
1424                                 if (td->o.start_delay * 1000 > spent)
1425                                         continue;
1426                         }
1427
1428                         if (td->o.stonewall && (nr_started || nr_running)) {
1429                                 dprint(FD_PROCESS, "%s: stonewall wait\n",
1430                                                         td->o.name);
1431                                 break;
1432                         }
1433
1434                         /*
1435                          * Set state to created. Thread will transition
1436                          * to TD_INITIALIZED when it's done setting up.
1437                          */
1438                         td_set_runstate(td, TD_CREATED);
1439                         map[this_jobs++] = td;
1440                         nr_started++;
1441
1442                         if (td->o.use_thread) {
1443                                 int ret;
1444
1445                                 dprint(FD_PROCESS, "will pthread_create\n");
1446                                 ret = pthread_create(&td->thread, NULL,
1447                                                         thread_main, td);
1448                                 if (ret) {
1449                                         log_err("pthread_create: %s\n",
1450                                                         strerror(ret));
1451                                         nr_started--;
1452                                         break;
1453                                 }
1454                                 ret = pthread_detach(td->thread);
1455                                 if (ret)
1456                                         log_err("pthread_detach: %s",
1457                                                         strerror(ret));
1458                         } else {
1459                                 pid_t pid;
1460                                 dprint(FD_PROCESS, "will fork\n");
1461                                 pid = fork();
1462                                 if (!pid) {
1463                                         int ret = fork_main(shm_id, i);
1464
1465                                         _exit(ret);
1466                                 } else if (i == fio_debug_jobno)
1467                                         *fio_debug_jobp = pid;
1468                         }
1469                         dprint(FD_MUTEX, "wait on startup_mutex\n");
1470                         fio_mutex_down(startup_mutex);
1471                         dprint(FD_MUTEX, "done waiting on startup_mutex\n");
1472                 }
1473
1474                 /*
1475                  * Wait for the started threads to transition to
1476                  * TD_INITIALIZED.
1477                  */
1478                 fio_gettime(&this_start, NULL);
1479                 left = this_jobs;
1480                 while (left && !fio_abort) {
1481                         if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
1482                                 break;
1483
1484                         usleep(100000);
1485
1486                         for (i = 0; i < this_jobs; i++) {
1487                                 td = map[i];
1488                                 if (!td)
1489                                         continue;
1490                                 if (td->runstate == TD_INITIALIZED) {
1491                                         map[i] = NULL;
1492                                         left--;
1493                                 } else if (td->runstate >= TD_EXITED) {
1494                                         map[i] = NULL;
1495                                         left--;
1496                                         todo--;
1497                                         nr_running++; /* work-around... */
1498                                 }
1499                         }
1500                 }
1501
1502                 if (left) {
1503                         log_err("fio: %d jobs failed to start\n", left);
1504                         for (i = 0; i < this_jobs; i++) {
1505                                 td = map[i];
1506                                 if (!td)
1507                                         continue;
1508                                 kill(td->pid, SIGTERM);
1509                         }
1510                         break;
1511                 }
1512
1513                 /*
1514                  * start created threads (TD_INITIALIZED -> TD_RUNNING).
1515                  */
1516                 for_each_td(td, i) {
1517                         if (td->runstate != TD_INITIALIZED)
1518                                 continue;
1519
1520                         if (in_ramp_time(td))
1521                                 td_set_runstate(td, TD_RAMP);
1522                         else
1523                                 td_set_runstate(td, TD_RUNNING);
1524                         nr_running++;
1525                         nr_started--;
1526                         m_rate += td->o.ratemin;
1527                         t_rate += td->o.rate;
1528                         todo--;
1529                         fio_mutex_up(td->mutex);
1530                 }
1531
1532                 reap_threads(&nr_running, &t_rate, &m_rate);
1533
1534                 if (todo)
1535                         usleep(100000);
1536         }
1537
1538         while (nr_running) {
1539                 reap_threads(&nr_running, &t_rate, &m_rate);
1540                 usleep(10000);
1541         }
1542
1543         update_io_ticks();
1544         fio_unpin_memory();
1545 }
1546
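/*
 * Startup order: parse options, pick up the system page size for the ALIGN()
 * macro, set up the aggregate bandwidth logs and the startup/writeout
 * mutexes, arm the status timer, then run and reap all jobs via
 * run_threads() before printing the final stats.
 */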
1547 int main(int argc, char *argv[])
1548 {
1549         long ps;
1550
1551         sinit();
1552
1553         /*
1554          * We need locale for number printing, if it isn't set then just
1555          * go with the US format.
1556          */
1557         if (!getenv("LC_NUMERIC"))
1558                 setlocale(LC_NUMERIC, "en_US");
1559
1560         if (parse_options(argc, argv))
1561                 return 1;
1562
1563         if (!thread_number)
1564                 return 0;
1565
1566         ps = sysconf(_SC_PAGESIZE);
1567         if (ps < 0) {
1568                 log_err("Failed to get page size\n");
1569                 return 1;
1570         }
1571
1572         page_size = ps;
1573         page_mask = ps - 1;
1574
1575         if (write_bw_log) {
1576                 setup_log(&agg_io_log[DDIR_READ]);
1577                 setup_log(&agg_io_log[DDIR_WRITE]);
1578         }
1579
1580         startup_mutex = fio_mutex_init(0);
1581         writeout_mutex = fio_mutex_init(1);
1582
1583         set_genesis_time();
1584
1585         status_timer_arm();
1586
1587         run_threads();
1588
1589         if (!fio_abort) {
1590                 show_run_stats();
1591                 if (write_bw_log) {
1592                         __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
1593                         __finish_log(agg_io_log[DDIR_WRITE],
1594                                         "agg-write_bw.log");
1595                 }
1596         }
1597
1598         fio_mutex_remove(startup_mutex);
1599         fio_mutex_remove(writeout_mutex);
1600         return exit_value;
1601 }