/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "err.h"

static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
volatile int disk_util_exit = 0;

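/*
 * Round a buffer address up to the next page boundary. E.g. with 4k
 * pages (page_mask == 0xfff), an address of 0x1234 becomes 0x2000.
 */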
#define PAGE_ALIGN(buf) \
        (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)

#define JOB_START_TIMEOUT       (5 * 1000)

static void sig_int(int sig)
{
        if (threads) {
                if (is_backend)
                        fio_server_got_signal(sig);
                else {
                        log_info("\nfio: terminating on signal %d\n", sig);
                        fflush(stdout);
                        exit_value = 128;
                }

                fio_terminate_threads(TERMINATE_ALL);
        }
}

static void sig_show_status(int sig)
{
        show_running_run_stats();
}

static void set_sig_handlers(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGBREAK, &act, NULL);
#endif

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_show_status;
        act.sa_flags = SA_RESTART;
        sigaction(SIGUSR1, &act, NULL);

        if (is_backend) {
                memset(&act, 0, sizeof(act));
                act.sa_handler = sig_int;
                act.sa_flags = SA_RESTART;
                sigaction(SIGPIPE, &act, NULL);
        }
}

/*
 * Check if we are above the minimum rate given.
 */
static int __check_min_rate(struct thread_data *td, struct timeval *now,
                            enum fio_ddir ddir)
{
        unsigned long long bytes = 0;
        unsigned long iops = 0;
        unsigned long spent;
        unsigned long rate;
        unsigned int ratemin = 0;
        unsigned int rate_iops = 0;
        unsigned int rate_iops_min = 0;

        assert(ddir_rw(ddir));

        if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
                return 0;

        /*
         * allow a 2 second settle period in the beginning
         */
        if (mtime_since(&td->start, now) < 2000)
                return 0;

        iops += td->this_io_blocks[ddir];
        bytes += td->this_io_bytes[ddir];
        ratemin += td->o.ratemin[ddir];
        rate_iops += td->o.rate_iops[ddir];
        rate_iops_min += td->o.rate_iops_min[ddir];

        /*
         * if rate_bytes or rate_blocks is set, a sample is already running
         */
        if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
                spent = mtime_since(&td->lastrate[ddir], now);
                if (spent < td->o.ratecycle)
                        return 0;

                if (td->o.rate[ddir]) {
                        /*
                         * check bandwidth specified rate
                         */
                        if (bytes < td->rate_bytes[ddir]) {
                                log_err("%s: min rate %u not met\n", td->o.name,
                                                                ratemin);
                                return 1;
                        } else {
                                if (spent)
                                        rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < ratemin ||
                                    bytes < td->rate_bytes[ddir]) {
                                        log_err("%s: min rate %u not met, got"
                                                " %luKB/sec\n", td->o.name,
                                                        ratemin, rate);
                                        return 1;
                                }
                        }
                } else {
                        /*
                         * check iops specified rate
                         */
                        if (iops < rate_iops) {
                                log_err("%s: min iops rate %u not met\n",
                                                td->o.name, rate_iops);
                                return 1;
                        } else {
                                if (spent)
                                        rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
                                else
                                        rate = 0;

                                if (rate < rate_iops_min ||
                                    iops < td->rate_blocks[ddir]) {
                                        log_err("%s: min iops rate %u not met,"
                                                " got %lu\n", td->o.name,
                                                        rate_iops_min, rate);
                                        return 1;
                                }
                        }
                }
        }

        td->rate_bytes[ddir] = bytes;
        td->rate_blocks[ddir] = iops;
        memcpy(&td->lastrate[ddir], now, sizeof(*now));
        return 0;
}

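/*
 * Check the minimum rate for each data direction that saw completions.
 */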
static int check_min_rate(struct thread_data *td, struct timeval *now,
                          uint64_t *bytes_done)
{
        int ret = 0;

        if (bytes_done[DDIR_READ])
                ret |= __check_min_rate(td, now, DDIR_READ);
        if (bytes_done[DDIR_WRITE])
                ret |= __check_min_rate(td, now, DDIR_WRITE);
        if (bytes_done[DDIR_TRIM])
                ret |= __check_min_rate(td, now, DDIR_TRIM);

        return ret;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
        int r;

        /*
         * get immediately available events, if any
         */
        r = io_u_queued_complete(td, 0, NULL);
        if (r < 0)
                return;

        /*
         * now cancel remaining active events
         */
        if (td->io_ops->cancel) {
                struct io_u *io_u;
                int i;

                io_u_qiter(&td->io_u_all, io_u, i) {
                        if (io_u->flags & IO_U_F_FLIGHT) {
                                r = td->io_ops->cancel(td, io_u);
                                if (!r)
                                        put_io_u(td, io_u);
                        }
                }
        }

        if (td->cur_depth)
                r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
        struct io_u *io_u = __get_io_u(td);
        int ret;

        if (!io_u)
                return 1;

        io_u->ddir = DDIR_SYNC;
        io_u->file = f;

        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
                return 1;
        }

requeue:
        ret = td_io_queue(td, io_u);
        if (ret < 0) {
                td_verror(td, io_u->error, "td_io_queue");
                put_io_u(td, io_u);
                return 1;
        } else if (ret == FIO_Q_QUEUED) {
                if (io_u_queued_complete(td, 1, NULL) < 0)
                        return 1;
        } else if (ret == FIO_Q_COMPLETED) {
                if (io_u->error) {
                        td_verror(td, io_u->error, "td_io_queue");
                        return 1;
                }

                if (io_u_sync_complete(td, io_u, NULL) < 0)
                        return 1;
        } else if (ret == FIO_Q_BUSY) {
                if (td_io_commit(td))
                        return 1;
                goto requeue;
        }

        return 0;
}

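/*
 * Sync a file, temporarily opening it first if the job has already
 * closed it.
 */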
static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
        int ret;

        if (fio_file_open(f))
                return fio_io_sync(td, f);

        if (td_io_open_file(td, f))
                return 1;

        ret = fio_io_sync(td, f);
        td_io_close_file(td, f);
        return ret;
}

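/*
 * Cache the current time to avoid hammering the clock source. The
 * cached value is only refreshed every tv_cache_mask + 1 calls, e.g.
 * a mask of 15 means one fio_gettime() per 16 update_tv_cache() calls.
 */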
static inline void __update_tv_cache(struct thread_data *td)
{
        fio_gettime(&td->tv_cache, NULL);
}

static inline void update_tv_cache(struct thread_data *td)
{
        if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
                __update_tv_cache(td);
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
        if (in_ramp_time(td))
                return 0;
        if (!td->o.timeout)
                return 0;
        if (utime_since(&td->epoch, t) >= td->o.timeout)
                return 1;

        return 0;
}

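/*
 * Decide whether an IO error should stop the job. Non-fatal errors
 * covered by continue_on_error are counted and cleared; ENOSPC with
 * fill_device set ends the job cleanly; anything else is fatal.
 */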
static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
                               int *retptr)
{
        int ret = *retptr;

        if (ret < 0 || td->error) {
                int err = td->error;
                enum error_type_bit eb;

                if (ret < 0)
                        err = -ret;

                eb = td_error_type(ddir, err);
                if (!(td->o.continue_on_error & (1 << eb)))
                        return 1;

                if (td_non_fatal_error(td, eb, err)) {
                        /*
                         * Continue with the I/Os in case of
                         * a non fatal error.
                         */
                        update_error_count(td, err);
                        td_clear_error(td);
                        *retptr = 0;
                        return 0;
                } else if (td->o.fill_device && err == ENOSPC) {
                        /*
                         * We expect to hit this error if
                         * fill_device option is set.
                         */
                        td_clear_error(td);
                        td->terminate = 1;
                        return 1;
                } else {
                        /*
                         * Stop the I/O in case of a fatal
                         * error.
                         */
                        update_error_count(td, err);
                        return 1;
                }
        }

        return 0;
}

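/*
 * Refresh resource usage stats if another thread (typically the one
 * printing interim run stats) requested them, then release the waiter.
 */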
static void check_update_rusage(struct thread_data *td)
{
        if (td->update_rusage) {
                td->update_rusage = 0;
                update_rusage_stat(td);
                fio_mutex_up(td->rusage_sem);
        }
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        struct fio_file *f;
        struct io_u *io_u;
        int ret, min_events;
        unsigned int i;

        dprint(FD_VERIFY, "starting loop\n");

        /*
         * sync io first and invalidate cache, to make sure we really
         * read from disk.
         */
        for_each_file(td, f, i) {
                if (!fio_file_open(f))
                        continue;
                if (fio_io_sync(td, f))
                        break;
                if (file_invalidate_cache(td, f))
                        break;
        }

        check_update_rusage(td);

        if (td->error)
                return;

        td_set_runstate(td, TD_VERIFYING);

        io_u = NULL;
        while (!td->terminate) {
                enum fio_ddir ddir;
                int ret2, full;

                update_tv_cache(td);
                check_update_rusage(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                td->terminate = 1;
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (!td->o.experimental_verify) {
                        io_u = __get_io_u(td);
                        if (!io_u)
                                break;

                        if (get_next_verify(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }

                        if (td_io_prep(td, io_u)) {
                                put_io_u(td, io_u);
                                break;
                        }
                } else {
                        if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
                                break;

                        while ((io_u = get_io_u(td)) != NULL) {
                                if (IS_ERR(io_u)) {
                                        io_u = NULL;
                                        ret = FIO_Q_BUSY;
                                        goto reap;
                                }

                                /*
                                 * We are only interested in the places where
                                 * we wrote or trimmed IOs. Turn those into
                                 * reads for verification purposes.
                                 */
                                if (io_u->ddir == DDIR_READ) {
                                        /*
                                         * Pretend we issued it for rwmix
                                         * accounting
                                         */
                                        td->io_issues[DDIR_READ]++;
                                        put_io_u(td, io_u);
                                        continue;
                                } else if (io_u->ddir == DDIR_TRIM) {
                                        io_u->ddir = DDIR_READ;
                                        io_u->flags |= IO_U_F_TRIMMED;
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
                                        break;
                                } else {
                                        put_io_u(td, io_u);
                                        continue;
                                }
                        }

                        if (!io_u)
                                break;
                }

                if (td->o.verify_async)
                        io_u->end_io = verify_io_u_async;
                else
                        io_u->end_io = verify_io_u;

                ddir = io_u->ddir;

                ret = td_io_queue(td, io_u);
                switch (ret) {
                case FIO_Q_COMPLETED:
                        if (io_u->error) {
                                ret = -io_u->error;
                                clear_io_u(td, io_u);
                        } else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;

                                /*
                                 * zero read, fail
                                 */
                                if (!bytes) {
                                        td_verror(td, EIO, "full resid");
                                        put_io_u(td, io_u);
                                        break;
                                }

                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                io_u->offset += bytes;

                                if (ddir_rw(io_u->ddir))
                                        td->ts.short_io_u[io_u->ddir]++;

                                f = io_u->file;
                                if (io_u->offset == f->real_file_size)
                                        goto sync_done;

                                requeue_io_u(td, &io_u);
                        } else {
sync_done:
                                ret = io_u_sync_complete(td, io_u, bytes_done);
                                if (ret < 0)
                                        break;
                        }
                        continue;
                case FIO_Q_QUEUED:
                        break;
                case FIO_Q_BUSY:
                        requeue_io_u(td, &io_u);
                        ret2 = td_io_commit(td);
                        if (ret2 < 0)
                                ret = ret2;
                        break;
                default:
                        assert(ret < 0);
                        td_verror(td, -ret, "td_io_queue");
                        break;
                }

                if (break_on_this_error(td, ddir, &ret))
                        break;

                /*
                 * if we can queue more, do so. but check if there are
                 * completed io_u's first. Note that we can get BUSY even
                 * without IO queued, if the system is resource starved.
                 */
reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || !td->o.iodepth_batch_complete) {
                        min_events = min(td->o.iodepth_batch_complete,
                                         td->cur_depth);
                        /*
                         * if the queue is full, we MUST reap at least 1 event
                         */
                        if (full && !min_events)
                                min_events = 1;

                        do {
                                /*
                                 * Reap required number of io units, if any,
                                 * and do the verification on them through
                                 * the callback handler
                                 */
                                if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
                                        ret = -1;
                                        break;
                                }
                        } while (full && (td->cur_depth > td->o.iodepth_low));
                }
                if (ret < 0)
                        break;
        }

        check_update_rusage(td);

        if (!td->error) {
                min_events = td->cur_depth;

                if (min_events)
                        ret = io_u_queued_complete(td, min_events, NULL);
        } else
                cleanup_pending_aio(td);

        td_set_runstate(td, TD_RUNNING);

        dprint(FD_VERIFY, "exiting loop\n");
}

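/*
 * Job-limit helpers: has the job issued its number_ios quota, or moved
 * the number of bytes it was asked to?
 */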
static unsigned int exceeds_number_ios(struct thread_data *td)
{
        unsigned long long number_ios;

        if (!td->o.number_ios)
                return 0;

        number_ios = ddir_rw_sum(td->this_io_blocks);
        number_ios += td->io_u_queued + td->io_u_in_flight;

        return number_ios >= td->o.number_ios;
}

static int io_bytes_exceeded(struct thread_data *td)
{
        unsigned long long bytes;

        if (td_rw(td))
                bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
        else if (td_write(td))
                bytes = td->this_io_bytes[DDIR_WRITE];
        else if (td_read(td))
                bytes = td->this_io_bytes[DDIR_READ];
        else
                bytes = td->this_io_bytes[DDIR_TRIM];

        return bytes >= td->o.size || exceeds_number_ios(td);
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static uint64_t do_io(struct thread_data *td)
{
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        unsigned int i;
        int ret = 0;
        uint64_t total_bytes, bytes_issued = 0;

        if (in_ramp_time(td))
                td_set_runstate(td, TD_RAMP);
        else
                td_set_runstate(td, TD_RUNNING);

        lat_target_init(td);

        /*
         * If verify_backlog is enabled, we'll run the verify in this
         * handler as well. For that case, we may need up to twice the
         * amount of bytes.
         */
        total_bytes = td->o.size;
        if (td->o.verify != VERIFY_NONE &&
           (td_write(td) && td->o.verify_backlog))
                total_bytes += td->o.size;

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
                td->o.time_based) {
                struct timeval comp_time;
                int min_evts = 0;
                struct io_u *io_u;
                int ret2, full;
                enum fio_ddir ddir;

                check_update_rusage(td);

                if (td->terminate || td->done)
                        break;

                update_tv_cache(td);

                if (runtime_exceeded(td, &td->tv_cache)) {
                        __update_tv_cache(td);
                        if (runtime_exceeded(td, &td->tv_cache)) {
                                td->terminate = 1;
                                break;
                        }
                }

                if (flow_threshold_exceeded(td))
                        continue;

                if (bytes_issued >= total_bytes)
                        break;

                io_u = get_io_u(td);
                if (IS_ERR_OR_NULL(io_u)) {
                        int err = PTR_ERR(io_u);

                        io_u = NULL;
                        if (err == -EBUSY) {
                                ret = FIO_Q_BUSY;
                                goto reap;
                        }
                        if (td->o.latency_target)
                                goto reap;
                        break;
                }

                ddir = io_u->ddir;

                /*
                 * Add verification end_io handler if:
                 *      - Asked to verify (!td_rw(td))
                 *      - Or the io_u is from our verify list (mixed write/ver)
                 */
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
                    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

                        if (!td->o.verify_pattern_bytes) {
                                io_u->rand_seed = __rand(&td->__verify_state);
                                if (sizeof(int) != sizeof(long *))
                                        io_u->rand_seed *= __rand(&td->__verify_state);
                        }

                        if (td->o.verify_async)
                                io_u->end_io = verify_io_u_async;
                        else
                                io_u->end_io = verify_io_u;
                        td_set_runstate(td, TD_VERIFYING);
                } else if (in_ramp_time(td))
                        td_set_runstate(td, TD_RAMP);
                else
                        td_set_runstate(td, TD_RUNNING);

                /*
                 * Always log IO before it's issued, so we know the specific
                 * order of it. The logged unit will track when the IO has
                 * completed.
                 */
                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                ret = td_io_queue(td, io_u);
                switch (ret) {
                case FIO_Q_COMPLETED:
                        if (io_u->error) {
                                ret = -io_u->error;
                                clear_io_u(td, io_u);
                        } else if (io_u->resid) {
                                int bytes = io_u->xfer_buflen - io_u->resid;
                                struct fio_file *f = io_u->file;

                                bytes_issued += bytes;
                                /*
                                 * zero read, fail
                                 */
                                if (!bytes) {
                                        td_verror(td, EIO, "full resid");
                                        put_io_u(td, io_u);
                                        break;
                                }

                                io_u->xfer_buflen = io_u->resid;
                                io_u->xfer_buf += bytes;
                                io_u->offset += bytes;

                                if (ddir_rw(io_u->ddir))
                                        td->ts.short_io_u[io_u->ddir]++;

                                if (io_u->offset == f->real_file_size)
                                        goto sync_done;

                                requeue_io_u(td, &io_u);
                        } else {
sync_done:
                                if (__should_check_rate(td, DDIR_READ) ||
                                    __should_check_rate(td, DDIR_WRITE) ||
                                    __should_check_rate(td, DDIR_TRIM))
                                        fio_gettime(&comp_time, NULL);

                                ret = io_u_sync_complete(td, io_u, bytes_done);
                                if (ret < 0)
                                        break;
                                bytes_issued += io_u->xfer_buflen;
                        }
                        break;
                case FIO_Q_QUEUED:
                        /*
                         * if the engine doesn't have a commit hook,
                         * the io_u is really queued. if it does have such
                         * a hook, it has to call io_u_queued() itself.
                         */
                        if (td->io_ops->commit == NULL)
                                io_u_queued(td, io_u);
                        bytes_issued += io_u->xfer_buflen;
                        break;
                case FIO_Q_BUSY:
                        requeue_io_u(td, &io_u);
                        ret2 = td_io_commit(td);
                        if (ret2 < 0)
                                ret = ret2;
                        break;
                default:
                        assert(ret < 0);
                        put_io_u(td, io_u);
                        break;
                }

                if (break_on_this_error(td, ddir, &ret))
                        break;

                /*
                 * See if we need to complete some commands. Note that we
                 * can get BUSY even without IO queued, if the system is
                 * resource starved.
                 */
reap:
                full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
                if (full || !td->o.iodepth_batch_complete) {
                        min_evts = min(td->o.iodepth_batch_complete,
                                        td->cur_depth);
                        /*
                         * if the queue is full, we MUST reap at least 1 event
                         */
                        if (full && !min_evts)
                                min_evts = 1;

                        if (__should_check_rate(td, DDIR_READ) ||
                            __should_check_rate(td, DDIR_WRITE) ||
                            __should_check_rate(td, DDIR_TRIM))
                                fio_gettime(&comp_time, NULL);

                        do {
                                ret = io_u_queued_complete(td, min_evts, bytes_done);
                                if (ret < 0)
                                        break;

                        } while (full && (td->cur_depth > td->o.iodepth_low));
                }

                if (ret < 0)
                        break;
                if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
                        continue;

                if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
                        if (check_min_rate(td, &comp_time, bytes_done)) {
                                if (exitall_on_terminate)
                                        fio_terminate_threads(td->groupid);
                                td_verror(td, EIO, "check_min_rate");
                                break;
                        }
                }
                if (!in_ramp_time(td) && td->o.latency_target)
                        lat_target_check(td);

                if (td->o.thinktime) {
                        unsigned long long b;

                        b = ddir_rw_sum(td->io_blocks);
                        if (!(b % td->o.thinktime_blocks)) {
                                int left;

                                io_u_quiesce(td);

                                if (td->o.thinktime_spin)
                                        usec_spin(td->o.thinktime_spin);

                                left = td->o.thinktime - td->o.thinktime_spin;
                                if (left)
                                        usec_sleep(td, left);
                        }
                }
        }

        check_update_rusage(td);

        if (td->trim_entries)
                log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

        if (td->o.fill_device && td->error == ENOSPC) {
                td->error = 0;
                td->terminate = 1;
        }
        if (!td->error) {
                struct fio_file *f;

                i = td->cur_depth;
                if (i) {
                        ret = io_u_queued_complete(td, i, bytes_done);
                        if (td->o.fill_device && td->error == ENOSPC)
                                td->error = 0;
                }

                if (should_fsync(td) && td->o.end_fsync) {
                        td_set_runstate(td, TD_FSYNCING);

                        for_each_file(td, f, i) {
                                if (!fio_file_fsync(td, f))
                                        continue;

                                log_err("fio: end_fsync failed for file %s\n",
                                                                f->file_name);
                        }
                }
        } else
                cleanup_pending_aio(td);

        /*
         * stop job if we failed doing any IO
         */
        if (!ddir_rw_sum(td->this_io_bytes))
                td->done = 1;

        return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

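/*
 * Free all io_u units, any engine-private data attached to them, and
 * the shared data buffer behind them.
 */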
static void cleanup_io_u(struct thread_data *td)
{
        struct io_u *io_u;

        while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

                if (td->io_ops->io_u_free)
                        td->io_ops->io_u_free(td, io_u);

                fio_memfree(io_u, sizeof(*io_u));
        }

        free_io_mem(td);

        io_u_rexit(&td->io_u_requeues);
        io_u_qexit(&td->io_u_freelist);
        io_u_qexit(&td->io_u_all);
}

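/*
 * Allocate the io_u units and the data buffer backing them. The buffer
 * is sized as max_bs * iodepth, padded for alignment if the engine or
 * options (O_DIRECT, mem_align) require it, and each io_u gets a
 * max_bs sized slice of it.
 */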
static int init_io_u(struct thread_data *td)
{
        struct io_u *io_u;
        unsigned int max_bs, min_write;
        int cl_align, i, max_units;
        int data_xfer = 1, err;
        char *p;

        max_units = td->o.iodepth;
        max_bs = td_max_bs(td);
        min_write = td->o.min_bs[DDIR_WRITE];
        td->orig_buffer_size = (unsigned long long) max_bs
                                        * (unsigned long long) max_units;

        if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
                data_xfer = 0;

        err = 0;
        err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
        err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
        err += io_u_qinit(&td->io_u_all, td->o.iodepth);

        if (err) {
                log_err("fio: failed setting up IO queues\n");
                return 1;
        }

        /*
         * if we may later need to do address alignment, then add any
         * possible adjustment here so that we don't cause a buffer
         * overflow later. this adjustment may be too much if we get
         * lucky and the allocator gives us an aligned address.
         */
        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                td->orig_buffer_size += page_mask + td->o.mem_align;

        if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
                unsigned long bs;

                bs = td->orig_buffer_size + td->o.hugepage_size - 1;
                td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
        }

        if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
                log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
                return 1;
        }

        if (data_xfer && allocate_io_mem(td))
                return 1;

        if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
            (td->io_ops->flags & FIO_RAWIO))
                p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
        else
                p = td->orig_buffer;

        cl_align = os_cache_line_size();

        for (i = 0; i < max_units; i++) {
                void *ptr;

                if (td->terminate)
                        return 1;

                ptr = fio_memalign(cl_align, sizeof(*io_u));
                if (!ptr) {
                        log_err("fio: unable to allocate aligned memory\n");
                        break;
                }

                io_u = ptr;
                memset(io_u, 0, sizeof(*io_u));
                INIT_FLIST_HEAD(&io_u->verify_list);
                dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

                if (data_xfer) {
                        io_u->buf = p;
                        dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

                        if (td_write(td))
                                io_u_fill_buffer(td, io_u, min_write, max_bs);
                        if (td_write(td) && td->o.verify_pattern_bytes) {
                                /*
                                 * Fill the buffer with the pattern if we are
                                 * going to be doing writes.
                                 */
                                fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
                        }
                }

                io_u->index = i;
                io_u->flags = IO_U_F_FREE;
                io_u_qpush(&td->io_u_freelist, io_u);

                /*
                 * io_u never leaves this stack, used for iteration of all
                 * io_u buffers.
                 */
                io_u_qpush(&td->io_u_all, io_u);

                if (td->io_ops->io_u_init) {
                        int ret = td->io_ops->io_u_init(td, io_u);

                        if (ret) {
                                log_err("fio: failed to init engine data: %d\n", ret);
                                return 1;
                        }
                }

                p += max_bs;
        }

        return 0;
}

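/*
 * Switch the device's IO scheduler via sysfs, then read the file back
 * to verify that the kernel actually selected it.
 */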
static int switch_ioscheduler(struct thread_data *td)
{
        char tmp[256], tmp2[128];
        FILE *f;
        int ret;

        if (td->io_ops->flags & FIO_DISKLESSIO)
                return 0;

        sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

        f = fopen(tmp, "r+");
        if (!f) {
                if (errno == ENOENT) {
                        log_err("fio: os or kernel doesn't support IO scheduler"
                                " switching\n");
                        return 0;
                }
                td_verror(td, errno, "fopen iosched");
                return 1;
        }

        /*
         * Set io scheduler.
         */
        ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
        if (ferror(f) || ret != 1) {
                td_verror(td, errno, "fwrite");
                fclose(f);
                return 1;
        }

        rewind(f);

        /*
         * Read back and check that the selected scheduler is now the default.
         * Zero the buffer first, so a short read leaves it null terminated.
         */
        memset(tmp, 0, sizeof(tmp));
        ret = fread(tmp, sizeof(tmp), 1, f);
        if (ferror(f) || ret < 0) {
                td_verror(td, errno, "fread");
                fclose(f);
                return 1;
        }
        tmp[sizeof(tmp) - 1] = '\0';

        sprintf(tmp2, "[%s]", td->o.ioscheduler);
        if (!strstr(tmp, tmp2)) {
                log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
                td_verror(td, EINVAL, "iosched_switch");
                fclose(f);
                return 1;
        }

        fclose(f);
        return 0;
}

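/*
 * Decide whether the job should do another pass: honors time_based,
 * loops, the number_ios limit, and the remaining size (quit once less
 * than a full max block is left).
 */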
static int keep_running(struct thread_data *td)
{
        if (td->done)
                return 0;
        if (td->o.time_based)
                return 1;
        if (td->o.loops) {
                td->o.loops--;
                return 1;
        }
        if (exceeds_number_ios(td))
                return 0;

        if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) {
                uint64_t diff;

                /*
                 * If the difference is less than the minimum IO size, we
                 * are done.
                 */
                diff = td->o.size - ddir_rw_sum(td->io_bytes);
                if (diff < td_max_bs(td))
                        return 0;

                if (fio_files_done(td))
                        return 0;

                return 1;
        }

        return 0;
}

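/*
 * Run a user supplied pre/post command via system(), redirecting its
 * output to <jobname>.<mode>.txt.
 */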
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
        int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
        char *str;

        str = malloc(newlen);
        if (!str)
                return -1;

        sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

        log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
        ret = system(str);
        if (ret == -1)
                log_err("fio: exec of cmd <%s> failed\n", str);

        free(str);
        return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };

        td_set_runstate(td, TD_RUNNING);

        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
                struct io_u *io_u;
                int ret;

                if (td->terminate || td->done)
                        break;

                io_u = get_io_u(td);
                if (!io_u)
                        break;

                io_u->flags |= IO_U_F_FLIGHT;
                io_u->error = 0;
                io_u->resid = 0;
                if (ddir_rw(acct_ddir(io_u)))
                        td->io_issues[acct_ddir(io_u)]++;
                if (ddir_rw(io_u->ddir)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }

                if (td_write(td) && io_u->ddir == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE &&
                    !td->o.experimental_verify)
                        log_io_piece(td, io_u);

                ret = io_u_sync_complete(td, io_u, bytes_done);
                (void) ret;
        }

        return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}
1231
1232 /*
1233  * Entry point for the thread based jobs. The process based jobs end up
1234  * here as well, after a little setup.
1235  */
1236 static void *thread_main(void *data)
1237 {
1238         unsigned long long elapsed;
1239         struct thread_data *td = data;
1240         struct thread_options *o = &td->o;
1241         pthread_condattr_t attr;
1242         int clear_state;
1243         int ret;
1244
1245         if (!o->use_thread) {
1246                 setsid();
1247                 td->pid = getpid();
1248         } else
1249                 td->pid = gettid();
1250
1251         /*
1252          * fio_time_init() may not have been called yet if running as a server
1253          */
1254         fio_time_init();
1255
1256         fio_local_clock_init(o->use_thread);
1257
1258         dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
1259
1260         if (is_backend)
1261                 fio_server_send_start(td);
1262
1263         INIT_FLIST_HEAD(&td->io_log_list);
1264         INIT_FLIST_HEAD(&td->io_hist_list);
1265         INIT_FLIST_HEAD(&td->verify_list);
1266         INIT_FLIST_HEAD(&td->trim_list);
1267         INIT_FLIST_HEAD(&td->next_rand_list);
1268         pthread_mutex_init(&td->io_u_lock, NULL);
1269         td->io_hist_tree = RB_ROOT;
1270
1271         pthread_condattr_init(&attr);
1272         pthread_cond_init(&td->verify_cond, &attr);
1273         pthread_cond_init(&td->free_cond, &attr);
1274
1275         td_set_runstate(td, TD_INITIALIZED);
1276         dprint(FD_MUTEX, "up startup_mutex\n");
1277         fio_mutex_up(startup_mutex);
1278         dprint(FD_MUTEX, "wait on td->mutex\n");
1279         fio_mutex_down(td->mutex);
1280         dprint(FD_MUTEX, "done waiting on td->mutex\n");
1281
1282         /*
1283          * A new gid requires privilege, so we need to do this before setting
1284          * the uid.
1285          */
1286         if (o->gid != -1U && setgid(o->gid)) {
1287                 td_verror(td, errno, "setgid");
1288                 goto err;
1289         }
1290         if (o->uid != -1U && setuid(o->uid)) {
1291                 td_verror(td, errno, "setuid");
1292                 goto err;
1293         }
1294
1295         /*
1296          * If we have a gettimeofday() thread, make sure we exclude that
1297          * thread from this job
1298          */
1299         if (o->gtod_cpu)
1300                 fio_cpu_clear(&o->cpumask, o->gtod_cpu);
1301
1302         /*
1303          * Set affinity first, in case it has an impact on the memory
1304          * allocations.
1305          */
1306         if (o->cpumask_set) {
1307                 if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
1308                         ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
1309                         if (!ret) {
1310                                 log_err("fio: no CPUs set\n");
1311                                 log_err("fio: Try increasing number of available CPUs\n");
1312                                 td_verror(td, EINVAL, "cpus_split");
1313                                 goto err;
1314                         }
1315                 }
1316                 ret = fio_setaffinity(td->pid, o->cpumask);
1317                 if (ret == -1) {
1318                         td_verror(td, errno, "cpu_set_affinity");
1319                         goto err;
1320                 }
1321         }
1322
1323 #ifdef CONFIG_LIBNUMA
1324         /* numa node setup */
1325         if (o->numa_cpumask_set || o->numa_memmask_set) {
1326                 int ret;
1327
1328                 if (numa_available() < 0) {
1329                         td_verror(td, errno, "Does not support NUMA API\n");
1330                         goto err;
1331                 }
1332
1333                 if (o->numa_cpumask_set) {
1334                         ret = numa_run_on_node_mask(o->numa_cpunodesmask);
1335                         if (ret == -1) {
1336                                 td_verror(td, errno, \
1337                                         "numa_run_on_node_mask failed\n");
1338                                 goto err;
1339                         }
1340                 }
1341
1342                 if (o->numa_memmask_set) {
1343
1344                         switch (o->numa_mem_mode) {
1345                         case MPOL_INTERLEAVE:
1346                                 numa_set_interleave_mask(o->numa_memnodesmask);
1347                                 break;
1348                         case MPOL_BIND:
1349                                 numa_set_membind(o->numa_memnodesmask);
1350                                 break;
1351                         case MPOL_LOCAL:
1352                                 numa_set_localalloc();
1353                                 break;
1354                         case MPOL_PREFERRED:
1355                                 numa_set_preferred(o->numa_mem_prefer_node);
1356                                 break;
1357                         case MPOL_DEFAULT:
1358                         default:
1359                                 break;
1360                         }
1361
1362                 }
1363         }
1364 #endif
1365
1366         if (fio_pin_memory(td))
1367                 goto err;
1368
1369         /*
1370          * May alter parameters that init_io_u() will use, so we need to
1371          * do this first.
1372          */
1373         if (init_iolog(td))
1374                 goto err;
1375
1376         if (init_io_u(td))
1377                 goto err;
1378
1379         if (o->verify_async && verify_async_init(td))
1380                 goto err;
1381
1382         if (o->ioprio) {
1383                 ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
1384                 if (ret == -1) {
1385                         td_verror(td, errno, "ioprio_set");
1386                         goto err;
1387                 }
1388         }
1389
1390         if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
1391                 goto err;
1392
1393         errno = 0;
1394         if (nice(o->nice) == -1 && errno != 0) {
1395                 td_verror(td, errno, "nice");
1396                 goto err;
1397         }
1398
1399         if (o->ioscheduler && switch_ioscheduler(td))
1400                 goto err;
1401
1402         if (!o->create_serialize && setup_files(td))
1403                 goto err;
1404
1405         if (td_io_init(td))
1406                 goto err;
1407
1408         if (init_random_map(td))
1409                 goto err;
1410
1411         if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
1412                 goto err;
1413
1414         if (o->pre_read) {
1415                 if (pre_read_files(td) < 0)
1416                         goto err;
1417         }
1418
1419         fio_verify_init(td);
1420
1421         fio_gettime(&td->epoch, NULL);
1422         fio_getrusage(&td->ru_start);
        clear_state = 0;
        while (keep_running(td)) {
                uint64_t verify_bytes;

                fio_gettime(&td->start, NULL);
                memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
                memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
                memcpy(&td->tv_cache, &td->start, sizeof(td->start));

                if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
                                o->ratemin[DDIR_TRIM]) {
                        memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
                                                sizeof(td->bw_sample_time));
                        memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
                                                sizeof(td->bw_sample_time));
                        memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
                                                sizeof(td->bw_sample_time));
                }

                if (clear_state)
                        clear_io_state(td);

                prune_io_piece_log(td);

                if (td->o.verify_only && (td_write(td) || td_rw(td)))
                        verify_bytes = do_dry_run(td);
                else
                        verify_bytes = do_io(td);

                clear_state = 1;

                if (td_read(td) && td->io_bytes[DDIR_READ]) {
                        elapsed = utime_since_now(&td->start);
                        td->ts.runtime[DDIR_READ] += elapsed;
                }
                if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
                        elapsed = utime_since_now(&td->start);
                        td->ts.runtime[DDIR_WRITE] += elapsed;
                }
                if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
                        elapsed = utime_since_now(&td->start);
                        td->ts.runtime[DDIR_TRIM] += elapsed;
                }

                if (td->error || td->terminate)
                        break;

                if (!o->do_verify ||
                    o->verify == VERIFY_NONE ||
                    (td->io_ops->flags & FIO_UNIDIR))
                        continue;

                clear_io_state(td);

                fio_gettime(&td->start, NULL);

                do_verify(td, verify_bytes);

                td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);

                if (td->error || td->terminate)
                        break;
        }

        update_rusage_stat(td);
        td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000;
        td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000;
        td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000;
        td->ts.total_run_time = mtime_since_now(&td->epoch);
        td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
        td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
        td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];

        fio_unpin_memory(td);

        fio_writeout_logs(td);

        if (o->exec_postrun)
                exec_string(o, o->exec_postrun, (const char *)"postrun");

        if (exitall_on_terminate)
                fio_terminate_threads(td->groupid);

err:
        if (td->error)
                log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
                                                        td->verror);

        if (o->verify_async)
                verify_async_exit(td);

        close_and_free_files(td);
        cleanup_io_u(td);
        close_ioengine(td);
        cgroup_shutdown(td, &cgroup_mnt);

        if (o->cpumask_set) {
                int ret = fio_cpuset_exit(&o->cpumask);

                if (ret)
                        td_verror(td, ret, "fio_cpuset_exit");
        }

        /*
         * do this very late, it will log file closing as well
         */
        if (o->write_iolog_file)
                write_iolog_close(td);

        fio_mutex_remove(td->rusage_sem);
        td->rusage_sem = NULL;

        fio_mutex_remove(td->mutex);
        td->mutex = NULL;

        td_set_runstate(td, TD_EXITED);
        return (void *) (uintptr_t) td->error;
}
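
/*
 * Illustrative sketch (not fio code): the runtime totals above are kept
 * in microseconds and converted to milliseconds with a round-up, so a
 * partial millisecond of runtime still counts as a full one.
 */
static inline unsigned long example_usec_to_msec_roundup(unsigned long usec)
{
        return (usec + 999) / 1000;
}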

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
        struct thread_data *td;
        void *data, *ret;

#ifndef __hpux
        data = shmat(shmid, NULL, 0);
        if (data == (void *) -1) {
                int __err = errno;

                perror("shmat");
                return __err;
        }
#else
        /*
         * HP-UX inherits shm mappings?
         */
        data = threads;
#endif

        td = data + offset * sizeof(struct thread_data);
        ret = thread_main(td);
        shmdt(data);
        return (int) (uintptr_t) ret;
}
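
/*
 * Illustrative sketch (not fio code): the parent is assumed to create
 * the shared segment roughly like this before forking; fork_main()
 * above then re-attaches it in the child and computes its own td slot.
 * The size and permission bits here are placeholders.
 */
static int example_create_shm(size_t size)
{
        int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);

        if (shmid < 0)
                perror("shmget");
        return shmid;
}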

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
                         unsigned int *m_rate)
{
        struct thread_data *td;
        unsigned int cputhreads, realthreads, pending;
        int i, status, ret;

        /*
         * reap exited threads (TD_EXITED -> TD_REAPED)
         */
        realthreads = pending = cputhreads = 0;
        for_each_td(td, i) {
                int flags = 0;

                /*
                 * ->io_ops is NULL for a thread that has closed its
                 * io engine
                 */
                if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
                        cputhreads++;
                else
                        realthreads++;

                if (!td->pid) {
                        pending++;
                        continue;
                }
                if (td->runstate == TD_REAPED)
                        continue;
                if (td->o.use_thread) {
                        if (td->runstate == TD_EXITED) {
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        continue;
                }

                flags = WNOHANG;
                if (td->runstate == TD_EXITED)
                        flags = 0;

                /*
                 * check if someone quit or got killed in an unusual way
                 */
                ret = waitpid(td->pid, &status, flags);
                if (ret < 0) {
                        if (errno == ECHILD) {
                                log_err("fio: pid=%d disappeared %d\n",
                                                (int) td->pid, td->runstate);
                                td->sig = ECHILD;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        perror("waitpid");
                } else if (ret == td->pid) {
                        if (WIFSIGNALED(status)) {
                                int sig = WTERMSIG(status);

                                if (sig != SIGTERM && sig != SIGUSR2)
                                        log_err("fio: pid=%d, got signal=%d\n",
                                                        (int) td->pid, sig);
                                td->sig = sig;
                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                        if (WIFEXITED(status)) {
                                if (WEXITSTATUS(status) && !td->error)
                                        td->error = WEXITSTATUS(status);

                                td_set_runstate(td, TD_REAPED);
                                goto reaped;
                        }
                }

                /*
                 * thread is not dead, continue
                 */
                pending++;
                continue;
reaped:
                (*nr_running)--;
                (*m_rate) -= ddir_rw_sum(td->o.ratemin);
                (*t_rate) -= ddir_rw_sum(td->o.rate);
                if (!td->pid)
                        pending--;

                if (td->error)
                        exit_value++;

                done_secs += mtime_since_now(&td->epoch) / 1000;
                profile_td_exit(td);
        }

        if (*nr_running == cputhreads && !pending && realthreads)
                fio_terminate_threads(TERMINATE_ALL);
}
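
/*
 * Illustrative sketch (not fio code): the waitpid() decoding above in a
 * condensed form. It returns the signal number if the child died from a
 * signal, the exit status if it exited normally, and -1 while it is
 * still running.
 */
static int example_decode_child_status(pid_t pid)
{
        int status;

        if (waitpid(pid, &status, WNOHANG) != pid)
                return -1;
        if (WIFSIGNALED(status))
                return WTERMSIG(status);
        if (WIFEXITED(status))
                return WEXITSTATUS(status);
        return -1;
}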

static void do_usleep(unsigned int usecs)
{
        check_for_running_stats();
        usleep(usecs);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
        struct thread_data *td;
        unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
        uint64_t spent;

        if (fio_gtod_offload && fio_start_gtod_thread())
                return;

        fio_idle_prof_init();

        set_sig_handlers();

        nr_thread = nr_process = 0;
        for_each_td(td, i) {
                if (td->o.use_thread)
                        nr_thread++;
                else
                        nr_process++;
        }

        if (output_format == FIO_OUTPUT_NORMAL) {
                log_info("Starting ");
                if (nr_thread)
                        log_info("%d thread%s", nr_thread,
                                                nr_thread > 1 ? "s" : "");
                if (nr_process) {
                        if (nr_thread)
                                log_info(" and ");
                        log_info("%d process%s", nr_process,
                                                nr_process > 1 ? "es" : "");
                }
                log_info("\n");
                fflush(stdout);
        }

        todo = thread_number;
        nr_running = 0;
        nr_started = 0;
        m_rate = t_rate = 0;

        for_each_td(td, i) {
                print_status_init(td->thread_number - 1);

                if (!td->o.create_serialize)
                        continue;

                /*
                 * do file setup here so it happens sequentially,
                 * we don't want X number of threads getting their
                 * client data interspersed on disk
                 */
                if (setup_files(td)) {
                        exit_value++;
                        if (td->error)
                                log_err("fio: pid=%d, err=%d/%s\n",
                                        (int) td->pid, td->error, td->verror);
                        td_set_runstate(td, TD_REAPED);
                        todo--;
                } else {
                        struct fio_file *f;
                        unsigned int j;

                        /*
                         * for sharing to work, each job must always open
                         * its own files. so close them, if we opened them
                         * for creation
                         */
                        for_each_file(td, f, j) {
                                if (fio_file_open(f))
                                        td_io_close_file(td, f);
                        }
                }
        }

        /* start idle threads before io threads start to run */
        fio_idle_prof_start();

        set_genesis_time();

        while (todo) {
                struct thread_data *map[REAL_MAX_JOBS];
                struct timeval this_start;
                int this_jobs = 0, left;

                /*
                 * create threads (TD_NOT_CREATED -> TD_CREATED)
                 */
                for_each_td(td, i) {
                        if (td->runstate != TD_NOT_CREATED)
                                continue;

                        /*
                         * never got a chance to start, killed by other
                         * thread for some reason
                         */
                        if (td->terminate) {
                                todo--;
                                continue;
                        }

                        if (td->o.start_delay) {
                                spent = utime_since_genesis();

                                if (td->o.start_delay > spent)
                                        continue;
                        }

                        if (td->o.stonewall && (nr_started || nr_running)) {
                                dprint(FD_PROCESS, "%s: stonewall wait\n",
                                                        td->o.name);
                                break;
                        }

                        init_disk_util(td);

                        td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED);
                        td->update_rusage = 0;

                        /*
                         * Set state to created. Thread will transition
                         * to TD_INITIALIZED when it's done setting up.
                         */
                        td_set_runstate(td, TD_CREATED);
                        map[this_jobs++] = td;
                        nr_started++;

                        if (td->o.use_thread) {
                                int ret;

                                dprint(FD_PROCESS, "will pthread_create\n");
                                ret = pthread_create(&td->thread, NULL,
                                                        thread_main, td);
                                if (ret) {
                                        log_err("pthread_create: %s\n",
                                                        strerror(ret));
                                        nr_started--;
                                        break;
                                }
                                ret = pthread_detach(td->thread);
                                if (ret)
                                        log_err("pthread_detach: %s\n",
                                                        strerror(ret));
                        } else {
                                pid_t pid;
                                dprint(FD_PROCESS, "will fork\n");
                                pid = fork();
                                if (!pid) {
                                        int ret = fork_main(shm_id, i);

                                        _exit(ret);
                                } else if (i == fio_debug_jobno)
                                        *fio_debug_jobp = pid;
                        }
                        dprint(FD_MUTEX, "wait on startup_mutex\n");
                        if (fio_mutex_down_timeout(startup_mutex, 10)) {
                                log_err("fio: job startup hung? exiting.\n");
                                fio_terminate_threads(TERMINATE_ALL);
                                fio_abort = 1;
                                nr_started--;
                                break;
                        }
                        dprint(FD_MUTEX, "done waiting on startup_mutex\n");
                }

                /*
                 * Wait for the started threads to transition to
                 * TD_INITIALIZED.
                 */
                fio_gettime(&this_start, NULL);
                left = this_jobs;
                while (left && !fio_abort) {
                        if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
                                break;

                        do_usleep(100000);

                        for (i = 0; i < this_jobs; i++) {
                                td = map[i];
                                if (!td)
                                        continue;
                                if (td->runstate == TD_INITIALIZED) {
                                        map[i] = NULL;
                                        left--;
                                } else if (td->runstate >= TD_EXITED) {
                                        map[i] = NULL;
                                        left--;
                                        todo--;
                                        nr_running++; /* work-around... */
                                }
                        }
                }

                if (left) {
                        log_err("fio: %d job%s failed to start\n", left,
                                        left > 1 ? "s" : "");
                        for (i = 0; i < this_jobs; i++) {
                                td = map[i];
                                if (!td)
                                        continue;
                                kill(td->pid, SIGTERM);
                        }
                        break;
                }

                /*
                 * start created threads (TD_INITIALIZED -> TD_RUNNING).
                 */
                for_each_td(td, i) {
                        if (td->runstate != TD_INITIALIZED)
                                continue;

                        if (in_ramp_time(td))
                                td_set_runstate(td, TD_RAMP);
                        else
                                td_set_runstate(td, TD_RUNNING);
                        nr_running++;
                        nr_started--;
                        m_rate += ddir_rw_sum(td->o.ratemin);
                        t_rate += ddir_rw_sum(td->o.rate);
                        todo--;
                        fio_mutex_up(td->mutex);
                }

                reap_threads(&nr_running, &t_rate, &m_rate);

                if (todo)
                        do_usleep(100000);
        }

        while (nr_running) {
                reap_threads(&nr_running, &t_rate, &m_rate);
                do_usleep(10000);
        }

        fio_idle_prof_stop();

        update_io_ticks();
}
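
/*
 * Illustrative sketch (not fio code): the startup handshake used above.
 * A worker raises startup_mutex once its setup is done, and
 * run_threads() blocks on that (with a timeout) before creating the
 * next job, which serializes worker bring-up.
 */
static void *example_worker(void *arg)
{
        /* ... per-job initialization would happen here ... */
        fio_mutex_up(startup_mutex);
        /* ... then wait to be released and do the actual io ... */
        return NULL;
}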

void wait_for_disk_thread_exit(void)
{
        fio_mutex_down(disk_thread_mutex);
}

static void free_disk_util(void)
{
        disk_util_start_exit();
        wait_for_disk_thread_exit();
        disk_util_prune_entries();
}

static void *disk_thread_main(void *data)
{
        int ret = 0;

        fio_mutex_up(startup_mutex);

        while (threads && !ret) {
                usleep(DISK_UTIL_MSEC * 1000);
                if (!threads)
                        break;
                ret = update_io_ticks();

                if (!is_backend)
                        print_thread_status();
        }

        fio_mutex_up(disk_thread_mutex);
        return NULL;
}

static int create_disk_util_thread(void)
{
        int ret;

        setup_disk_util();

        disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);

        ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
        if (ret) {
                fio_mutex_remove(disk_thread_mutex);
                log_err("Can't create disk util thread: %s\n", strerror(ret));
                return 1;
        }

        ret = pthread_detach(disk_util_thread);
        if (ret) {
                fio_mutex_remove(disk_thread_mutex);
                log_err("Can't detach disk util thread: %s\n", strerror(ret));
                return 1;
        }

        dprint(FD_MUTEX, "wait on startup_mutex\n");
        fio_mutex_down(startup_mutex);
        dprint(FD_MUTEX, "done waiting on startup_mutex\n");
        return 0;
}

int fio_backend(void)
{
        struct thread_data *td;
        int i;

        if (exec_profile) {
                if (load_profile(exec_profile))
                        return 1;
                free(exec_profile);
                exec_profile = NULL;
        }
        if (!thread_number)
                return 0;

        if (write_bw_log) {
                setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
                setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
                setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW);
        }

        startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
        if (startup_mutex == NULL)
                return 1;

        set_genesis_time();
        stat_init();
        /* bail out if the disk util thread failed to start */
        if (create_disk_util_thread()) {
                fio_mutex_remove(startup_mutex);
                stat_exit();
                return 1;
        }

        cgroup_list = smalloc(sizeof(*cgroup_list));
        INIT_FLIST_HEAD(cgroup_list);

        run_threads();

        if (!fio_abort) {
                show_run_stats();
                if (write_bw_log) {
                        __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
                        __finish_log(agg_io_log[DDIR_WRITE],
                                        "agg-write_bw.log");
                        __finish_log(agg_io_log[DDIR_TRIM],
                                        "agg-trim_bw.log");
                }
        }

        for_each_td(td, i)
                fio_options_free(td);

        free_disk_util();
        cgroup_kill(cgroup_list);
        sfree(cgroup_list);
        sfree(cgroup_mnt);

        fio_mutex_remove(startup_mutex);
        fio_mutex_remove(disk_thread_mutex);
        stat_exit();
        return exit_value;
}
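
/*
 * Illustrative sketch (not fio code): a minimal frontend around
 * fio_backend(). parse_cmd_line() is a hypothetical stand-in for fio's
 * real option/job-file parsing; the backend's return value is used as
 * the process exit code.
 */
int parse_cmd_line(int argc, char *argv[]);     /* hypothetical parser */

int example_main(int argc, char *argv[])
{
        if (parse_cmd_line(argc, argv))
                return 1;

        return fio_backend();
}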