/* [fio.git] / fio.c -- commit: "Still need to put io_u on early exit" */

/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"
#include "os.h"

static unsigned long page_mask;
#define ALIGN(buf) \
	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)

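/*
 * Example of the alignment arithmetic (a sketch assuming 4096 byte pages,
 * so page_mask == 0xfff): ALIGN() rounds a pointer up to the next page
 * boundary, e.g. 0x804a123 -> 0x804b000.  The allocation in init_io_u()
 * below adds page_mask bytes of slack so this rounding cannot step past
 * the end of the buffer.
 */
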
int groupid = 0;
int thread_number = 0;
int shm_id = 0;
int temp_stall_ts;

static volatile int startup_sem;
static volatile int fio_abort;
static int exit_value;

struct io_log *agg_io_log[2];

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

static void terminate_threads(int group_id, int forced_kill)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
			if (forced_kill)
				td_set_runstate(td, TD_EXITED);
		}
	}
}

static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal %d\n", sig);
		fflush(stdout);
		terminate_threads(TERMINATE_ALL, 0);
		break;
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;
	int ddir = td->ddir;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

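/*
 * Unit note for the computation above: spent is in milliseconds, so rate
 * comes out in bytes-per-millisecond, which the message reports as
 * KiB/sec (overstating by the 1000 vs 1024 factor, roughly 2.4%).  For
 * example, 50 MiB moved in 10000 msec gives 52428800 / 10000 = 5242,
 * reported as ~5242 KiB/sec versus a true 5120 KiB/sec.
 */
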
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			r = td->io_ops->cancel(td, io_u);
			if (!r)
				put_io_u(td, io_u);
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error);
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error);
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

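/*
 * Summary of the td_io_queue() return convention, as handled above and in
 * do_verify()/do_io() below:
 *
 *	FIO_Q_COMPLETED: the io_u completed inline; io_u->error and
 *			 io_u->resid must be inspected (short transfers
 *			 are resubmitted for the remainder).
 *	FIO_Q_QUEUED:	 the engine queued it; it is reaped later through
 *			 io_u_queued_complete()/io_u_sync_complete().
 *	FIO_Q_BUSY:	 the engine could not accept more IO right now;
 *			 commit what is pending with td_io_commit() and
 *			 retry the submission.
 *	< 0:		 hard queueing error, recorded via td_verror().
 */
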
/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int ret, i, min_events;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (runtime_exceeded(td, &io_u->start_time)) {
			put_io_u(td, io_u);
			break;
		}

		if (get_next_verify(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		if (td_io_prep(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			ret = io_u_sync_complete(td, io_u, verify_io_u);
			if (ret < 0)
				break;
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first.
		 */
		min_events = 0;
		if (queue_full(td) || ret == FIO_Q_BUSY) {
			min_events = 1;

			if (td->cur_depth > td->iodepth_low)
				min_events = td->cur_depth - td->iodepth_low;
		}

		/*
		 * Reap required number of io units, if any, and do the
		 * verification on them through the callback handler
		 */
		if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
			break;
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

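/*
 * Worked example of the reap heuristic above (numbers are illustrative):
 * with iodepth=16 and iodepth_low=4, once the queue is full we ask for
 * min_events = 16 - 4 = 12 completions, draining down to 4 in-flight
 * io_u's before going back to submitting.  This batches completion
 * processing instead of reaping one event at a time.
 */
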
/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * manner.
 */
static void do_cpuio(struct thread_data *td)
{
	struct timeval e;
	int split = 100 / td->cpuload;
	int i = 0;

	while (!td->terminate) {
		fio_gettime(&e, NULL);

		if (runtime_exceeded(td, &e))
			break;

		if (!(i % split))
			__usec_sleep(10000);
		else
			usec_sleep(td, 10000);

		i++;
	}
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct timeval s;
	unsigned long usec;
	int i, ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
		struct timeval comp_time;
		long bytes_done = 0;
		int min_evts = 0;
		struct io_u *io_u;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		if (runtime_exceeded(td, &s)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = io_u->error;
				break;
			}
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_sync_complete(td, io_u, NULL);
			if (bytes_done < 0)
				ret = bytes_done;
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * See if we need to complete some commands
		 */
		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
			min_evts = 0;
			if (queue_full(td) || ret == FIO_Q_BUSY) {
				min_evts = 1;

				if (td->cur_depth > td->iodepth_low)
					min_evts = td->cur_depth - td->iodepth_low;
			}

			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_queued_complete(td, min_evts, NULL);
			if (bytes_done < 0)
				break;
		}

		if (!bytes_done)
			continue;

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		usec = utime_since(&s, &comp_time);

		rate_throttle(td, usec, bytes_done, td->ddir);

		if (check_min_rate(td, &comp_time)) {
			if (exitall_on_terminate)
				terminate_threads(td->groupid, 0);
			td_verror(td, ENODATA);
			break;
		}

		if (td->thinktime) {
			unsigned long long b;

			b = td->io_blocks[0] + td->io_blocks[1];
			if (!(b % td->thinktime_blocks)) {
				int left;

				if (td->thinktime_spin)
					__usec_sleep(td->thinktime_spin);

				left = td->thinktime - td->thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	if (!td->error) {
		struct fio_file *f;

		if (td->cur_depth)
			cleanup_pending_aio(td);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				fio_io_sync(td, f);
		}
	}
}

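/*
 * Illustration of the thinktime logic above (numbers are made up): with
 * thinktime=1000, thinktime_spin=100 and thinktime_blocks=32, every 32nd
 * completed block pauses the job for 1000 usec total, the first 100 usec
 * through the raw __usec_sleep() path and the remaining 900 usec through
 * the accounted usec_sleep() path.
 */
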
static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

/*
 * "randomly" fill the buffer contents
 */
static void fill_rand_buf(struct io_u *io_u, int max_bs)
{
	int *ptr = io_u->buf;

	while ((void *) ptr - io_u->buf < max_bs) {
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

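/*
 * Note on the constant above: 0x9e370001 is the 32-bit golden-ratio
 * prime also used by the Linux kernel's hash functions.  Multiplying
 * each rand() value by it scatters the bits, presumably so freshly
 * written buffers do not contain trivially repeating patterns.
 */
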
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
	td->orig_buffer_size = max_bs * max_units;

	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
	else
		td->orig_buffer_size += page_mask;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + max_bs * i;
		if (td_write(td) || td_rw(td))
			fill_rand_buf(io_u, max_bs);

		io_u->index = i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}

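/*
 * Sizing example for the allocation above (numbers are illustrative):
 * an async engine with iodepth=16 and max_bs=128KiB needs
 * 16 * 128KiB = 2MiB of buffer space.  With hugepage-backed memory that
 * total is rounded up to a hugepage multiple; otherwise page_mask bytes
 * of slack are added so ALIGN() can round the base pointer up to a page
 * boundary without overrunning the allocation.
 */
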
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		td_verror(td, errno);
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL);
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

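/*
 * The read-back check above relies on the kernel's sysfs scheduler file
 * format, where the active scheduler is shown in brackets.  For example,
 * after writing "deadline" to <sysfs_root>/queue/scheduler, reading the
 * file back yields something like:
 *
 *	noop anticipatory [deadline] cfq
 *
 * so searching for the "[deadline]" substring confirms the switch took
 * effect (exact scheduler lists vary by kernel).
 */
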
static void clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;

	td->last_was_sync = 0;

	for_each_file(td, f, i) {
		f->last_completed_pos = 0;

		f->last_pos = 0;
		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		if (f->file_map)
			memset(f->file_map, 0, f->num_maps * sizeof(long));
	}
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long runtime[2];
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_u_requeues);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_iolog(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);
			goto err;
		}
	}

	if (nice(td->nice) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_random_state(td))
		goto err;

	if (td->ioscheduler && switch_ioscheduler(td))
		goto err;

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);

	if (!td->create_serialize && setup_files(td))
		goto err;
	if (open_files(td))
		goto err;

	/*
	 * Do this late, as some IO engines would like to have the
	 * files setup prior to initializing structures.
	 */
	if (td_io_init(td))
		goto err;

	if (td->exec_prerun) {
		if (system(td->exec_prerun) < 0)
			goto err;
	}

	fio_gettime(&td->epoch, NULL);
	getrusage(RUSAGE_SELF, &td->ts.ru_start);

	runtime[0] = runtime[1] = 0;
	while (td->loops--) {
		fio_gettime(&td->start, NULL);
		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)
			do_cpuio(td);
		else
			do_io(td);

		runtime[td->ddir] += utime_since_now(&td->start);
		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
			runtime[td->ddir ^ 1] = runtime[td->ddir];

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		fio_gettime(&td->start, NULL);

		do_verify(td);

		runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	fio_gettime(&td->end_time, NULL);
	td->runtime[0] = runtime[0] / 1000;
	td->runtime[1] = runtime[1] / 1000;

	if (td->ts.bw_log)
		finish_log(td, td->ts.bw_log, "bw");
	if (td->ts.slat_log)
		finish_log(td, td->ts.slat_log, "slat");
	if (td->ts.clat_log)
		finish_log(td, td->ts.clat_log, "clat");
	if (td->write_iolog_file)
		write_iolog_close(td);
	if (td->exec_postrun) {
		if (system(td->exec_postrun) < 0)
			log_err("fio: postrun %s failed\n", td->exec_postrun);
	}

	if (exitall_on_terminate)
		terminate_threads(td->groupid, 0);

err:
	if (td->error)
		printf("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	td_set_runstate(td, TD_EXITED);
	return (void *) (unsigned long) td->error;
}

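/*
 * Startup handshake, as coordinated with run_threads() below: once a job
 * reaches TD_INITIALIZED it raises startup_sem so the main thread knows
 * setup succeeded, then blocks on its private td->mutex.  The main
 * thread later releases td->mutex for every initialized job in the
 * batch at once, so those jobs start running at the same time.
 */
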
/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads, pending, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	pending = cputhreads = 0;
	for_each_td(td, i) {
		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
			cputhreads++;

		if (td->runstate < TD_EXITED) {
			/*
			 * check if someone quit or got killed in an unusual way
			 */
			ret = waitpid(td->pid, &status, WNOHANG);
			if (ret < 0)
				perror("waitpid");
			else if ((ret == td->pid) && WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		if (td->runstate != TD_EXITED) {
			if (td->runstate < TD_RUNNING)
				pending++;

			continue;
		}

		if (td->error)
			exit_value++;

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");
		} else {
			int status;

			ret = waitpid(td->pid, &status, 0);
			if (ret < 0)
				perror("waitpid");
			else if (WIFEXITED(status) && WEXITSTATUS(status)) {
				if (!exit_value)
					exit_value++;
			}
		}

reaped:
		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
	}

	if (*nr_running == cputhreads && !pending)
		terminate_threads(TERMINATE_ALL, 0);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->create_serialize) {
			init_disk_util(td);
			continue;
		}

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		}

		init_disk_util(td);
	}

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					fio_sem_down(&startup_sem);
				else {
					int ret = fork_main(shm_id, i);

					exit(ret);
				}
			}
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			todo--;
			fio_sem_up(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

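/*
 * Job lifecycle, as driven by run_threads() and reap_threads() above:
 *
 *	TD_NOT_CREATED -> TD_CREATED      (thread/process spawned)
 *	TD_CREATED     -> TD_INITIALIZED  (thread_main() setup done)
 *	TD_INITIALIZED -> TD_RUNNING      (td->mutex released)
 *	TD_RUNNING     -> TD_VERIFYING / TD_FSYNCING  (transient states)
 *	TD_RUNNING     -> TD_EXITED       (job finished or errored)
 *	TD_EXITED      -> TD_REAPED       (waitpid()/pthread_join() done)
 */
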
int main(int argc, char *argv[])
{
	long ps;

	/*
	 * We need locale for number printing, if it isn't set then just
	 * go with the US format.
	 */
	if (!getenv("LC_NUMERIC"))
		setlocale(LC_NUMERIC, "en_US");

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		log_err("Nothing to do\n");
		return 1;
	}

	ps = sysconf(_SC_PAGESIZE);
	if (ps < 0) {
		log_err("Failed to get page size\n");
		return 1;
	}

	page_mask = ps - 1;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ]);
		setup_log(&agg_io_log[DDIR_WRITE]);
	}

	disk_util_timer_arm();

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
		}
	}

	return exit_value;
}