Fix crash on thread exit
[fio.git] / fio.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"
#include "os.h"
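/*
 * IO buffers are aligned to the system page size: page_mask is
 * (page size - 1), and ALIGN() rounds a buffer pointer up to the
 * next page boundary.
 */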
static unsigned long page_mask;
#define ALIGN(buf) \
	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)

int groupid = 0;
int thread_number = 0;
int shm_id = 0;
int temp_stall_ts;

static volatile int startup_sem;
static volatile int fio_abort;
static int exit_value;

struct io_log *agg_io_log[2];

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

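/*
 * Flag all threads in the given group (or every thread, for
 * TERMINATE_ALL) to stop; on a forced kill, mark them exited as well.
 */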
static void terminate_threads(int group_id, int forced_kill)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
			if (forced_kill)
				td_set_runstate(td, TD_EXITED);
		}
	}
}

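/*
 * SIGALRM drives the periodic status and disk utilization updates;
 * any other handled signal terminates all jobs.
 */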
static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal %d\n", sig);
		fflush(stdout);
		terminate_threads(TERMINATE_ALL, 0);
		break;
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;
	int ddir = td->ddir;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

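/*
 * Returns 1 if the job's runtime limit (->timeout, in seconds) has
 * passed relative to its epoch, 0 if no limit is set or it has not.
 */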
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			/*
			 * if the io_u isn't in flight, then that generally
			 * means someone leaked an io_u. complain but fix
			 * it up, so we don't stall here.
			 */
			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
				log_err("fio: non-busy IO on busy list\n");
				put_io_u(td, io_u);
			} else {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error);
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error);
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int ret, i, min_events;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (runtime_exceeded(td, &io_u->start_time)) {
			put_io_u(td, io_u);
			break;
		}

		if (get_next_verify(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		if (td_io_prep(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			ret = io_u_sync_complete(td, io_u, verify_io_u);
			if (ret < 0)
				break;
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first.
		 */
		min_events = 0;
		if (queue_full(td) || ret == FIO_Q_BUSY) {
			min_events = 1;

			if (td->cur_depth > td->iodepth_low)
				min_events = td->cur_depth - td->iodepth_low;
		}

		/*
		 * Reap required number of io units, if any, and do the
		 * verification on them through the callback handler
		 */
		if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
			break;
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * manner.
 */
static void do_cpuio(struct thread_data *td)
{
	struct timeval e;
	int split = 100 / td->cpuload;
	int i = 0;

	while (!td->terminate) {
		fio_gettime(&e, NULL);

		if (runtime_exceeded(td, &e))
			break;

		if (!(i % split))
			__usec_sleep(10000);
		else
			usec_sleep(td, 10000);

		i++;
	}
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct timeval s;
	unsigned long usec;
	int i, ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
		struct timeval comp_time;
		long bytes_done = 0;
		int min_evts = 0;
		struct io_u *io_u;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		if (runtime_exceeded(td, &s)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = io_u->error;
				break;
			}
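			/*
			 * A short transfer leaves ->resid set; shrink the
			 * buffer to the residual and requeue the remainder.
			 */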
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_sync_complete(td, io_u, NULL);
			if (bytes_done < 0)
				ret = bytes_done;
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * See if we need to complete some commands
		 */
		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
			min_evts = 0;
			if (queue_full(td) || ret == FIO_Q_BUSY) {
				min_evts = 1;

				if (td->cur_depth > td->iodepth_low)
					min_evts = td->cur_depth - td->iodepth_low;
			}

			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_queued_complete(td, min_evts, NULL);
			if (bytes_done < 0)
				break;
		}

		if (!bytes_done)
			continue;

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		usec = utime_since(&s, &comp_time);

		rate_throttle(td, usec, bytes_done, td->ddir);

		if (check_min_rate(td, &comp_time)) {
			if (exitall_on_terminate)
				terminate_threads(td->groupid, 0);
			td_verror(td, ENODATA);
			break;
		}

		if (td->thinktime) {
			unsigned long long b;

			b = td->io_blocks[0] + td->io_blocks[1];
			if (!(b % td->thinktime_blocks)) {
				int left;

				if (td->thinktime_spin)
					__usec_sleep(td->thinktime_spin);

				left = td->thinktime - td->thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	if (!td->error) {
		struct fio_file *f;

		if (td->cur_depth)
			cleanup_pending_aio(td);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				fio_io_sync(td, f);
		}
	}
}

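/*
 * Free all io_u structures on the free list and release the job's
 * IO memory region.
 */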
static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

/*
 * "randomly" fill the buffer contents
 */
static void fill_rand_buf(struct io_u *io_u, int max_bs)
{
	int *ptr = io_u->buf;

	while ((void *) ptr - io_u->buf < max_bs) {
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

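/*
 * Allocate the io_u structures and one contiguous, page-aligned IO
 * buffer large enough for the maximum block size at full queue depth.
 * Write buffers are pre-filled with random data.
 */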
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
	td->orig_buffer_size = max_bs * max_units;

	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
	else
		td->orig_buffer_size += page_mask;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + max_bs * i;
		if (td_write(td) || td_rw(td))
			fill_rand_buf(io_u, max_bs);

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	io_u_init_timeout();

	return 0;
}

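/*
 * Switch the device's io scheduler by writing to the sysfs scheduler
 * file, then read it back to verify the kernel accepted it.
 */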
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		td_verror(td, errno);
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL);
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

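/*
 * Reset per-loop IO state so a repeated run (or verify pass) starts
 * from a clean slate: byte counters, file positions and block maps.
 */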
static void clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;

	td->last_was_sync = 0;

	for_each_file(td, f, i) {
		f->last_completed_pos = 0;

		f->last_pos = 0;
		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		if (f->file_map)
			memset(f->file_map, 0, f->num_maps * sizeof(long));
	}
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long runtime[2];
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_u_requeues);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_iolog(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);
			goto err;
		}
	}

	if (nice(td->nice) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_random_state(td))
		goto err;

	if (td->ioscheduler && switch_ioscheduler(td))
		goto err;

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);

	if (!td->create_serialize && setup_files(td))
		goto err;
	if (open_files(td))
		goto err;

	/*
	 * Do this late, as some IO engines would like to have the
	 * files setup prior to initializing structures.
	 */
	if (td_io_init(td))
		goto err;

	if (td->exec_prerun) {
		if (system(td->exec_prerun) < 0)
			goto err;
	}

	fio_gettime(&td->epoch, NULL);
	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
	getrusage(RUSAGE_SELF, &td->ts.ru_start);

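	/*
	 * Main job loop: run the IO (or cpuio) phase for each configured
	 * loop, followed by a verify pass when verification is enabled.
	 */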
	runtime[0] = runtime[1] = 0;
	while (td->loops--) {
		fio_gettime(&td->start, NULL);
		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)
			do_cpuio(td);
		else
			do_io(td);

		runtime[td->ddir] += utime_since_now(&td->start);
		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
			runtime[td->ddir ^ 1] = runtime[td->ddir];

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		fio_gettime(&td->start, NULL);

		do_verify(td);

		runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	fio_gettime(&td->end_time, NULL);
	td->runtime[0] = runtime[0] / 1000;
	td->runtime[1] = runtime[1] / 1000;

	if (td->ts.bw_log)
		finish_log(td, td->ts.bw_log, "bw");
	if (td->ts.slat_log)
		finish_log(td, td->ts.slat_log, "slat");
	if (td->ts.clat_log)
		finish_log(td, td->ts.clat_log, "clat");
	if (td->write_iolog_file)
		write_iolog_close(td);
	if (td->exec_postrun) {
		if (system(td->exec_postrun) < 0)
			log_err("fio: postrun %s failed\n", td->exec_postrun);
	}

	if (exitall_on_terminate)
		terminate_threads(td->groupid, 0);

err:
	if (td->error)
		printf("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	td_set_runstate(td, TD_EXITED);
	return (void *) (unsigned long) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads, pending, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
			cputhreads++;

		if (!td->pid || td->runstate == TD_REAPED)
			continue;

		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0 && !td->use_thread) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);
				if (td->use_thread) {
					long ret;

					if (pthread_join(td->thread, (void *) &ret))
						perror("pthread_join");
				}
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * thread is not dead, count it as pending and continue
		 */
		pending++;
		continue;
reaped:
		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;

		if (td->error)
			exit_value++;
	}

	if (*nr_running == cputhreads && !pending)
		terminate_threads(TERMINATE_ALL, 0);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->create_serialize) {
			init_disk_util(td);
			continue;
		}

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		}

		init_disk_util(td);
	}

	set_genesis_time();

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					fio_sem_down(&startup_sem);
				else {
					int ret = fork_main(shm_id, i);

					exit(ret);
				}
			}
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			todo--;
			fio_sem_up(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

int main(int argc, char *argv[])
{
	long ps;

	/*
	 * We need locale for number printing, if it isn't set then just
	 * go with the US format.
	 */
	if (!getenv("LC_NUMERIC"))
		setlocale(LC_NUMERIC, "en_US");

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		log_err("Nothing to do\n");
		return 1;
	}

	ps = sysconf(_SC_PAGESIZE);
	if (ps < 0) {
		log_err("Failed to get page size\n");
		return 1;
	}

	page_mask = ps - 1;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ]);
		setup_log(&agg_io_log[DDIR_WRITE]);
	}

	set_genesis_time();

	disk_util_timer_arm();

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
		}
	}

	return exit_value;
}