Improve error logging and handling
[fio.git] / fio.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"
#include "os.h"

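/*
 * Mask of the system page size, used by ALIGN() to round the io buffer
 * pointer up to a page boundary.
 */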
static unsigned long page_mask;
#define ALIGN(buf) \
	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)

int groupid = 0;
int thread_number = 0;
int shm_id = 0;
int temp_stall_ts;

static volatile int startup_sem;
static volatile int fio_abort;
static int exit_value;

struct io_log *agg_io_log[2];

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

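/*
 * Flag all threads in the given group (or all groups) for termination.
 */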
static void terminate_threads(int group_id, int forced_kill)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
			if (forced_kill)
				td_set_runstate(td, TD_EXITED);
		}
	}
}

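/*
 * SIGALRM drives the periodic disk utilization and status updates; any
 * other handled signal requests termination of all jobs.
 */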
static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal %d\n", sig);
		fflush(stdout);
		terminate_threads(TERMINATE_ALL, 0);
		break;
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long long bytes = 0;
	unsigned long spent;
	unsigned long rate;

	/*
	 * No minimum rate set, always ok
	 */
	if (!td->ratemin)
		return 0;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	if (td_read(td))
		bytes += td->this_io_bytes[DDIR_READ];
	if (td_write(td))
		bytes += td->this_io_bytes[DDIR_WRITE];

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		if (bytes < td->rate_bytes) {
			fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
			return 1;
		} else {
			rate = (bytes - td->rate_bytes) / spent;
			if (rate < td->ratemin || bytes < td->rate_bytes) {
				fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
				return 1;
			}
		}
	}

	td->rate_bytes = bytes;
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

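/*
 * Return 1 if the job has run for longer than its configured runtime.
 */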
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			/*
			 * if the io_u isn't in flight, then that generally
			 * means someone leaked an io_u. complain but fix
			 * it up, so we don't stall here.
			 */
			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
				log_err("fio: non-busy IO on busy list\n");
				put_io_u(td, io_u);
			} else {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error, "td_io_queue");
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error, "td_io_queue");
			return 1;
		}

		if (io_u_sync_complete(td, io_u) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int ret, i, min_events;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		int ret2;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (runtime_exceeded(td, &io_u->start_time)) {
			put_io_u(td, io_u);
			break;
		}

		if (get_next_verify(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		if (td_io_prep(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		io_u->end_io = verify_io_u;
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			ret = io_u_sync_complete(td, io_u);
			if (ret < 0)
				break;
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret, "td_io_queue");
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first.
		 */
		min_events = 0;
		if (queue_full(td) || ret == FIO_Q_BUSY) {
			min_events = 1;

			if (td->cur_depth > td->iodepth_low)
				min_events = td->cur_depth - td->iodepth_low;
		}

		/*
		 * Reap required number of io units, if any, and do the
		 * verification on them through the callback handler
		 */
		if (io_u_queued_complete(td, min_events) < 0)
			break;
	}

	if (!td->error) {
		min_events = td->cur_depth;

		if (min_events)
			ret = io_u_queued_complete(td, min_events);
	} else
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * manner.
 */
static void do_cpuio(struct thread_data *td)
{
	struct timeval e;
	int split = 100 / td->cpuload;
	int i = 0;

	while (!td->terminate) {
		fio_gettime(&e, NULL);

		if (runtime_exceeded(td, &e))
			break;

		if (!(i % split))
			__usec_sleep(10000);
		else
			usec_sleep(td, 10000);

		i++;
	}
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct timeval s;
	unsigned long usec;
	int i, ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
		struct timeval comp_time;
		long bytes_done = 0;
		int min_evts = 0;
		struct io_u *io_u;
		int ret2;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		if (runtime_exceeded(td, &s)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			else if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_sync_complete(td, io_u);
			if (bytes_done < 0)
				ret = bytes_done;
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * See if we need to complete some commands
		 */
		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
			min_evts = 0;
			if (queue_full(td) || ret == FIO_Q_BUSY) {
				min_evts = 1;

				if (td->cur_depth > td->iodepth_low)
					min_evts = td->cur_depth - td->iodepth_low;
			}

			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_queued_complete(td, min_evts);
			if (bytes_done < 0)
				break;
		}

		if (!bytes_done)
			continue;

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		usec = utime_since(&s, &comp_time);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &comp_time)) {
			if (exitall_on_terminate)
				terminate_threads(td->groupid, 0);
			td_verror(td, ENODATA, "check_min_rate");
			break;
		}

		if (td->thinktime) {
			unsigned long long b;

			b = td->io_blocks[0] + td->io_blocks[1];
			if (!(b % td->thinktime_blocks)) {
				int left;

				if (td->thinktime_spin)
					__usec_sleep(td->thinktime_spin);

				left = td->thinktime - td->thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	if (!td->error) {
		struct fio_file *f;

		i = td->cur_depth;
		if (i)
			ret = io_u_queued_complete(td, i);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				fio_io_sync(td, f);
		}
	} else
		cleanup_pending_aio(td);
}

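/*
 * Free the io_u structures on the free list and release the io memory.
 */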
static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

/*
 * "randomly" fill the buffer contents
 */
static void fill_rand_buf(struct io_u *io_u, int max_bs)
{
	int *ptr = io_u->buf;

	while ((void *) ptr - io_u->buf < max_bs) {
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

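/*
 * Allocate the io memory for this job and set up the io_u structures
 * that are handed to the io engine, placing them on the free list.
 */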
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
	td->orig_buffer_size = max_bs * max_units;

	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
	else
		td->orig_buffer_size += page_mask;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + max_bs * i;
		if (td_write(td) || td_rw(td))
			fill_rand_buf(io_u, max_bs);

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	io_u_init_timeout();

	return 0;
}

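/*
 * Switch the io scheduler for the job's device through sysfs and verify
 * that the kernel accepted the change.
 */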
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		td_verror(td, errno, "fopen");
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno, "fwrite");
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno, "fread");
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL, "iosched_switch");
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

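/*
 * Reset the per-loop io state, so a following loop or verify pass starts
 * with clean counters and file positions.
 */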
static void clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;

	td->last_was_sync = 0;

	for_each_file(td, f, i) {
		f->last_completed_pos = 0;

		f->last_pos = 0;
		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		if (f->file_map)
			memset(f->file_map, 0, f->num_maps * sizeof(long));
	}
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long runtime[2];
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_u_requeues);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno, "cpu_set_affinity");
		goto err;
	}

	if (init_iolog(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno, "ioprio_set");
			goto err;
		}
	}

	if (nice(td->nice) == -1) {
		td_verror(td, errno, "nice");
		goto err;
	}

	if (init_random_state(td))
		goto err;

	if (td->ioscheduler && switch_ioscheduler(td))
		goto err;

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);

	if (!td->create_serialize && setup_files(td))
		goto err;
	if (open_files(td))
		goto err;

	/*
	 * Do this late, as some IO engines would like to have the
	 * files setup prior to initializing structures.
	 */
	if (td_io_init(td))
		goto err;

	if (td->exec_prerun) {
		if (system(td->exec_prerun) < 0)
			goto err;
	}

	fio_gettime(&td->epoch, NULL);
	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
	getrusage(RUSAGE_SELF, &td->ts.ru_start);

	runtime[0] = runtime[1] = 0;
	while (td->loops--) {
		fio_gettime(&td->start, NULL);
		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)
			do_cpuio(td);
		else
			do_io(td);

		if (td_read(td) && td->io_bytes[DDIR_READ])
			runtime[DDIR_READ] += utime_since_now(&td->start);
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			runtime[DDIR_WRITE] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		fio_gettime(&td->start, NULL);

		do_verify(td);

		runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	fio_gettime(&td->end_time, NULL);
	td->runtime[0] = runtime[0] / 1000;
	td->runtime[1] = runtime[1] / 1000;

	if (td->ts.bw_log)
		finish_log(td, td->ts.bw_log, "bw");
	if (td->ts.slat_log)
		finish_log(td, td->ts.slat_log, "slat");
	if (td->ts.clat_log)
		finish_log(td, td->ts.clat_log, "clat");
	if (td->write_iolog_file)
		write_iolog_close(td);
	if (td->exec_postrun) {
		if (system(td->exec_postrun) < 0)
			log_err("fio: postrun %s failed\n", td->exec_postrun);
	}

	if (exitall_on_terminate)
		terminate_threads(td->groupid, 0);

err:
	if (td->error)
		printf("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	td_set_runstate(td, TD_EXITED);
	return (void *) (unsigned long) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads, pending, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags = 0;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
			cputhreads++;

		if (!td->pid || td->runstate == TD_REAPED)
			continue;
		if (td->use_thread) {
			if (td->runstate == TD_EXITED) {
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			continue;
		}

		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);

				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * thread is not dead, continue
		 */
		continue;
reaped:
		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("pthread_join");
		}

		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;

		if (td->error)
			exit_value++;
	}

	if (*nr_running == cputhreads && !pending)
		terminate_threads(TERMINATE_ALL, 0);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->create_serialize) {
			init_disk_util(td);
			continue;
		}

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		}

		init_disk_util(td);
	}

	set_genesis_time();

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					fio_sem_down(&startup_sem);
				else {
					int ret = fork_main(shm_id, i);

					exit(ret);
				}
			}
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			todo--;
			fio_sem_up(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

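/*
 * Parse the job options, set up global state and kick off all jobs.
 */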
int main(int argc, char *argv[])
{
	long ps;

	/*
	 * We need locale for number printing, if it isn't set then just
	 * go with the US format.
	 */
	if (!getenv("LC_NUMERIC"))
		setlocale(LC_NUMERIC, "en_US");

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		log_err("Nothing to do\n");
		return 1;
	}

	ps = sysconf(_SC_PAGESIZE);
	if (ps < 0) {
		log_err("Failed to get page size\n");
		return 1;
	}

	page_mask = ps - 1;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ]);
		setup_log(&agg_io_log[DDIR_WRITE]);
	}

	set_genesis_time();

	disk_util_timer_arm();

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
		}
	}

	return exit_value;
}