Fix double io_u free on error in do_verify()
[fio.git] / fio.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"
#include "os.h"

static unsigned long page_mask;
#define ALIGN(buf) \
	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)

int groupid = 0;
int thread_number = 0;
int shm_id = 0;
int temp_stall_ts;

static volatile int startup_sem;
static volatile int fio_abort;
static int exit_value;

struct io_log *agg_io_log[2];

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

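/*
 * Mark a group of threads (or all of them, with TERMINATE_ALL) for
 * termination. With forced_kill set, the threads are flagged as exited
 * right away instead of being allowed to finish up.
 */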
static void terminate_threads(int group_id, int forced_kill)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
			if (forced_kill)
				td_set_runstate(td, TD_EXITED);
		}
	}
}

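/*
 * SIGALRM drives the periodic status and disk utilization updates;
 * any other handled signal requests that all threads terminate.
 */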
static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal %d\n", sig);
		fflush(stdout);
		terminate_threads(TERMINATE_ALL, 0);
		break;
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;
	int ddir = td->ddir;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate_bytes is set, we have at least one sample already
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

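/*
 * Return true once the job has been running longer than its
 * configured runtime limit.
 */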
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

/*
 * When the job exits, we can cancel the in-flight IO if we are using
 * async io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			r = td->io_ops->cancel(td, io_u);
			if (!r)
				put_io_u(td, io_u);
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error);
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL))
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error);
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int ret, i, min_events;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (runtime_exceeded(td, &io_u->start_time))
			break;

		if (get_next_verify(td, io_u))
			break;

		if (td_io_prep(td, io_u))
			break;

requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
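			/*
			 * Short transfer: shrink the buffer to the
			 * remainder and requeue it.
			 */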
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
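			/*
			 * io_u_sync_complete() always puts the io_u,
			 * even when verify fails; on error just break
			 * out, another put_io_u() here would free the
			 * io_u twice.
			 */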
			ret = io_u_sync_complete(td, io_u, verify_io_u);
			if (ret)
				break;
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first.
		 */
		min_events = 0;
		if (queue_full(td) || ret == FIO_Q_BUSY) {
			min_events = 1;

			if (td->cur_depth > td->iodepth_low)
				min_events = td->cur_depth - td->iodepth_low;
		}

		/*
		 * Reap required number of io units, if any, and do the
		 * verification on them through the callback handler
		 */
		if (io_u_queued_complete(td, min_events, verify_io_u))
			break;
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

/*
 * Not really an io thread, all it does is burn CPU cycles in the specified
 * manner.
 */
static void do_cpuio(struct thread_data *td)
{
	struct timeval e;
	int split = 100 / td->cpuload;
	int i = 0;

	while (!td->terminate) {
		fio_gettime(&e, NULL);

		if (runtime_exceeded(td, &e))
			break;

		if (!(i % split))
			__usec_sleep(10000);
		else
			usec_sleep(td, 10000);

		i++;
	}
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct timeval s;
	unsigned long usec;
	int i, ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
		struct timeval comp_time;
		long bytes_done = 0;
		int min_evts = 0;
		struct io_u *io_u;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		if (runtime_exceeded(td, &s)) {
			put_io_u(td, io_u);
			break;
		}
requeue:
		ret = td_io_queue(td, io_u);

		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = io_u->error;
				break;
			}
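			/*
			 * Short transfer: shrink the buffer to the
			 * remainder and requeue it.
			 */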
			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				goto requeue;
			}
			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_sync_complete(td, io_u, NULL);
			if (bytes_done < 0)
				ret = bytes_done;
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret = td_io_commit(td);
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * See if we need to complete some commands
		 */
		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
			min_evts = 0;
			if (queue_full(td) || ret == FIO_Q_BUSY) {
				min_evts = 1;

				if (td->cur_depth > td->iodepth_low)
					min_evts = td->cur_depth - td->iodepth_low;
			}

			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_queued_complete(td, min_evts, NULL);
			if (bytes_done < 0)
				break;
		}

		if (!bytes_done)
			continue;

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		usec = utime_since(&s, &comp_time);

		rate_throttle(td, usec, bytes_done, td->ddir);

		if (check_min_rate(td, &comp_time)) {
			if (exitall_on_terminate)
				terminate_threads(td->groupid, 0);
			td_verror(td, ENODATA);
			break;
		}

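		/*
		 * Honor the thinktime setting: every thinktime_blocks
		 * blocks, spin for thinktime_spin usecs and then sleep
		 * for the remainder of the thinktime.
		 */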
		if (td->thinktime) {
			unsigned long long b;

			b = td->io_blocks[0] + td->io_blocks[1];
			if (!(b % td->thinktime_blocks)) {
				int left;

				if (td->thinktime_spin)
					__usec_sleep(td->thinktime_spin);

				left = td->thinktime - td->thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	if (!td->error) {
		struct fio_file *f;

		if (td->cur_depth)
			cleanup_pending_aio(td);

		if (should_fsync(td) && td->end_fsync) {
			td_set_runstate(td, TD_FSYNCING);
			for_each_file(td, f, i)
				fio_io_sync(td, f);
		}
	}
}

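/*
 * Free the io_u structures on the free list and release the backing
 * io memory.
 */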
static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

/*
 * "randomly" fill the buffer contents
 */
static void fill_rand_buf(struct io_u *io_u, int max_bs)
{
	int *ptr = io_u->buf;

	while ((void *) ptr - io_u->buf < max_bs) {
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

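/*
 * Allocate the io_u structures for this job and carve the io buffer
 * area up between them.
 */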
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
	td->orig_buffer_size = max_bs * max_units;

	if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
		td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
	else
		td->orig_buffer_size += page_mask;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + max_bs * i;
		if (td_write(td) || td_rw(td))
			fill_rand_buf(io_u, max_bs);

		io_u->index = i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}

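/*
 * Switch the io scheduler for the device backing this job through
 * sysfs, and verify that the change took effect.
 */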
static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_CPUIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		td_verror(td, errno);
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno);
		fclose(f);
		return 1;
	}

	sprintf(tmp2, "[%s]", td->ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->ioscheduler);
		td_verror(td, EINVAL);
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

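/*
 * Reset the per-run io state, so that a repeated run (multiple loops,
 * or the verify pass) starts from a clean slate.
 */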
static void clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;

	td->last_was_sync = 0;

	for_each_file(td, f, i) {
		f->last_completed_pos = 0;

		f->last_pos = 0;
		if (td->io_ops->flags & FIO_SYNCIO)
			lseek(f->fd, 0, SEEK_SET);

		if (f->file_map)
			memset(f->file_map, 0, f->num_maps * sizeof(long));
	}
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long runtime[2];
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_u_requeues);
	INIT_LIST_HEAD(&td->io_hist_list);
	INIT_LIST_HEAD(&td->io_log_list);

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_iolog(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);
			goto err;
		}
	}

	if (nice(td->nice) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_random_state(td))
		goto err;

	if (td->ioscheduler && switch_ioscheduler(td))
		goto err;

	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(&startup_sem);
	fio_sem_down(&td->mutex);

	if (!td->create_serialize && setup_files(td))
		goto err;
	if (open_files(td))
		goto err;

	/*
	 * Do this late, as some IO engines would like to have the
	 * files setup prior to initializing structures.
	 */
	if (td_io_init(td))
		goto err;

	if (td->exec_prerun) {
		if (system(td->exec_prerun) < 0)
			goto err;
	}

	fio_gettime(&td->epoch, NULL);
	getrusage(RUSAGE_SELF, &td->ts.ru_start);

	runtime[0] = runtime[1] = 0;
	while (td->loops--) {
		fio_gettime(&td->start, NULL);
		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (td->io_ops->flags & FIO_CPUIO)
			do_cpuio(td);
		else
			do_io(td);

		runtime[td->ddir] += utime_since_now(&td->start);
		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
			runtime[td->ddir ^ 1] = runtime[td->ddir];

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		fio_gettime(&td->start, NULL);

		do_verify(td);

		runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	fio_gettime(&td->end_time, NULL);
	td->runtime[0] = runtime[0] / 1000;
	td->runtime[1] = runtime[1] / 1000;

	if (td->ts.bw_log)
		finish_log(td, td->ts.bw_log, "bw");
	if (td->ts.slat_log)
		finish_log(td, td->ts.slat_log, "slat");
	if (td->ts.clat_log)
		finish_log(td, td->ts.clat_log, "clat");
	if (td->write_iolog_file)
		write_iolog_close(td);
	if (td->exec_postrun) {
		if (system(td->exec_postrun) < 0)
			log_err("fio: postrun %s failed\n", td->exec_postrun);
	}

	if (exitall_on_terminate)
		terminate_threads(td->groupid, 0);

err:
	if (td->error)
		printf("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	td_set_runstate(td, TD_EXITED);
	return (void *) (unsigned long) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads, pending, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	pending = cputhreads = 0;
	for_each_td(td, i) {
		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
			cputhreads++;

		if (td->runstate < TD_EXITED) {
			/*
			 * check if someone quit or got killed in an unusual way
			 */
			ret = waitpid(td->pid, &status, WNOHANG);
			if (ret < 0)
				perror("waitpid");
			else if ((ret == td->pid) && WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		if (td->runstate != TD_EXITED) {
			if (td->runstate < TD_RUNNING)
				pending++;

			continue;
		}

		if (td->error)
			exit_value++;

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");
		} else {
			int status;

			ret = waitpid(td->pid, &status, 0);
			if (ret < 0)
				perror("waitpid");
			else if (WIFEXITED(status) && WEXITSTATUS(status)) {
				if (!exit_value)
					exit_value++;
			}
		}

reaped:
		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
	}

	if (*nr_running == cputhreads && !pending)
		terminate_threads(TERMINATE_ALL, 0);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->create_serialize) {
			init_disk_util(td);
			continue;
		}

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		}

		init_disk_util(td);
	}

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_genesis();

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			fio_sem_init(&startup_sem, 1);
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					fio_sem_down(&startup_sem);
				else {
					int ret = fork_main(shm_id, i);

					exit(ret);
				}
			}
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			todo--;
			fio_sem_up(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

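/*
 * Parse the job options, set up logging and disk utilization polling,
 * then run all jobs and display the end results.
 */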
int main(int argc, char *argv[])
{
	long ps;

	/*
	 * We need locale for number printing, if it isn't set then just
	 * go with the US format.
	 */
	if (!getenv("LC_NUMERIC"))
		setlocale(LC_NUMERIC, "en_US");

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		log_err("Nothing to do\n");
		return 1;
	}

	ps = sysconf(_SC_PAGESIZE);
	if (ps < 0) {
		log_err("Failed to get page size\n");
		return 1;
	}

	page_mask = ps - 1;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ]);
		setup_log(&agg_io_log[DDIR_WRITE]);
	}

	disk_util_timer_arm();

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
		}
	}

	return exit_value;
}