Fix problem with f->last_completed_pos not being correct on requeues
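
In do_io() and do_verify(), a short completion (->resid set) advances io_u->offset by the bytes that did complete before the io_u is requeued; mirror that advance into f->last_completed_pos so the file position is not left stale when the remainder is resubmitted.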
fio.c
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>

#include "fio.h"

unsigned long page_mask;
unsigned long page_size;
#define ALIGN(buf)	\
	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
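/*
 * Example (assuming 4096 byte pages, so page_mask == 0xfff): a buffer at
 * 0x80f431 rounds up to the next page boundary, (0x80f431 + 0xfff) & ~0xfff
 * == 0x810000. An already page-aligned address is left unchanged.
 */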

int groupid = 0;
int thread_number = 0;
int nr_process = 0;
int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;

static struct fio_sem *startup_sem;
static volatile int fio_abort;
static int exit_value;

struct io_log *agg_io_log[2];

#define TERMINATE_ALL		(-1)
#define JOB_START_TIMEOUT	(5 * 1000)

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->runstate = runstate;
}

static void terminate_threads(int group_id)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (group_id == TERMINATE_ALL || groupid == td->groupid) {
			/*
			 * if the thread is running, just let it exit; a
			 * thread that hasn't started running yet must be
			 * killed, since it won't see ->terminate
			 */
			if (td->runstate < TD_RUNNING)
				kill(td->pid, SIGQUIT);
			td->terminate = 1;
			td->o.start_delay = 0;
		}
	}
}

static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal %d\n", sig);
		fflush(stdout);
		terminate_threads(TERMINATE_ALL);
		break;
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long long bytes = 0;
	unsigned long iops = 0;
	unsigned long spent;
	unsigned long rate;

	/*
	 * No minimum rate set, always ok
	 */
	if (!td->o.ratemin && !td->o.rate_iops_min)
		return 0;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	if (td_read(td)) {
		iops += td->io_blocks[DDIR_READ];
		bytes += td->this_io_bytes[DDIR_READ];
	}
	if (td_write(td)) {
		iops += td->io_blocks[DDIR_WRITE];
		bytes += td->this_io_bytes[DDIR_WRITE];
	}

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes || td->rate_blocks) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->o.ratecycle)
			return 0;

		if (td->o.rate) {
			/*
			 * check bandwidth specified rate
			 */
			if (bytes < td->rate_bytes) {
				log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
				return 1;
			} else {
				rate = (bytes - td->rate_bytes) / spent;
				if (rate < td->o.ratemin || bytes < td->rate_bytes) {
					log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
					return 1;
				}
			}
		} else {
			/*
			 * checks iops specified rate
			 */
			if (iops < td->o.rate_iops) {
				log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
				return 1;
			} else {
				rate = (iops - td->rate_blocks) / spent;
				if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
					log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
					return 1;
				}
			}
		}
	}

	td->rate_bytes = bytes;
	td->rate_blocks = iops;
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->o.timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
		return 1;

	return 0;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		list_for_each_safe(entry, n, &td->io_u_busylist) {
			io_u = list_entry(entry, struct io_u, list);

			/*
			 * if the io_u isn't in flight, then that generally
			 * means someone leaked an io_u. complain but fix
			 * it up, so we don't stall here.
			 */
			if ((io_u->flags & IO_U_F_FLIGHT) == 0) {
				log_err("fio: non-busy IO on busy list\n");
				put_io_u(td, io_u);
			} else {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error, "td_io_queue");
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error, "td_io_queue");
			return 1;
		}

		if (io_u_sync_complete(td, io_u) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}
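
/*
 * A note on the queue return values used above and below (a summary, based
 * on how the callers here treat them): FIO_Q_COMPLETED means the io_u was
 * completed inline, FIO_Q_QUEUED means it is in flight and must be reaped
 * via io_u_queued_complete(), and FIO_Q_BUSY means the engine could not
 * accept it, so pending IO must be committed and the io_u requeued.
 */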

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int ret, min_events;
	unsigned int i;

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (!(f->flags & FIO_FILE_OPEN))
			continue;
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		int ret2;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (runtime_exceeded(td, &io_u->start_time)) {
			put_io_u(td, io_u);
			break;
		}

		if (get_next_verify(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		if (td_io_prep(td, io_u)) {
			put_io_u(td, io_u);
			break;
		}

		io_u->end_io = verify_io_u;

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;
				struct fio_file *f = io_u->file;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, ENODATA, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;
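				/*
				 * keep the file's last completed position in
				 * step with the advanced offset, so a
				 * requeued remainder doesn't see a stale
				 * position -- the fix this change is about
				 */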
				f->last_completed_pos = io_u->offset;

				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				ret = io_u_sync_complete(td, io_u);
				if (ret < 0)
					break;
			}
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret, "td_io_queue");
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first.
		 */
		min_events = 0;
		if (queue_full(td) || ret == FIO_Q_BUSY) {
			min_events = 1;

			if (td->cur_depth > td->o.iodepth_low)
				min_events = td->cur_depth - td->o.iodepth_low;
		}

		/*
		 * Reap required number of io units, if any, and do the
		 * verification on them through the callback handler
		 */
		if (io_u_queued_complete(td, min_events) < 0)
			break;
	}

	if (!td->error) {
		min_events = td->cur_depth;

		if (min_events)
			ret = io_u_queued_complete(td, min_events);
	} else
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 */
static void do_io(struct thread_data *td)
{
	struct timeval s;
	unsigned long usec;
	unsigned int i;
	int ret = 0;

	td_set_runstate(td, TD_RUNNING);

	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) {
		struct timeval comp_time;
		long bytes_done = 0;
		int min_evts = 0;
		struct io_u *io_u;
		int ret2;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		if (runtime_exceeded(td, &s)) {
			put_io_u(td, io_u);
			break;
		}

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error)
				ret = -io_u->error;
			else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;
				struct fio_file *f = io_u->file;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, ENODATA, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;
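				/*
				 * as in do_verify(): mirror the advanced
				 * offset into ->last_completed_pos before any
				 * requeue, so the resubmitted remainder picks
				 * up at the right position
				 */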
				f->last_completed_pos = io_u->offset;

				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				fio_gettime(&comp_time, NULL);
				bytes_done = io_u_sync_complete(td, io_u);
				if (bytes_done < 0)
					ret = bytes_done;
			}
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (ret < 0 || td->error)
			break;

		/*
		 * See if we need to complete some commands
		 */
		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
			min_evts = 0;
			if (queue_full(td) || ret == FIO_Q_BUSY) {
				min_evts = 1;

				if (td->cur_depth > td->o.iodepth_low)
					min_evts = td->cur_depth - td->o.iodepth_low;
			}

			fio_gettime(&comp_time, NULL);
			bytes_done = io_u_queued_complete(td, min_evts);
			if (bytes_done < 0)
				break;
		}

		if (!bytes_done)
			continue;

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		usec = utime_since(&s, &comp_time);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &comp_time)) {
			if (exitall_on_terminate)
				terminate_threads(td->groupid);
			td_verror(td, ENODATA, "check_min_rate");
			break;
		}

		if (td->o.thinktime) {
			unsigned long long b;

			b = td->io_blocks[0] + td->io_blocks[1];
			if (!(b % td->o.thinktime_blocks)) {
				int left;

				/*
				 * burn the first thinktime_spin usecs of
				 * the think time, then sleep for whatever
				 * remains
				 */
				if (td->o.thinktime_spin)
					__usec_sleep(td->o.thinktime_spin);

				left = td->o.thinktime - td->o.thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	if (!td->error) {
		struct fio_file *f;

		i = td->cur_depth;
		if (i)
			ret = io_u_queued_complete(td, i);

		if (should_fsync(td) && td->o.end_fsync) {
			td_set_runstate(td, TD_FSYNCING);

			for_each_file(td, f, i) {
				if (!(f->flags & FIO_FILE_OPEN))
					continue;
				fio_io_sync(td, f);
			}
		}
	} else
		cleanup_pending_aio(td);
}

static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	free_io_mem(td);
}

/*
 * "randomly" fill the buffer contents
 */
static void fill_rand_buf(struct io_u *io_u, int max_bs)
{
	int *ptr = io_u->buf;

	while ((void *) ptr - io_u->buf < max_bs) {
		/*
		 * scramble the cheap rand() output by multiplying with
		 * 0x9e370001, the 32-bit golden ratio prime also used by
		 * the kernel hash functions
		 */
		*ptr = rand() * 0x9e370001;
		ptr++;
	}
}

static int init_io_u(struct thread_data *td)
{
	unsigned long long buf_size;
	struct io_u *io_u;
	unsigned int max_bs;
	int i, max_units;
	char *p;

	if (td->io_ops->flags & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->o.iodepth;

	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
	buf_size = (unsigned long long) max_bs * (unsigned long long) max_units;
	buf_size += page_mask;
	if (buf_size != (size_t) buf_size) {
		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
		return 1;
	}

	td->orig_buffer_size = buf_size;

	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
		td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
	else if (td->orig_buffer_size & page_mask)
		td->orig_buffer_size = (td->orig_buffer_size + page_mask) & ~page_mask;

	if (allocate_io_mem(td))
		return 1;

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + max_bs * i;
		if (td_write(td) || td_rw(td))
			fill_rand_buf(io_u, max_bs);

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	io_u_init_timeout();

	return 0;
}

static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_DISKLESSIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		if (errno == ENOENT) {
			log_err("fio: os or kernel doesn't support IO scheduler switching\n");
			return 0;
		}
		td_verror(td, errno, "fopen iosched");
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno, "fwrite");
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 */
	ret = fread(tmp, 1, sizeof(tmp), f);
	if (ferror(f) || ret < 0) {
		td_verror(td, errno, "fread");
		fclose(f);
		return 1;
	}

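	/*
	 * the sysfs file lists every available scheduler and brackets the
	 * active one, e.g. "noop anticipatory deadline [cfq]", hence the
	 * "[name]" substring check below
	 */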
	sprintf(tmp2, "[%s]", td->o.ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
		td_verror(td, EINVAL, "iosched_switch");
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

static int clear_io_state(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;
	int ret;

	td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
	td->zone_bytes = 0;
	td->rate_bytes = 0;
	td->rate_blocks = 0;
	td->rw_end_set[0] = td->rw_end_set[1] = 0;

	td->last_was_sync = 0;

	for_each_file(td, f, i)
		td_io_close_file(td, f);

	ret = 0;
	for_each_file(td, f, i) {
		ret = td_io_open_file(td, f);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long runtime[2];
	struct thread_data *td = data;
	unsigned long elapsed;
	int clear_state;

	if (!td->o.use_thread)
		setsid();

	td->pid = getpid();

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_u_requeues);
	INIT_LIST_HEAD(&td->io_log_list);
	INIT_LIST_HEAD(&td->io_hist_list);
	td->io_hist_tree = RB_ROOT;

	if (init_io_u(td))
		goto err_sem;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno, "cpu_set_affinity");
		goto err_sem;
	}

	if (init_iolog(td))
		goto err_sem;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno, "ioprio_set");
			goto err_sem;
		}
	}

	if (nice(td->o.nice) == -1) {
		td_verror(td, errno, "nice");
		goto err_sem;
	}

	if (td->o.ioscheduler && switch_ioscheduler(td))
		goto err_sem;

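	/*
	 * handshake with the main thread: tell it we are initialized, then
	 * wait on our own mutex until run_threads() releases us into
	 * TD_RUNNING
	 */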
	td_set_runstate(td, TD_INITIALIZED);
	fio_sem_up(startup_sem);
	fio_sem_down(td->mutex);

	/*
	 * the ->mutex semaphore is now no longer used, close it to avoid
	 * eating a file descriptor
	 */
	fio_sem_remove(td->mutex);

	if (!td->o.create_serialize && setup_files(td))
		goto err;

	if (td_io_init(td))
		goto err;

	if (open_files(td))
		goto err;

	if (init_random_map(td))
		goto err;

	if (td->o.exec_prerun) {
		if (system(td->o.exec_prerun) < 0)
			goto err;
	}

	fio_gettime(&td->epoch, NULL);
	memcpy(&td->timeout_end, &td->epoch, sizeof(td->epoch));
	getrusage(RUSAGE_SELF, &td->ts.ru_start);

	runtime[0] = runtime[1] = 0;
	clear_state = 0;
	while (td->o.loops--) {
		fio_gettime(&td->start, NULL);
		memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));

		if (td->o.ratemin)
			memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));

		if (clear_state && clear_io_state(td))
			break;

		prune_io_piece_log(td);

		do_io(td);

		clear_state = 1;

		if (td_read(td) && td->io_bytes[DDIR_READ]) {
			if (td->rw_end_set[DDIR_READ])
				elapsed = utime_since(&td->start, &td->rw_end[DDIR_READ]);
			else
				elapsed = utime_since_now(&td->start);

			runtime[DDIR_READ] += elapsed;
		}
		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
			if (td->rw_end_set[DDIR_WRITE])
				elapsed = utime_since(&td->start, &td->rw_end[DDIR_WRITE]);
			else
				elapsed = utime_since_now(&td->start);

			runtime[DDIR_WRITE] += elapsed;
		}

		if (td->error || td->terminate)
			break;

		if (td->o.verify == VERIFY_NONE)
			continue;

		if (clear_io_state(td))
			break;

		fio_gettime(&td->start, NULL);

		do_verify(td);

		runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	td->ts.runtime[0] = runtime[0] / 1000;
	td->ts.runtime[1] = runtime[1] / 1000;
	td->ts.total_run_time = mtime_since_now(&td->epoch);
	td->ts.io_bytes[0] = td->io_bytes[0];
	td->ts.io_bytes[1] = td->io_bytes[1];

	if (td->ts.bw_log)
		finish_log(td, td->ts.bw_log, "bw");
	if (td->ts.slat_log)
		finish_log(td, td->ts.slat_log, "slat");
	if (td->ts.clat_log)
		finish_log(td, td->ts.clat_log, "clat");
	if (td->o.write_iolog_file)
		write_iolog_close(td);
	if (td->o.exec_postrun) {
		if (system(td->o.exec_postrun) < 0)
			log_err("fio: postrun %s failed\n", td->o.exec_postrun);
	}

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

err:
	if (td->error)
		printf("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
	close_files(td);
	close_ioengine(td);
	cleanup_io_u(td);
	options_mem_free(td);
	td_set_runstate(td, TD_EXITED);
	return (void *) (unsigned long) td->error;
err_sem:
	fio_sem_up(startup_sem);
	goto err;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (unsigned long) ret;
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	struct thread_data *td;
	int i, cputhreads, pending, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags = 0;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
			cputhreads++;

		if (!td->pid || td->runstate == TD_REAPED)
			continue;
		if (td->o.use_thread) {
			if (td->runstate == TD_EXITED) {
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			continue;
		}

		/*
		 * poll with WNOHANG unless the process has already
		 * exited, in which case a blocking wait is safe
		 */
		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n", td->pid, td->runstate);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				if (sig != SIGQUIT)
					log_err("fio: pid=%d, got signal=%d\n", td->pid, sig);
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);

				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * thread is not dead, continue
		 */
		pending++;
		continue;
reaped:
		if (td->o.use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("pthread_join");
		}

		(*nr_running)--;
		(*m_rate) -= td->o.ratemin;
		(*t_rate) -= td->o.rate;
		pending--;

		if (td->error)
			exit_value++;
	}

	if (*nr_running == cputhreads && !pending)
		terminate_threads(TERMINATE_ALL);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	if (fio_pin_memory())
		return;

	if (!terse_output) {
		printf("Starting ");
		if (nr_thread)
			printf("%d thread%s", nr_thread, nr_thread > 1 ? "s" : "");
		if (nr_process) {
			if (nr_thread)
				printf(" and ");
			printf("%d process%s", nr_process, nr_process > 1 ? "es" : "");
		}
		printf("\n");
		fflush(stdout);
	}

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for_each_td(td, i) {
		print_status_init(td->thread_number - 1);

		if (!td->o.create_serialize) {
			init_disk_util(td);
			continue;
		}

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_files(td)) {
			exit_value++;
			if (td->error)
				log_err("fio: pid=%d, err=%d/%s\n", td->pid, td->error, td->verror);
			td_set_runstate(td, TD_REAPED);
			todo--;
		}

		init_disk_util(td);
	}

	set_genesis_time();

	while (todo) {
		struct thread_data *map[MAX_JOBS];
		struct timeval this_start;
		int this_jobs = 0, left;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->o.start_delay) {
				spent = mtime_since_genesis();

				if (td->o.start_delay * 1000 > spent)
					continue;
			}

			if (td->o.stonewall && (nr_started || nr_running))
				break;

			/*
			 * Set state to created. Thread will transition
			 * to TD_INITIALIZED when it's done setting up.
			 */
			td_set_runstate(td, TD_CREATED);
			map[this_jobs++] = td;
			nr_started++;

			if (td->o.use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
					break;
				}
			} else {
				if (!fork()) {
					int ret = fork_main(shm_id, i);

					exit(ret);
				}
			}
			fio_sem_down(startup_sem);
		}

		/*
		 * Wait for the started threads to transition to
		 * TD_INITIALIZED.
		 */
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d jobs failed to start\n", left);
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->o.ratemin;
			t_rate += td->o.rate;
			todo--;
			fio_sem_up(td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}

	update_io_ticks();
	fio_unpin_memory();
}

int main(int argc, char *argv[])
{
	long ps;

	/*
	 * We need locale for number printing, if it isn't set then just
	 * go with the US format.
	 */
	if (!getenv("LC_NUMERIC"))
		setlocale(LC_NUMERIC, "en_US");

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number)
		return 0;

	ps = sysconf(_SC_PAGESIZE);
	if (ps < 0) {
		log_err("Failed to get page size\n");
		return 1;
	}

	page_size = ps;
	page_mask = ps - 1;

	if (write_bw_log) {
		setup_log(&agg_io_log[DDIR_READ]);
		setup_log(&agg_io_log[DDIR_WRITE]);
	}

	startup_sem = fio_sem_init(0);

	set_genesis_time();

	disk_util_timer_arm();

	run_threads();

	if (!fio_abort) {
		show_run_stats();
		if (write_bw_log) {
			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
		}
	}

	fio_sem_remove(startup_sem);
	return exit_value;
}