[PATCH] Split out the memory handling from fio.c
[fio.git] / fio.c
1/*
2 * fio - the flexible io tester
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#include <unistd.h>
23#include <fcntl.h>
24#include <string.h>
25#include <signal.h>
26#include <time.h>
27#include <assert.h>
28#include <sys/stat.h>
29#include <sys/wait.h>
30#include <sys/ipc.h>
31#include <sys/shm.h>
32#include <sys/ioctl.h>
33#include <sys/mman.h>
34
35#include "fio.h"
36#include "os.h"
37
38#define MASK (4095)
39
40#define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
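/*
 * For illustration: MASK/ALIGN round a buffer pointer up to the next
 * 4096-byte boundary, e.g. an address of 0x12345 becomes
 * (0x12345 + 0xfff) & ~0xfff = 0x13000, while an already-aligned
 * address is left unchanged.
 */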
41
42int groupid = 0;
43int thread_number = 0;
44int shm_id = 0;
45int temp_stall_ts;
46char *fio_inst_prefix = _INST_PREFIX;
47
48#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
49
50static volatile int startup_sem;
51
52#define TERMINATE_ALL (-1)
53#define JOB_START_TIMEOUT (5 * 1000)
54
55static void terminate_threads(int group_id)
56{
57 struct thread_data *td;
58 int i;
59
60 for_each_td(td, i) {
 61 if (group_id == TERMINATE_ALL || group_id == td->groupid) {
62 td->terminate = 1;
63 td->start_delay = 0;
64 }
65 }
66}
67
68static void sig_handler(int sig)
69{
70 switch (sig) {
71 case SIGALRM:
72 update_io_ticks();
73 disk_util_timer_arm();
74 print_thread_status();
75 break;
76 default:
77 printf("\nfio: terminating on signal\n");
78 fflush(stdout);
79 terminate_threads(TERMINATE_ALL);
80 break;
81 }
82}
83
84/*
85 * The ->file_map[] contains a map of blocks we have or have not done io
86 * to yet. Used to make sure we cover the entire range in a fair fashion.
87 */
88static int random_map_free(struct thread_data *td, struct fio_file *f,
89 unsigned long long block)
90{
91 unsigned int idx = RAND_MAP_IDX(td, f, block);
92 unsigned int bit = RAND_MAP_BIT(td, f, block);
93
94 return (f->file_map[idx] & (1UL << bit)) == 0;
95}
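/*
 * Illustrative sketch, not part of fio itself: the lookup above splits
 * a block number into a word index and a bit offset, so each unsigned
 * long in ->file_map covers one word's worth of blocks. The helper
 * below is made up for illustration and assumes the usual definitions
 * of the RAND_MAP_IDX/RAND_MAP_BIT macros from fio.h.
 */
static int example_block_is_free(const unsigned long *map,
				 unsigned long long block)
{
	const unsigned int bits = 8 * sizeof(unsigned long);
	unsigned int idx = block / bits;	/* which map word */
	unsigned int bit = block % bits;	/* which bit within it */

	return (map[idx] & (1UL << bit)) == 0;
}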
96
97/*
98 * Return the next free block in the map.
99 */
100static int get_next_free_block(struct thread_data *td, struct fio_file *f,
101 unsigned long long *b)
102{
103 int i;
104
105 *b = 0;
106 i = 0;
107 while ((*b) * td->min_bs < f->file_size) {
108 if (f->file_map[i] != -1UL) {
109 *b += ffz(f->file_map[i]);
110 return 0;
111 }
112
113 *b += BLOCKS_PER_MAP;
114 i++;
115 }
116
117 return 1;
118}
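/*
 * For illustration: ffz() returns the position of the first zero bit
 * in a word, so a map word of 0x0f (four blocks already done) gives
 * ffz(0x0f) == 4 and *b advances straight to the first free block.
 */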
119
120/*
121 * Mark a given offset as used in the map.
122 */
123static void mark_random_map(struct thread_data *td, struct fio_file *f,
124 struct io_u *io_u)
125{
126 unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
127 unsigned int blocks = 0;
128
129 while (blocks < (io_u->buflen / td->min_bs)) {
130 unsigned int idx, bit;
131
132 if (!random_map_free(td, f, block))
133 break;
134
135 idx = RAND_MAP_IDX(td, f, block);
136 bit = RAND_MAP_BIT(td, f, block);
137
138 assert(idx < f->num_maps);
139
140 f->file_map[idx] |= (1UL << bit);
141 block++;
142 blocks++;
143 }
144
145 if ((blocks * td->min_bs) < io_u->buflen)
146 io_u->buflen = blocks * td->min_bs;
147}
148
149/*
150 * For random io, generate a random new block and see if it's used. Repeat
151 * until we find a free one. For sequential io, just return the end of
152 * the last io issued.
153 */
154static int get_next_offset(struct thread_data *td, struct fio_file *f,
155 unsigned long long *offset)
156{
157 unsigned long long b, rb;
158 long r;
159
160 if (!td->sequential) {
161 unsigned long long max_blocks = td->io_size / td->min_bs;
162 int loops = 50;
163
164 do {
165 r = os_random_long(&td->random_state);
166 b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
167 rb = b + (f->file_offset / td->min_bs);
168 loops--;
169 } while (!random_map_free(td, f, rb) && loops);
170
171 if (!loops) {
172 if (get_next_free_block(td, f, &b))
173 return 1;
174 }
175 } else
176 b = f->last_pos / td->min_bs;
177
178 *offset = (b * td->min_bs) + f->file_offset;
179 if (*offset > f->file_size)
180 return 1;
181
182 return 0;
183}
184
185static unsigned int get_next_buflen(struct thread_data *td)
186{
187 unsigned int buflen;
188 long r;
189
190 if (td->min_bs == td->max_bs)
191 buflen = td->min_bs;
192 else {
193 r = os_random_long(&td->bsrange_state);
194 buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
195 buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
196 }
197
198 if (buflen > td->io_size - td->this_io_bytes[td->ddir]) {
199 /*
200 * if using direct/raw io, we may not be able to
201 * shrink the size. so just fail it.
202 */
203 if (td->io_ops->flags & FIO_RAWIO)
204 return 0;
205
206 buflen = td->io_size - td->this_io_bytes[td->ddir];
207 }
208
209 return buflen;
210}
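/*
 * For illustration: the rounding above, (buflen + min_bs - 1) &
 * ~(min_bs - 1), aligns the length up to a multiple of min_bs and
 * relies on min_bs being a power of two; with min_bs = 4096, a raw
 * length of 5000 becomes 8192.
 */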
211
212/*
213 * Check if we are above the minimum rate given.
214 */
215static int check_min_rate(struct thread_data *td, struct timeval *now)
216{
217 unsigned long spent;
218 unsigned long rate;
219 int ddir = td->ddir;
220
221 /*
222 * allow a 2 second settle period in the beginning
223 */
224 if (mtime_since(&td->start, now) < 2000)
225 return 0;
226
227 /*
228 * if rate blocks is set, sample is running
229 */
230 if (td->rate_bytes) {
231 spent = mtime_since(&td->lastrate, now);
232 if (spent < td->ratecycle)
233 return 0;
234
235 rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
236 if (rate < td->ratemin) {
237 fprintf(f_out, "%s: min rate %d not met, got %ldKiB/sec\n", td->name, td->ratemin, rate);
238 return 1;
239 }
240 }
241
242 td->rate_bytes = td->this_io_bytes[ddir];
243 memcpy(&td->lastrate, now, sizeof(*now));
244 return 0;
245}
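/*
 * For illustration: the rate is a byte delta divided by elapsed
 * milliseconds, so the quotient is bytes per millisecond, which the
 * message reports as KiB/sec; e.g. 10240000 bytes over 2000 msec is
 * reported as 5120.
 */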
246
247static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
248{
249 if (!td->timeout)
250 return 0;
251 if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
252 return 1;
253
254 return 0;
255}
256
257/*
258 * Return the data direction for the next io_u. If the job is a
259 * mixed read/write workload, check the rwmix cycle and switch if
260 * necessary.
261 */
262static int get_rw_ddir(struct thread_data *td)
263{
264 if (td_rw(td)) {
265 struct timeval now;
266 unsigned long elapsed;
267
268 gettimeofday(&now, NULL);
269 elapsed = mtime_since_now(&td->rwmix_switch);
270
271 /*
272 * Check if it's time to seed a new data direction.
273 */
274 if (elapsed >= td->rwmixcycle) {
275 unsigned int v;
276 long r;
277
278 r = os_random_long(&td->rwmix_state);
279 v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
280 if (v < td->rwmixread)
281 td->rwmix_ddir = DDIR_READ;
282 else
283 td->rwmix_ddir = DDIR_WRITE;
284 memcpy(&td->rwmix_switch, &now, sizeof(now));
285 }
286 return td->rwmix_ddir;
287 } else if (td_read(td))
288 return DDIR_READ;
289 else
290 return DDIR_WRITE;
291}
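/*
 * For illustration: v is drawn uniformly from 1..100, so with
 * rwmixread=70 roughly 70% of the rwmixcycle switch points pick
 * DDIR_READ and the rest pick DDIR_WRITE.
 */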
292
293static int td_io_prep(struct thread_data *td, struct io_u *io_u)
294{
295 if (td->io_ops->prep && td->io_ops->prep(td, io_u))
296 return 1;
297
298 return 0;
299}
300
301void put_io_u(struct thread_data *td, struct io_u *io_u)
302{
303 io_u->file = NULL;
304 list_del(&io_u->list);
305 list_add(&io_u->list, &td->io_u_freelist);
306 td->cur_depth--;
307}
308
309static int fill_io_u(struct thread_data *td, struct fio_file *f,
310 struct io_u *io_u)
311{
312 /*
313 * If using an iolog, grab next piece if any available.
314 */
315 if (td->read_iolog)
316 return read_iolog_get(td, io_u);
317
318 /*
319 * No log, let the seq/rand engine retrieve the next position.
320 */
321 if (!get_next_offset(td, f, &io_u->offset)) {
322 io_u->buflen = get_next_buflen(td);
323
324 if (io_u->buflen) {
325 io_u->ddir = get_rw_ddir(td);
326
327 /*
328 * If using a write iolog, store this entry.
329 */
330 if (td->write_iolog)
331 write_iolog_put(td, io_u);
332
333 io_u->file = f;
334 return 0;
335 }
336 }
337
338 return 1;
339}
340
341#define queue_full(td) list_empty(&(td)->io_u_freelist)
342
343struct io_u *__get_io_u(struct thread_data *td)
344{
345 struct io_u *io_u = NULL;
346
347 if (!queue_full(td)) {
348 io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
349
350 io_u->error = 0;
351 io_u->resid = 0;
352 list_del(&io_u->list);
353 list_add(&io_u->list, &td->io_u_busylist);
354 td->cur_depth++;
355 }
356
357 return io_u;
358}
359
360/*
361 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
362 * etc. The returned io_u is fully ready to be prepped and submitted.
363 */
364static struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
365{
366 struct io_u *io_u;
367
368 io_u = __get_io_u(td);
369 if (!io_u)
370 return NULL;
371
372 if (td->zone_bytes >= td->zone_size) {
373 td->zone_bytes = 0;
374 f->last_pos += td->zone_skip;
375 }
376
377 if (fill_io_u(td, f, io_u)) {
378 put_io_u(td, io_u);
379 return NULL;
380 }
381
382 if (io_u->buflen + io_u->offset > f->file_size) {
383 if (td->io_ops->flags & FIO_RAWIO) {
384 put_io_u(td, io_u);
385 return NULL;
386 }
387
388 io_u->buflen = f->file_size - io_u->offset;
389 }
390
391 if (!io_u->buflen) {
392 put_io_u(td, io_u);
393 return NULL;
394 }
395
396 if (!td->read_iolog && !td->sequential)
397 mark_random_map(td, f, io_u);
398
399 f->last_pos += io_u->buflen;
400
401 if (td->verify != VERIFY_NONE)
402 populate_verify_io_u(td, io_u);
403
404 if (td_io_prep(td, io_u)) {
405 put_io_u(td, io_u);
406 return NULL;
407 }
408
409 gettimeofday(&io_u->start_time, NULL);
410 return io_u;
411}
412
413static inline void td_set_runstate(struct thread_data *td, int runstate)
414{
415 td->runstate = runstate;
416}
417
418static struct fio_file *get_next_file(struct thread_data *td)
419{
420 unsigned int old_next_file = td->next_file;
421 struct fio_file *f;
422
423 do {
424 f = &td->files[td->next_file];
425
426 td->next_file++;
427 if (td->next_file >= td->nr_files)
428 td->next_file = 0;
429
430 if (f->fd != -1)
431 break;
432
433 f = NULL;
434 } while (td->next_file != old_next_file);
435
436 return f;
437}
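/*
 * For illustration: this is a plain round-robin scan. With nr_files=3
 * and next_file=2 it tries files 2, 0, 1 in that order and returns the
 * first one that still has an open fd, or NULL if none do.
 */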
438
439static int td_io_sync(struct thread_data *td, struct fio_file *f)
440{
441 if (td->io_ops->sync)
442 return td->io_ops->sync(td, f);
443
444 return 0;
445}
446
447static int td_io_getevents(struct thread_data *td, int min, int max,
448 struct timespec *t)
449{
450 return td->io_ops->getevents(td, min, max, t);
451}
452
453static int td_io_queue(struct thread_data *td, struct io_u *io_u)
454{
455 gettimeofday(&io_u->issue_time, NULL);
456
457 return td->io_ops->queue(td, io_u);
458}
459
460#define iocb_time(iocb) ((unsigned long) (iocb)->data)
461
462static void io_completed(struct thread_data *td, struct io_u *io_u,
463 struct io_completion_data *icd)
464{
465 struct timeval e;
466 unsigned long msec;
467
468 gettimeofday(&e, NULL);
469
470 if (!io_u->error) {
471 unsigned int bytes = io_u->buflen - io_u->resid;
472 const int idx = io_u->ddir;
473
474 td->io_blocks[idx]++;
475 td->io_bytes[idx] += bytes;
476 td->zone_bytes += bytes;
477 td->this_io_bytes[idx] += bytes;
478
479 msec = mtime_since(&io_u->issue_time, &e);
480
481 add_clat_sample(td, idx, msec);
482 add_bw_sample(td, idx);
483
484 if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE)
485 log_io_piece(td, io_u);
486
487 icd->bytes_done[idx] += bytes;
488 } else
489 icd->error = io_u->error;
490}
491
492static void ios_completed(struct thread_data *td,struct io_completion_data *icd)
493{
494 struct io_u *io_u;
495 int i;
496
497 icd->error = 0;
498 icd->bytes_done[0] = icd->bytes_done[1] = 0;
499
500 for (i = 0; i < icd->nr; i++) {
501 io_u = td->io_ops->event(td, i);
502
503 io_completed(td, io_u, icd);
504 put_io_u(td, io_u);
505 }
506}
507
508/*
509 * When job exits, we can cancel the in-flight IO if we are using async
510 * io. Attempt to do so.
511 */
512static void cleanup_pending_aio(struct thread_data *td)
513{
514 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
515 struct list_head *entry, *n;
516 struct io_completion_data icd;
517 struct io_u *io_u;
518 int r;
519
520 /*
521 * get immediately available events, if any
522 */
523 r = td_io_getevents(td, 0, td->cur_depth, &ts);
524 if (r > 0) {
525 icd.nr = r;
526 ios_completed(td, &icd);
527 }
528
529 /*
530 * now cancel remaining active events
531 */
532 if (td->io_ops->cancel) {
533 list_for_each_safe(entry, n, &td->io_u_busylist) {
534 io_u = list_entry(entry, struct io_u, list);
535
536 r = td->io_ops->cancel(td, io_u);
537 if (!r)
538 put_io_u(td, io_u);
539 }
540 }
541
542 if (td->cur_depth) {
543 r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);
544 if (r > 0) {
545 icd.nr = r;
546 ios_completed(td, &icd);
547 }
548 }
549}
550
551/*
552 * The main verify engine. Runs over the writes we previously submitted,
553 * reads the blocks back in, and checks the crc/md5 of the data.
554 */
555void do_verify(struct thread_data *td)
556{
557 struct timeval t;
558 struct io_u *io_u, *v_io_u = NULL;
559 struct io_completion_data icd;
560 struct fio_file *f;
561 int ret, i;
562
563 /*
564 * sync io first and invalidate cache, to make sure we really
565 * read from disk.
566 */
567 for_each_file(td, f, i) {
568 td_io_sync(td, f);
569 file_invalidate_cache(td, f);
570 }
571
572 td_set_runstate(td, TD_VERIFYING);
573
574 do {
575 if (td->terminate)
576 break;
577
578 gettimeofday(&t, NULL);
579 if (runtime_exceeded(td, &t))
580 break;
581
582 io_u = __get_io_u(td);
583 if (!io_u)
584 break;
585
586 if (get_next_verify(td, io_u)) {
587 put_io_u(td, io_u);
588 break;
589 }
590
591 f = get_next_file(td);
592 if (!f)
593 break;
594
595 io_u->file = f;
596
597 if (td_io_prep(td, io_u)) {
598 put_io_u(td, io_u);
599 break;
600 }
601
602 ret = td_io_queue(td, io_u);
603 if (ret) {
604 put_io_u(td, io_u);
605 td_verror(td, ret);
606 break;
607 }
608
609 /*
610 * we have one pending to verify, do that while
611 * we are doing io on the next one
612 */
613 if (do_io_u_verify(td, &v_io_u))
614 break;
615
616 ret = td_io_getevents(td, 1, 1, NULL);
617 if (ret != 1) {
618 if (ret < 0)
619 td_verror(td, ret);
620 break;
621 }
622
623 v_io_u = td->io_ops->event(td, 0);
624 icd.nr = 1;
625 icd.error = 0;
626 io_completed(td, v_io_u, &icd);
627
628 if (icd.error) {
629 td_verror(td, icd.error);
630 put_io_u(td, v_io_u);
631 v_io_u = NULL;
632 break;
633 }
634
635 /*
636 * if we can't submit more io, we need to verify now
637 */
638 if (queue_full(td) && do_io_u_verify(td, &v_io_u))
639 break;
640
641 } while (1);
642
643 do_io_u_verify(td, &v_io_u);
644
645 if (td->cur_depth)
646 cleanup_pending_aio(td);
647
648 td_set_runstate(td, TD_RUNNING);
649}
650
651/*
652 * Not really an io thread; all it does is burn CPU cycles in the specified
653 * manner.
654 */
655static void do_cpuio(struct thread_data *td)
656{
657 struct timeval e;
658 int split = 100 / td->cpuload;
659 int i = 0;
660
661 while (!td->terminate) {
662 gettimeofday(&e, NULL);
663
664 if (runtime_exceeded(td, &e))
665 break;
666
667 if (!(i % split))
668 __usec_sleep(10000);
669 else
670 usec_sleep(td, 10000);
671
672 i++;
673 }
674}
675
676/*
677 * Main IO worker function. It retrieves io_u's to process and queues
678 * and reaps them, checking for rate and errors along the way.
679 */
680static void do_io(struct thread_data *td)
681{
682 struct io_completion_data icd;
683 struct timeval s, e;
684 unsigned long usec;
685 struct fio_file *f;
686 int i, ret = 0;
687
688 td_set_runstate(td, TD_RUNNING);
689
690 while (td->this_io_bytes[td->ddir] < td->io_size) {
691 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
692 struct timespec *timeout;
693 int min_evts = 0;
694 struct io_u *io_u;
695
696 if (td->terminate)
697 break;
698
699 f = get_next_file(td);
700 if (!f)
701 break;
702
703 io_u = get_io_u(td, f);
704 if (!io_u)
705 break;
706
707 memcpy(&s, &io_u->start_time, sizeof(s));
708
709 ret = td_io_queue(td, io_u);
710 if (ret) {
711 put_io_u(td, io_u);
712 td_verror(td, ret);
713 break;
714 }
715
716 add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
717
718 if (td->cur_depth < td->iodepth) {
719 timeout = &ts;
720 min_evts = 0;
721 } else {
722 timeout = NULL;
723 min_evts = 1;
724 }
725
726
727 ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
728 if (ret < 0) {
729 td_verror(td, -ret);
730 break;
731 } else if (!ret)
732 continue;
733
734 icd.nr = ret;
735 ios_completed(td, &icd);
736 if (icd.error) {
737 td_verror(td, icd.error);
738 break;
739 }
740
741 /*
742 * the rate is batched for now; it should work for batches
743 * of completions, except the very first one, which may look
744 * a little bursty
745 */
746 gettimeofday(&e, NULL);
747 usec = utime_since(&s, &e);
748
749 rate_throttle(td, usec, icd.bytes_done[td->ddir]);
750
751 if (check_min_rate(td, &e)) {
752 if (rate_quit)
753 terminate_threads(td->groupid);
754 td_verror(td, ENOMEM);
755 break;
756 }
757
758 if (runtime_exceeded(td, &e))
759 break;
760
761 if (td->thinktime)
762 usec_sleep(td, td->thinktime);
763
764 if (should_fsync(td) && td->fsync_blocks &&
765 (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)
766 td_io_sync(td, f);
767 }
768
769 if (!ret) {
770 if (td->cur_depth)
771 cleanup_pending_aio(td);
772
773 if (should_fsync(td) && td->end_fsync) {
774 td_set_runstate(td, TD_FSYNCING);
775 for_each_file(td, f, i)
776 td_io_sync(td, f);
777 }
778 }
779}
780
781static int td_io_init(struct thread_data *td)
782{
783 if (td->io_ops->init)
784 return td->io_ops->init(td);
785
786 return 0;
787}
788
789static void cleanup_io_u(struct thread_data *td)
790{
791 struct list_head *entry, *n;
792 struct io_u *io_u;
793
794 list_for_each_safe(entry, n, &td->io_u_freelist) {
795 io_u = list_entry(entry, struct io_u, list);
796
797 list_del(&io_u->list);
798 free(io_u);
799 }
800
801 free_io_mem(td);
802}
803
804static int init_io_u(struct thread_data *td)
805{
806 struct io_u *io_u;
807 int i, max_units;
808 char *p;
809
810 if (td->io_ops->flags & FIO_CPUIO)
811 return 0;
812
813 if (td->io_ops->flags & FIO_SYNCIO)
814 max_units = 1;
815 else
816 max_units = td->iodepth;
817
818 td->orig_buffer_size = td->max_bs * max_units + MASK;
819
820 if (allocate_io_mem(td))
821 return 1;
822
823 p = ALIGN(td->orig_buffer);
824 for (i = 0; i < max_units; i++) {
825 io_u = malloc(sizeof(*io_u));
826 memset(io_u, 0, sizeof(*io_u));
827 INIT_LIST_HEAD(&io_u->list);
828
829 io_u->buf = p + td->max_bs * i;
830 io_u->index = i;
831 list_add(&io_u->list, &td->io_u_freelist);
832 }
833
834 return 0;
835}
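/*
 * For illustration: the MASK bytes added to orig_buffer_size are slack
 * so that ALIGN() can move the start of the buffer up to the next
 * 4096-byte boundary without the last max_bs-sized io_u slice running
 * past the end of the allocation.
 */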
836
837static int switch_ioscheduler(struct thread_data *td)
838{
839 char tmp[256], tmp2[128];
840 FILE *f;
841 int ret;
842
843 sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
844
845 f = fopen(tmp, "r+");
846 if (!f) {
847 td_verror(td, errno);
848 return 1;
849 }
850
851 /*
852 * Set io scheduler.
853 */
854 ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
855 if (ferror(f) || ret != 1) {
856 td_verror(td, errno);
857 fclose(f);
858 return 1;
859 }
860
861 rewind(f);
862
863 /*
864 * Read back and check that the selected scheduler is now the default.
865 */
866 ret = fread(tmp, 1, sizeof(tmp), f);
867 if (ferror(f) || ret < 0) {
868 td_verror(td, errno);
869 fclose(f);
870 return 1;
871 }
872
873 sprintf(tmp2, "[%s]", td->ioscheduler);
874 if (!strstr(tmp, tmp2)) {
875 log_err("fio: io scheduler %s not found\n", td->ioscheduler);
876 td_verror(td, EINVAL);
877 fclose(f);
878 return 1;
879 }
880
881 fclose(f);
882 return 0;
883}
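/*
 * For illustration: the sysfs file lists the available schedulers with
 * the active one in brackets, e.g. "noop anticipatory deadline [cfq]",
 * which is why the read-back check above looks for "[<name>]" in the
 * returned string.
 */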
884
885static void clear_io_state(struct thread_data *td)
886{
887 struct fio_file *f;
888 int i;
889
890 td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
891 td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
892 td->zone_bytes = 0;
893
894 for_each_file(td, f, i) {
895 f->last_pos = 0;
896 if (td->io_ops->flags & FIO_SYNCIO)
897 lseek(f->fd, 0, SEEK_SET);
898
899 if (f->file_map)
900 memset(f->file_map, 0, f->num_maps * sizeof(long));
901 }
902}
903
904/*
905 * Entry point for the thread based jobs. The process based jobs end up
906 * here as well, after a little setup.
907 */
908static void *thread_main(void *data)
909{
910 struct thread_data *td = data;
911
912 if (!td->use_thread)
913 setsid();
914
915 td->pid = getpid();
916
917 INIT_LIST_HEAD(&td->io_u_freelist);
918 INIT_LIST_HEAD(&td->io_u_busylist);
919 INIT_LIST_HEAD(&td->io_hist_list);
920 INIT_LIST_HEAD(&td->io_log_list);
921
922 if (init_io_u(td))
923 goto err;
924
925 if (fio_setaffinity(td) == -1) {
926 td_verror(td, errno);
927 goto err;
928 }
929
930 if (td_io_init(td))
931 goto err;
932
933 if (init_iolog(td))
934 goto err;
935
936 if (td->ioprio) {
937 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
938 td_verror(td, errno);
939 goto err;
940 }
941 }
942
943 if (nice(td->nice) == -1) {
944 td_verror(td, errno);
945 goto err;
946 }
947
948 if (init_random_state(td))
949 goto err;
950
951 if (td->ioscheduler && switch_ioscheduler(td))
952 goto err;
953
954 td_set_runstate(td, TD_INITIALIZED);
955 fio_sem_up(&startup_sem);
956 fio_sem_down(&td->mutex);
957
958 if (!td->create_serialize && setup_files(td))
959 goto err;
960
961 gettimeofday(&td->epoch, NULL);
962
963 if (td->exec_prerun)
964 system(td->exec_prerun);
965
966 while (td->loops--) {
967 getrusage(RUSAGE_SELF, &td->ru_start);
968 gettimeofday(&td->start, NULL);
969 memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
970
971 if (td->ratemin)
972 memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
973
974 clear_io_state(td);
975 prune_io_piece_log(td);
976
977 if (td->io_ops->flags & FIO_CPUIO)
978 do_cpuio(td);
979 else
980 do_io(td);
981
982 td->runtime[td->ddir] += mtime_since_now(&td->start);
983 if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
984 td->runtime[td->ddir ^ 1] = td->runtime[td->ddir];
985
986 update_rusage_stat(td);
987
988 if (td->error || td->terminate)
989 break;
990
991 if (td->verify == VERIFY_NONE)
992 continue;
993
994 clear_io_state(td);
995 gettimeofday(&td->start, NULL);
996
997 do_verify(td);
998
999 td->runtime[DDIR_READ] += mtime_since_now(&td->start);
1000
1001 if (td->error || td->terminate)
1002 break;
1003 }
1004
1005 if (td->bw_log)
1006 finish_log(td, td->bw_log, "bw");
1007 if (td->slat_log)
1008 finish_log(td, td->slat_log, "slat");
1009 if (td->clat_log)
1010 finish_log(td, td->clat_log, "clat");
1011 if (td->write_iolog)
1012 write_iolog_close(td);
1013 if (td->exec_postrun)
1014 system(td->exec_postrun);
1015
1016 if (exitall_on_terminate)
1017 terminate_threads(td->groupid);
1018
1019err:
1020 close_files(td);
1021 close_ioengine(td);
1022 cleanup_io_u(td);
1023 td_set_runstate(td, TD_EXITED);
1024 return NULL;
1025
1026}
1027
1028/*
1029 * We cannot pass the td data into a forked process, so attach the td and
1030 * pass it to the thread worker.
1031 */
1032static void *fork_main(int shmid, int offset)
1033{
1034 struct thread_data *td;
1035 void *data;
1036
1037 data = shmat(shmid, NULL, 0);
1038 if (data == (void *) -1) {
1039 perror("shmat");
1040 return NULL;
1041 }
1042
1043 td = data + offset * sizeof(struct thread_data);
1044 thread_main(td);
1045 shmdt(data);
1046 return NULL;
1047}
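/*
 * Illustrative sketch, not part of fio itself: the SysV shm pattern
 * fork_main() relies on. The parent creates a shared segment holding
 * the thread_data array and the forked child re-attaches the same
 * segment by id, then indexes into it by offset. The helper name below
 * is made up for illustration.
 */
static void *example_shm_create(size_t size, int *out_shmid)
{
	void *p;
	int id;

	id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
	if (id == -1)
		return NULL;

	p = shmat(id, NULL, 0);
	if (p == (void *) -1)
		return NULL;

	*out_shmid = id;	/* a child passes this id back to shmat() */
	return p;
}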
1048
1049/*
1050 * Run over the job map and reap the threads that have exited, if any.
1051 */
1052static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1053{
1054 struct thread_data *td;
1055 int i, cputhreads;
1056
1057 /*
1058 * reap exited threads (TD_EXITED -> TD_REAPED)
1059 */
1060 cputhreads = 0;
1061 for_each_td(td, i) {
1062 /*
1063 * ->io_ops is NULL for a thread that has closed its
1064 * io engine
1065 */
1066 if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
1067 cputhreads++;
1068
1069 if (td->runstate != TD_EXITED)
1070 continue;
1071
1072 td_set_runstate(td, TD_REAPED);
1073
1074 if (td->use_thread) {
1075 long ret;
1076
1077 if (pthread_join(td->thread, (void *) &ret))
1078 perror("thread_join");
1079 } else
1080 waitpid(td->pid, NULL, 0);
1081
1082 (*nr_running)--;
1083 (*m_rate) -= td->ratemin;
1084 (*t_rate) -= td->rate;
1085 }
1086
1087 if (*nr_running == cputhreads)
1088 terminate_threads(TERMINATE_ALL);
1089}
1090
1091/*
1092 * Main function for kicking off and reaping jobs, as needed.
1093 */
1094static void run_threads(void)
1095{
1096 struct thread_data *td;
1097 unsigned long spent;
1098 int i, todo, nr_running, m_rate, t_rate, nr_started;
1099
1100 if (fio_pin_memory())
1101 return;
1102
1103 if (!terse_output) {
1104 printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
1105 fflush(stdout);
1106 }
1107
1108 signal(SIGINT, sig_handler);
1109 signal(SIGALRM, sig_handler);
1110
1111 todo = thread_number;
1112 nr_running = 0;
1113 nr_started = 0;
1114 m_rate = t_rate = 0;
1115
1116 for_each_td(td, i) {
1117 print_status_init(td->thread_number - 1);
1118
1119 init_disk_util(td);
1120
1121 if (!td->create_serialize)
1122 continue;
1123
1124 /*
1125 * do file setup here so it happens sequentially,
1126 * we don't want X number of threads getting their
1127 * client data interspersed on disk
1128 */
1129 if (setup_files(td)) {
1130 td_set_runstate(td, TD_REAPED);
1131 todo--;
1132 }
1133 }
1134
1135 time_init();
1136
1137 while (todo) {
1138 struct thread_data *map[MAX_JOBS];
1139 struct timeval this_start;
1140 int this_jobs = 0, left;
1141
1142 /*
1143 * create threads (TD_NOT_CREATED -> TD_CREATED)
1144 */
1145 for_each_td(td, i) {
1146 if (td->runstate != TD_NOT_CREATED)
1147 continue;
1148
1149 /*
1150 * never got a chance to start, killed by other
1151 * thread for some reason
1152 */
1153 if (td->terminate) {
1154 todo--;
1155 continue;
1156 }
1157
1158 if (td->start_delay) {
1159 spent = mtime_since_genesis();
1160
1161 if (td->start_delay * 1000 > spent)
1162 continue;
1163 }
1164
1165 if (td->stonewall && (nr_started || nr_running))
1166 break;
1167
1168 /*
1169 * Set state to created. Thread will transition
1170 * to TD_INITIALIZED when it's done setting up.
1171 */
1172 td_set_runstate(td, TD_CREATED);
1173 map[this_jobs++] = td;
1174 fio_sem_init(&startup_sem, 1);
1175 nr_started++;
1176
1177 if (td->use_thread) {
1178 if (pthread_create(&td->thread, NULL, thread_main, td)) {
1179 perror("thread_create");
1180 nr_started--;
1181 }
1182 } else {
1183 if (fork())
1184 fio_sem_down(&startup_sem);
1185 else {
1186 fork_main(shm_id, i);
1187 exit(0);
1188 }
1189 }
1190 }
1191
1192 /*
1193 * Wait for the started threads to transition to
1194 * TD_INITIALIZED.
1195 */
1196 gettimeofday(&this_start, NULL);
1197 left = this_jobs;
1198 while (left) {
1199 if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
1200 break;
1201
1202 usleep(100000);
1203
1204 for (i = 0; i < this_jobs; i++) {
1205 td = map[i];
1206 if (!td)
1207 continue;
1208 if (td->runstate == TD_INITIALIZED) {
1209 map[i] = NULL;
1210 left--;
1211 } else if (td->runstate >= TD_EXITED) {
1212 map[i] = NULL;
1213 left--;
1214 todo--;
1215 nr_running++; /* work-around... */
1216 }
1217 }
1218 }
1219
1220 if (left) {
1221 log_err("fio: %d jobs failed to start\n", left);
1222 for (i = 0; i < this_jobs; i++) {
1223 td = map[i];
1224 if (!td)
1225 continue;
1226 kill(td->pid, SIGTERM);
1227 }
1228 break;
1229 }
1230
1231 /*
1232 * start created threads (TD_INITIALIZED -> TD_RUNNING).
1233 */
1234 for_each_td(td, i) {
1235 if (td->runstate != TD_INITIALIZED)
1236 continue;
1237
1238 td_set_runstate(td, TD_RUNNING);
1239 nr_running++;
1240 nr_started--;
1241 m_rate += td->ratemin;
1242 t_rate += td->rate;
1243 todo--;
1244 fio_sem_up(&td->mutex);
1245 }
1246
1247 reap_threads(&nr_running, &t_rate, &m_rate);
1248
1249 if (todo)
1250 usleep(100000);
1251 }
1252
1253 while (nr_running) {
1254 reap_threads(&nr_running, &t_rate, &m_rate);
1255 usleep(10000);
1256 }
1257
1258 update_io_ticks();
1259 fio_unpin_memory();
1260}
1261
1262int main(int argc, char *argv[])
1263{
1264 if (parse_options(argc, argv))
1265 return 1;
1266
1267 if (!thread_number) {
1268 log_err("Nothing to do\n");
1269 return 1;
1270 }
1271
1272 disk_util_timer_arm();
1273
1274 run_threads();
1275 show_run_stats();
1276
1277 return 0;
1278}