[PATCH] fio: make disk util work better (account like ru)
[disktools.git] / fio.c
abe4da87
JA
1/*
2 * fio - the flexible io tester
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
892199bd
JA
21#include <stdio.h>
22#include <stdlib.h>
23#include <unistd.h>
24#include <fcntl.h>
25#include <string.h>
26#include <errno.h>
27#include <signal.h>
28#include <time.h>
e128065d 29#include <math.h>
49d2caab 30#include <assert.h>
189873de 31#include <pthread.h>
debf703a
JA
32#include <dirent.h>
33#include <libgen.h>
892199bd
JA
34#include <sys/types.h>
35#include <sys/stat.h>
36#include <sys/wait.h>
892199bd
JA
37#include <sys/ipc.h>
38#include <sys/shm.h>
c94deb1c 39#include <sys/ioctl.h>
6e2c38cc 40#include <sys/mman.h>
892199bd
JA
41#include <asm/unistd.h>
42
27c32a38 43#include "fio.h"
892199bd 44
892199bd
JA
45#define MASK (4095)
46
4240cfa1 47#define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
892199bd 48
27c32a38
JA
49int groupid = 0;
50int thread_number = 0;
51char run_str[MAX_JOBS + 1];
52int shm_id = 0;
892199bd 53
02bdd9ba
JA
54/*
55 * thread life cycle
56 */
57enum {
58 TD_NOT_CREATED = 0,
59 TD_CREATED,
e8457004
JA
60 TD_RUNNING,
61 TD_VERIFYING,
02bdd9ba
JA
62 TD_EXITED,
63 TD_REAPED,
64};
65
2c83567e
JA
66/*
67 * The io unit
68 */
69struct io_u {
70 struct iocb iocb;
57d753e3 71 struct timeval start_time;
2c83567e
JA
72 struct timeval issue_time;
73
2c83567e
JA
74 char *buf;
75 unsigned int buflen;
4ac89145 76 unsigned long long offset;
2c83567e
JA
77
78 struct list_head list;
79};
80
62bb4285 81#define should_fsync(td) (td_write(td) && !(td)->odirect)
02983297 82
892199bd
JA
83static sem_t startup_sem;
84
27c32a38
JA
85#define TERMINATE_ALL (-1)
86
87static void terminate_threads(int groupid)
892199bd
JA
88{
89 int i;
90
213b446c
JA
91 for (i = 0; i < thread_number; i++) {
92 struct thread_data *td = &threads[i];
93
27c32a38
JA
94 if (groupid == TERMINATE_ALL || groupid == td->groupid) {
95 td->terminate = 1;
96 td->start_delay = 0;
97 }
213b446c 98 }
02bdd9ba
JA
99}
100
27c32a38 101static void sig_handler(int sig)
946d8870 102{
27c32a38 103 terminate_threads(TERMINATE_ALL);
892199bd
JA
104}
105
5c24b2c4 106static unsigned long utime_since(struct timeval *s, struct timeval *e)
892199bd
JA
107{
108 double sec, usec;
109
110 sec = e->tv_sec - s->tv_sec;
111 usec = e->tv_usec - s->tv_usec;
112 if (sec > 0 && usec < 0) {
113 sec--;
114 usec += 1000000;
115 }
116
117 sec *= (double) 1000000;
118
119 return sec + usec;
120}
121
fd11d7af
JA
122static unsigned long utime_since_now(struct timeval *s)
123{
124 struct timeval t;
125
126 gettimeofday(&t, NULL);
127 return utime_since(s, &t);
128}
129
5c24b2c4 130static unsigned long mtime_since(struct timeval *s, struct timeval *e)
892199bd
JA
131{
132 double sec, usec;
133
134 sec = e->tv_sec - s->tv_sec;
135 usec = e->tv_usec - s->tv_usec;
136 if (sec > 0 && usec < 0) {
137 sec--;
138 usec += 1000000;
139 }
140
141 sec *= (double) 1000;
142 usec /= (double) 1000;
143
144 return sec + usec;
145}
146
be33abe4
JA
147static unsigned long mtime_since_now(struct timeval *s)
148{
149 struct timeval t;
150
151 gettimeofday(&t, NULL);
152 return mtime_since(s, &t);
153}
154
98168d55
JA
155static inline unsigned long msec_now(struct timeval *s)
156{
157 return s->tv_sec * 1000 + s->tv_usec / 1000;
158}
159
49d2caab
JA
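/*
 * the file_map is a bitmap with one bit per min_bs sized block, used
 * to make sure random io touches each block only once
 */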
160static int random_map_free(struct thread_data *td, unsigned long long block)
161{
75b2ab2c
JA
162 unsigned int idx = RAND_MAP_IDX(td, block);
163 unsigned int bit = RAND_MAP_BIT(td, block);
49d2caab
JA
164
165 return (td->file_map[idx] & (1UL << bit)) == 0;
166}
167
168static int get_next_free_block(struct thread_data *td, unsigned long long *b)
892199bd 169{
49d2caab
JA
170 int i;
171
172 *b = 0;
173 i = 0;
174 while ((*b) * td->min_bs < td->io_size) {
175 if (td->file_map[i] != -1UL) {
176 *b += ffz(td->file_map[i]);
177 return 0;
178 }
179
180 *b += BLOCKS_PER_MAP;
181 i++;
182 }
183
184 return 1;
185}
186
187static void mark_random_map(struct thread_data *td, struct io_u *io_u)
188{
189 unsigned long block = io_u->offset / td->min_bs;
190 unsigned int blocks = 0;
191
192 while (blocks < (io_u->buflen / td->min_bs)) {
193 int idx, bit;
194
195 if (!random_map_free(td, block))
196 break;
197
75b2ab2c
JA
198 idx = RAND_MAP_IDX(td, block);
199 bit = RAND_MAP_BIT(td, block);
49d2caab
JA
200
201 assert(idx < td->num_maps);
202
203 td->file_map[idx] |= (1UL << bit);
204 block++;
205 blocks++;
206 }
207
208 if ((blocks * td->min_bs) < io_u->buflen)
209 io_u->buflen = blocks * td->min_bs;
210}
211
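/*
 * return the next offset to use: a free random block for random io,
 * or simply the block following the last bytes done for sequential io
 */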
212static int get_next_offset(struct thread_data *td, unsigned long long *offset)
213{
75b2ab2c 214 unsigned long long b, rb;
d32d9284 215 long r;
892199bd
JA
216
217 if (!td->sequential) {
49d2caab
JA
218 unsigned long max_blocks = td->io_size / td->min_bs;
219 int loops = 50;
220
221 do {
222 lrand48_r(&td->random_state, &r);
223 b = ((max_blocks - 1) * r / (RAND_MAX+1.0));
75b2ab2c 224 rb = b + (td->file_offset / td->min_bs);
49d2caab 225 loops--;
75b2ab2c 226 } while (!random_map_free(td, rb) && loops);
49d2caab
JA
227
228 if (!loops) {
229 if (get_next_free_block(td, &b))
230 return 1;
231 }
7889f07b 232 } else
49d2caab 233 b = td->last_bytes / td->min_bs;
7889f07b 234
49d2caab 235 *offset = (b * td->min_bs) + td->file_offset;
75b2ab2c
JA
236 if (*offset > td->file_size)
237 return 1;
238
49d2caab 239 return 0;
7889f07b
JA
240}
241
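/*
 * pick a buffer length between min_bs and max_bs, rounded to a min_bs
 * multiple and capped by the io still left to do for this run
 */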
242static unsigned int get_next_buflen(struct thread_data *td)
243{
244 unsigned int buflen;
d32d9284 245 long r;
7889f07b
JA
246
247 if (td->min_bs == td->max_bs)
248 buflen = td->min_bs;
249 else {
d32d9284 250 lrand48_r(&td->bsrange_state, &r);
7889f07b
JA
251 buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
252 buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
892199bd
JA
253 }
254
49d2caab
JA
255 if (buflen > td->io_size - td->this_io_bytes)
256 buflen = td->io_size - td->this_io_bytes;
7889f07b 257
7889f07b 258 return buflen;
892199bd
JA
259}
260
57d753e3
JA
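/*
 * track min/max and keep a running sum and sum of squares, so mean
 * and standard deviation can be derived later in calc_lat()
 */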
261static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
262 unsigned long val)
892199bd 263{
57d753e3
JA
264 if (val > is->max_val)
265 is->max_val = val;
266 if (val < is->min_val)
267 is->min_val = val;
268
269 is->val += val;
270 is->val_sq += val * val;
271 is->samples++;
272}
fd1ae4c9 273
a0a9b35b
JA
274static void add_log_sample(struct thread_data *td, struct io_log *log,
275 unsigned long val)
276{
277 if (log->nr_samples == log->max_samples) {
278 int new_size = sizeof(struct io_sample) * log->max_samples * 2;
279
280 log->log = realloc(log->log, new_size);
281 log->max_samples <<= 1;
282 }
283
284 log->log[log->nr_samples].val = val;
285 log->log[log->nr_samples].time = mtime_since_now(&td->start);
286 log->nr_samples++;
287}
288
57d753e3
JA
289static void add_clat_sample(struct thread_data *td, unsigned long msec)
290{
291 add_stat_sample(td, &td->clat_stat, msec);
a0a9b35b
JA
292
293 if (td->lat_log)
294 add_log_sample(td, td->lat_log, msec);
57d753e3 295}
fd1ae4c9 296
57d753e3
JA
297static void add_slat_sample(struct thread_data *td, unsigned long msec)
298{
299 add_stat_sample(td, &td->slat_stat, msec);
300}
fd1ae4c9 301
645785e5 302static void add_bw_sample(struct thread_data *td)
57d753e3
JA
303{
304 unsigned long spent = mtime_since_now(&td->stat_sample_time);
305 unsigned long rate;
306
1d035750 307 if (spent < td->bw_avg_time)
57d753e3
JA
308 return;
309
49d2caab 310 rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
57d753e3
JA
311 add_stat_sample(td, &td->bw_stat, rate);
312
a0a9b35b
JA
313 if (td->bw_log)
314 add_log_sample(td, td->bw_log, rate);
315
57d753e3 316 gettimeofday(&td->stat_sample_time, NULL);
49d2caab 317 td->stat_io_bytes = td->this_io_bytes;
892199bd
JA
318}
319
fd11d7af
JA
320/*
321 * busy looping version for the last few usec
322 */
323static void __usec_sleep(int usec)
324{
325 struct timeval start;
326
327 gettimeofday(&start, NULL);
328 while (utime_since_now(&start) < usec)
3782a8cd 329 nop;
fd11d7af
JA
330}
331
d15c5195 332static void usec_sleep(struct thread_data *td, unsigned long usec)
892199bd 333{
d15c5195
JA
334 struct timespec req, rem;
335
336 req.tv_sec = usec / 1000000;
337 req.tv_nsec = (usec - req.tv_sec * 1000000) * 1000;
892199bd
JA
338
339 do {
fd11d7af
JA
340 if (usec < 5000) {
341 __usec_sleep(usec);
342 break;
343 }
d15c5195 344
86184d14 345 rem.tv_sec = rem.tv_nsec = 0;
d15c5195
JA
346 if (nanosleep(&req, &rem) < 0)
347 break;
348
349 if ((rem.tv_sec + rem.tv_nsec) == 0)
892199bd 350 break;
86184d14
JA
351
352 req.tv_nsec = rem.tv_nsec;
d15c5195
JA
353 req.tv_sec = rem.tv_sec;
354
355 usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
356 } while (!td->terminate);
892199bd
JA
357}
358
9e850933
JA
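/*
 * when a fixed rate is requested, compare the time this io chunk took
 * with its time budget and sleep off any accumulated surplus
 */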
359static void rate_throttle(struct thread_data *td, unsigned long time_spent,
360 unsigned int bytes)
86184d14 361{
9e850933
JA
362 unsigned long usec_cycle;
363
4240cfa1
JA
364 if (!td->rate)
365 return;
366
9e850933
JA
367 usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);
368
369 if (time_spent < usec_cycle) {
370 unsigned long s = usec_cycle - time_spent;
86184d14
JA
371
372 td->rate_pending_usleep += s;
fad86e6a 373 if (td->rate_pending_usleep >= 100000) {
d15c5195 374 usec_sleep(td, td->rate_pending_usleep);
86184d14
JA
375 td->rate_pending_usleep = 0;
376 }
4240cfa1 377 } else {
9e850933 378 long overtime = time_spent - usec_cycle;
42b2b9fe 379
4240cfa1
JA
380 td->rate_pending_usleep -= overtime;
381 }
382}
383
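/*
 * check that the job is keeping up with the requested minimum rate,
 * sampled every ratecycle msecs; a miss fails the job (and can stop
 * the whole group if rate_quit is set)
 */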
5c24b2c4 384static int check_min_rate(struct thread_data *td, struct timeval *now)
4240cfa1 385{
7607bc6b 386 unsigned long spent;
4240cfa1
JA
387 unsigned long rate;
388
389 /*
390 * allow a 2 second settle period in the beginning
391 */
7607bc6b 392 if (mtime_since(&td->start, now) < 2000)
4240cfa1
JA
393 return 0;
394
395 /*
396 * if rate blocks is set, sample is running
397 */
49d2caab 398 if (td->rate_bytes) {
4240cfa1
JA
399 spent = mtime_since(&td->lastrate, now);
400 if (spent < td->ratecycle)
401 return 0;
402
49d2caab 403 rate = (td->this_io_bytes - td->rate_bytes) / spent;
4240cfa1
JA
404 if (rate < td->ratemin) {
405 printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
02bdd9ba 406 if (rate_quit)
27c32a38 407 terminate_threads(td->groupid);
4240cfa1
JA
408 return 1;
409 }
86184d14 410 }
4240cfa1 411
49d2caab 412 td->rate_bytes = td->this_io_bytes;
4240cfa1
JA
413 memcpy(&td->lastrate, now, sizeof(*now));
414 return 0;
86184d14
JA
415}
416
67903a2e
JA
417static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
418{
01f79976
JA
419 if (!td->timeout)
420 return 0;
67903a2e
JA
421 if (mtime_since(&td->start, t) >= td->timeout * 1000)
422 return 1;
423
424 return 0;
425}
426
e8457004
JA
427static void fill_random_bytes(struct thread_data *td,
428 unsigned char *p, unsigned int len)
429{
645785e5 430 unsigned int todo;
40ef7f64 431 double r;
e8457004
JA
432
433 while (len) {
40ef7f64 434 drand48_r(&td->verify_state, &r);
e8457004 435
40ef7f64
JA
436 /*
437 * lrand48_r seems to be broken and only fills the bottom
438 * 32-bits, even on 64-bit archs with 64-bit longs
439 */
440 todo = sizeof(r);
e8457004
JA
441 if (todo > len)
442 todo = len;
443
444 memcpy(p, &r, todo);
445
446 len -= todo;
447 p += todo;
448 }
449}
450
9d0c6ca2
JA
451static void hexdump(void *buffer, int len)
452{
453 unsigned char *p = buffer;
454 int i;
455
456 for (i = 0; i < len; i++)
457 printf("%02x", p[i]);
458 printf("\n");
459}
460
7f46ef08
JA
461static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
462{
463 unsigned char *p = (unsigned char *) io_u->buf;
464 unsigned long c;
465
466 p += sizeof(*hdr);
467 c = crc32(p, hdr->len - sizeof(*hdr));
468
469 return c != hdr->crc32;
470}
471
472static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
e8457004 473{
e8457004
JA
474 unsigned char *p = (unsigned char *) io_u->buf;
475 struct md5_ctx md5_ctx;
9d0c6ca2 476 int ret;
e8457004 477
e8457004
JA
478 memset(&md5_ctx, 0, sizeof(md5_ctx));
479 p += sizeof(*hdr);
480 md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
481
9d0c6ca2
JA
482 ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
483 if (ret) {
484 hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
485 hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
486 }
487
488 return ret;
e8457004
JA
489}
490
7f46ef08
JA
491static int verify_io_u(struct io_u *io_u)
492{
493 struct verify_header *hdr = (struct verify_header *) io_u->buf;
494 int ret;
495
496 if (hdr->fio_magic != FIO_HDR_MAGIC)
497 return 1;
498
499 if (hdr->verify_type == VERIFY_MD5)
500 ret = verify_io_u_md5(hdr, io_u);
501 else if (hdr->verify_type == VERIFY_CRC32)
502 ret = verify_io_u_crc32(hdr, io_u);
503 else {
504 fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);
505 ret = 1;
506 }
507
508 return ret;
509}
510
511static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
512{
513 hdr->crc32 = crc32(p, len);
514}
515
516static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
517{
518 struct md5_ctx md5_ctx;
519
520 memset(&md5_ctx, 0, sizeof(md5_ctx));
521 md5_update(&md5_ctx, p, len);
522 memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
523}
524
cfc702bd
JA
525/*
526 * fill body of io_u->buf with random data and add a header with the
527 * (eg) md5 or crc32 sum of that data.
528 */
e8457004 529static void populate_io_u(struct thread_data *td, struct io_u *io_u)
cfc702bd 530{
e8457004 531 unsigned char *p = (unsigned char *) io_u->buf;
7f46ef08 532 struct verify_header hdr;
e8457004
JA
533
534 hdr.fio_magic = FIO_HDR_MAGIC;
535 hdr.len = io_u->buflen;
536 p += sizeof(hdr);
537 fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
538
7f46ef08
JA
539 if (td->verify == VERIFY_MD5) {
540 fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
541 hdr.verify_type = VERIFY_MD5;
542 } else {
543 fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
544 hdr.verify_type = VERIFY_CRC32;
545 }
546
e8457004 547 memcpy(io_u->buf, &hdr, sizeof(hdr));
cfc702bd
JA
548}
549
2c83567e
JA
550static void put_io_u(struct thread_data *td, struct io_u *io_u)
551{
552 list_del(&io_u->list);
553 list_add(&io_u->list, &td->io_u_freelist);
554 td->cur_depth--;
555}
556
f0f3411b
JA
557#define queue_full(td) (list_empty(&(td)->io_u_freelist))
558
e8457004
JA
559static struct io_u *__get_io_u(struct thread_data *td)
560{
561 struct io_u *io_u;
562
f0f3411b 563 if (queue_full(td))
e8457004
JA
564 return NULL;
565
566 io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
567 list_del(&io_u->list);
568 list_add(&io_u->list, &td->io_u_busylist);
f4bb2243 569 td->cur_depth++;
e8457004
JA
570 return io_u;
571}
572
2c83567e
JA
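/*
 * set up the next io unit: pick offset and length, mark the random
 * map, fill verify data if needed and prep the iocb for aio
 */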
573static struct io_u *get_io_u(struct thread_data *td)
574{
575 struct io_u *io_u;
576
e8457004
JA
577 io_u = __get_io_u(td);
578 if (!io_u)
2c83567e
JA
579 return NULL;
580
406e7b7c
JA
581 if (get_next_offset(td, &io_u->offset)) {
582 put_io_u(td, io_u);
49d2caab 583 return NULL;
406e7b7c 584 }
49d2caab 585
b2a369fb
JA
586 io_u->buflen = get_next_buflen(td);
587 if (!io_u->buflen) {
e8457004 588 put_io_u(td, io_u);
7889f07b 589 return NULL;
e8457004 590 }
2c83567e 591
75b2ab2c
JA
592 if (io_u->buflen + io_u->offset > td->file_size)
593 io_u->buflen = td->file_size - io_u->offset;
49d2caab
JA
594
595 if (!td->sequential)
596 mark_random_map(td, io_u);
597
598 td->last_bytes += io_u->buflen;
599
7f46ef08 600 if (td->verify != VERIFY_NONE)
e8457004 601 populate_io_u(td, io_u);
cfc702bd 602
2c83567e
JA
603 if (td->use_aio) {
604 if (td_read(td))
605 io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
606 else
607 io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
608 }
609
57d753e3 610 gettimeofday(&io_u->start_time, NULL);
2c83567e
JA
611 return io_u;
612}
613
40ef7f64
JA
614static inline void td_set_runstate(struct thread_data *td, int runstate)
615{
616 td->old_runstate = td->runstate;
617 td->runstate = runstate;
618}
619
645785e5
JA
620static int get_next_verify(struct thread_data *td,
621 unsigned long long *offset, unsigned int *len)
622{
623 struct io_piece *ipo;
624
625 if (list_empty(&td->io_hist_list))
626 return 1;
627
628 ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
629 list_del(&ipo->list);
630
631 *offset = ipo->offset;
632 *len = ipo->len;
633 free(ipo);
634 return 0;
635}
636
9d0c6ca2
JA
637static void prune_io_piece_log(struct thread_data *td)
638{
639 struct io_piece *ipo;
640
641 while (!list_empty(&td->io_hist_list)) {
642 ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
643
644 list_del(&ipo->list);
645 free(ipo);
646 }
647}
648
9d0c6ca2
JA
649/*
650 * log a successful write, so we can unwind the log for verify
651 */
652static void log_io_piece(struct thread_data *td, struct io_u *io_u)
653{
49d2caab 654 struct io_piece *ipo = malloc(sizeof(struct io_piece));
9d0c6ca2
JA
655 struct list_head *entry;
656
657 INIT_LIST_HEAD(&ipo->list);
658 ipo->offset = io_u->offset;
659 ipo->len = io_u->buflen;
660
49d2caab
JA
661 /*
662 * for random io where the writes extend the file, it will typically
663 * be laid out with the blocks scattered as written. it's faster to
664 * read them back in that order again, so don't sort
665 */
666 if (td->sequential || !td->overwrite) {
9d0c6ca2
JA
667 list_add_tail(&ipo->list, &td->io_hist_list);
668 return;
669 }
670
671 /*
672 * for random io, sort the list so verify will run faster
673 */
674 entry = &td->io_hist_list;
675 while ((entry = entry->prev) != &td->io_hist_list) {
676 struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
677
9d0c6ca2
JA
678 if (__ipo->offset < ipo->offset)
679 break;
680 }
681
682 list_add(&ipo->list, entry);
683}
684
91fc5dc9 685static void do_sync_verify(struct thread_data *td)
cfc702bd 686{
40ef7f64 687 struct timeval t;
e8457004 688 struct io_u *io_u = NULL;
645785e5 689 int ret;
e8457004 690
40ef7f64 691 td_set_runstate(td, TD_VERIFYING);
e8457004
JA
692
693 io_u = __get_io_u(td);
694
40ef7f64 695 if (!td->odirect) {
6e2c38cc
JA
696 if (!td->use_mmap) {
697 if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {
698 td->error = errno;
699 goto out;
700 }
701 } else {
702 if (madvise(td->mmap, td->io_size, MADV_DONTNEED)) {
703 td->error = errno;
704 goto out;
705 }
40ef7f64
JA
706 }
707 }
708
e8457004
JA
709 do {
710 if (td->terminate)
711 break;
40ef7f64
JA
712
713 gettimeofday(&t, NULL);
714 if (runtime_exceeded(td, &t))
715 break;
716
645785e5
JA
717 if (get_next_verify(td, &io_u->offset, &io_u->buflen))
718 break;
719
720 if (td->cur_off != io_u->offset) {
721 if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
722 td->error = errno;
723 break;
724 }
725 }
e8457004
JA
726
727 ret = read(td->fd, io_u->buf, io_u->buflen);
728 if (ret < (int) io_u->buflen) {
729 if (ret == -1) {
730 td->error = errno;
731 break;
732 } else if (!ret)
733 break;
734 else
735 io_u->buflen = ret;
736 }
737
645785e5 738 if (verify_io_u(io_u))
e8457004
JA
739 break;
740
645785e5 741 td->cur_off = io_u->offset + io_u->buflen;
e8457004
JA
742 } while (1);
743
744out:
40ef7f64 745 td_set_runstate(td, TD_RUNNING);
e8457004 746 put_io_u(td, io_u);
cfc702bd
JA
747}
748
6e2c38cc
JA
749static int __do_sync_mmap(struct thread_data *td, struct io_u *io_u)
750{
751 unsigned long long real_off = io_u->offset - td->file_offset;
752
753 if (td_read(td))
754 memcpy(io_u->buf, td->mmap + real_off, io_u->buflen);
755 else
756 memcpy(td->mmap + real_off, io_u->buf, io_u->buflen);
c009cdac
JA
757
758 /*
759 * not really direct, but should drop the pages from the cache
760 */
761 if (td->odirect) {
762 msync(td->mmap + real_off, io_u->buflen, MS_SYNC);
763 madvise(td->mmap + real_off, io_u->buflen, MADV_DONTNEED);
764 }
6e2c38cc
JA
765
766 return io_u->buflen;
767}
768
769static int __do_sync_rw(struct thread_data *td, struct io_u *io_u)
770{
771 if (td->cur_off != io_u->offset) {
772 if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
773 td->error = errno;
774 return 1;
775 }
776 }
777
778 if (td_read(td))
779 return read(td->fd, io_u->buf, io_u->buflen);
780 else
781 return write(td->fd, io_u->buf, io_u->buflen);
782}
783
784static void sync_td(struct thread_data *td)
785{
786 if (!td->use_mmap)
787 fsync(td->fd);
788 else
789 msync(td->mmap, td->file_size, MS_SYNC);
790}
791
43000118 792static void do_sync_io(struct thread_data *td)
892199bd 793{
7889f07b 794 unsigned long msec, usec;
e8457004 795 struct io_u *io_u = NULL;
2c83567e 796 struct timeval e;
892199bd 797
49d2caab 798 while (td->this_io_bytes < td->io_size) {
892199bd
JA
799 int ret;
800
801 if (td->terminate)
802 break;
803
2c83567e 804 io_u = get_io_u(td);
7889f07b
JA
805 if (!io_u)
806 break;
2c83567e 807
6e2c38cc
JA
808 if (!td->use_mmap)
809 ret = __do_sync_rw(td, io_u);
892199bd 810 else
6e2c38cc 811 ret = __do_sync_mmap(td, io_u);
892199bd 812
2c83567e 813 if (ret < (int) io_u->buflen) {
892199bd
JA
814 if (ret == -1)
815 td->error = errno;
816 break;
817 }
818
62bb4285 819 if (td_write(td))
645785e5
JA
820 log_io_piece(td, io_u);
821
4240cfa1 822 td->io_blocks++;
49d2caab
JA
823 td->io_bytes += io_u->buflen;
824 td->this_io_bytes += io_u->buflen;
63a09e51 825 td->cur_off = io_u->offset + io_u->buflen;
4240cfa1 826
86184d14
JA
827 gettimeofday(&e, NULL);
828
57d753e3 829 usec = utime_since(&io_u->start_time, &e);
86184d14 830
9e850933 831 rate_throttle(td, usec, io_u->buflen);
892199bd 832
4240cfa1
JA
833 if (check_min_rate(td, &e)) {
834 td->error = ENODATA;
835 break;
836 }
892199bd 837
4240cfa1 838 msec = usec / 1000;
57d753e3 839 add_clat_sample(td, msec);
645785e5 840 add_bw_sample(td);
67903a2e
JA
841
842 if (runtime_exceeded(td, &e))
843 break;
2c83567e 844
cdf92433 845 put_io_u(td, io_u);
e8457004 846 io_u = NULL;
cdf92433 847
e97712ed 848 if (td->thinktime)
d15c5195 849 usec_sleep(td, td->thinktime);
e97712ed 850
cdf92433
JA
851 if (should_fsync(td) && td->fsync_blocks &&
852 (td->io_blocks % td->fsync_blocks) == 0)
6e2c38cc 853 sync_td(td);
892199bd
JA
854 }
855
e8457004
JA
856 if (io_u)
857 put_io_u(td, io_u);
858
4240cfa1 859 if (should_fsync(td))
6e2c38cc 860 sync_td(td);
892199bd 861}
43000118 862
1ad72b11
JA
863static int io_u_getevents(struct thread_data *td, int min, int max,
864 struct timespec *t)
865{
866 int r;
867
868 do {
869 r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
870 if (r != -EAGAIN && r != -EINTR)
871 break;
872 } while (1);
873
874 return r;
875}
876
2c83567e 877static int io_u_queue(struct thread_data *td, struct io_u *io_u)
56b0eff0 878{
2c83567e 879 struct iocb *iocb = &io_u->iocb;
56b0eff0
JA
880 int ret;
881
882 do {
254605cd 883 ret = io_submit(td->aio_ctx, 1, &iocb);
56b0eff0
JA
884 if (ret == 1)
885 return 0;
406e7b7c 886 else if (ret == -EAGAIN)
56b0eff0 887 usleep(100);
406e7b7c 888 else if (ret == -EINTR)
a592bd33 889 continue;
56b0eff0
JA
890 else
891 break;
892 } while (1);
893
a592bd33 894 return ret;
56b0eff0
JA
895}
896
98168d55 897#define iocb_time(iocb) ((unsigned long) (iocb)->data)
2c83567e
JA
898#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
899
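/*
 * handle 'nr' completed aio events: account io blocks/bytes, record
 * completion latency and bandwidth, and log written pieces for verify
 */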
f0f3411b 900static int ios_completed(struct thread_data *td, int nr)
2c83567e
JA
901{
902 unsigned long msec;
903 struct io_u *io_u;
904 struct timeval e;
9e850933 905 int i, bytes_done;
2c83567e 906
f0f3411b 907 gettimeofday(&e, NULL);
2c83567e 908
9e850933 909 for (i = 0, bytes_done = 0; i < nr; i++) {
2c83567e
JA
910 io_u = ev_to_iou(td->aio_events + i);
911
f0f3411b 912 td->io_blocks++;
49d2caab
JA
913 td->io_bytes += io_u->buflen;
914 td->this_io_bytes += io_u->buflen;
8c033f93 915
f0f3411b 916 msec = mtime_since(&io_u->issue_time, &e);
2c83567e 917
f0f3411b 918 add_clat_sample(td, msec);
645785e5
JA
919 add_bw_sample(td);
920
62bb4285 921 if (td_write(td))
645785e5 922 log_io_piece(td, io_u);
2c83567e 923
f4bb2243 924 bytes_done += io_u->buflen;
2c83567e
JA
925 put_io_u(td, io_u);
926 }
9e850933
JA
927
928 return bytes_done;
2c83567e
JA
929}
930
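/*
 * reap any events that already completed, then cancel whatever is
 * still in flight before tearing the thread down
 */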
931static void cleanup_pending_aio(struct thread_data *td)
932{
933 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
934 struct list_head *entry, *n;
935 struct io_u *io_u;
936 int r;
937
938 /*
939 * get immediately available events, if any
940 */
1ad72b11 941 r = io_u_getevents(td, 0, td->cur_depth, &ts);
2c83567e 942 if (r > 0)
f0f3411b 943 ios_completed(td, r);
2c83567e
JA
944
945 /*
946 * now cancel remaining active events
947 */
948 list_for_each_safe(entry, n, &td->io_u_busylist) {
949 io_u = list_entry(entry, struct io_u, list);
950
951 r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
952 if (!r)
953 put_io_u(td, io_u);
954 }
955
956 if (td->cur_depth) {
1ad72b11 957 r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
2c83567e 958 if (r > 0)
f0f3411b 959 ios_completed(td, r);
2c83567e
JA
960 }
961}
98168d55 962
d32d9284
JA
963static int async_do_verify(struct thread_data *td, struct io_u **io_u)
964{
965 struct io_u *v_io_u = *io_u;
966 int ret = 0;
967
968 if (v_io_u) {
645785e5 969 ret = verify_io_u(v_io_u);
d32d9284
JA
970 put_io_u(td, v_io_u);
971 *io_u = NULL;
972 }
973
974 return ret;
975}
976
91fc5dc9 977static void do_async_verify(struct thread_data *td)
cfc702bd 978{
f4bb2243 979 struct timeval t;
d32d9284 980 struct io_u *io_u, *v_io_u = NULL;
645785e5 981 int ret;
f4bb2243
JA
982
983 td_set_runstate(td, TD_VERIFYING);
984
f4bb2243
JA
985 do {
986 if (td->terminate)
987 break;
988
989 gettimeofday(&t, NULL);
990 if (runtime_exceeded(td, &t))
991 break;
992
993 io_u = __get_io_u(td);
994 if (!io_u)
995 break;
996
645785e5
JA
997 if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
998 put_io_u(td, io_u);
999 break;
f4bb2243
JA
1000 }
1001
1002 io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
1003 ret = io_u_queue(td, io_u);
1004 if (ret) {
1005 put_io_u(td, io_u);
1006 td->error = ret;
1007 break;
1008 }
1009
f0f3411b
JA
1010 /*
1011 * we have one pending to verify, do that while we are
1012 * doing io on the next one
1013 */
d32d9284
JA
1014 if (async_do_verify(td, &v_io_u))
1015 break;
f0f3411b 1016
1ad72b11 1017 ret = io_u_getevents(td, 1, 1, NULL);
f0f3411b
JA
1018 if (ret != 1) {
1019 if (ret < 0)
1020 td->error = ret;
f4bb2243
JA
1021 break;
1022 }
1023
f0f3411b 1024 v_io_u = ev_to_iou(td->aio_events);
f4bb2243 1025
645785e5 1026 td->cur_off = v_io_u->offset + v_io_u->buflen;
f0f3411b
JA
1027
1028 /*
d32d9284 1029 * if we can't submit more io, we need to verify now
f0f3411b 1030 */
d32d9284
JA
1031 if (queue_full(td) && async_do_verify(td, &v_io_u))
1032 break;
1033
f4bb2243
JA
1034 } while (1);
1035
d32d9284 1036 async_do_verify(td, &v_io_u);
f0f3411b 1037
f4bb2243
JA
1038 if (td->cur_depth)
1039 cleanup_pending_aio(td);
1040
1041 td_set_runstate(td, TD_RUNNING);
cfc702bd
JA
1042}
1043
43000118
JA
1044static void do_async_io(struct thread_data *td)
1045{
1046 struct timeval s, e;
7889f07b 1047 unsigned long usec;
43000118 1048
49d2caab 1049 while (td->this_io_bytes < td->io_size) {
43000118
JA
1050 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
1051 struct timespec *timeout;
2c83567e
JA
1052 int ret, min_evts = 0;
1053 struct io_u *io_u;
9e850933 1054 unsigned int bytes_done;
43000118
JA
1055
1056 if (td->terminate)
1057 break;
1058
2c83567e 1059 io_u = get_io_u(td);
7889f07b
JA
1060 if (!io_u)
1061 break;
43000118 1062
57d753e3 1063 memcpy(&s, &io_u->start_time, sizeof(s));
8baf1bcc 1064
2c83567e 1065 ret = io_u_queue(td, io_u);
56b0eff0 1066 if (ret) {
a3fdb993 1067 put_io_u(td, io_u);
a592bd33 1068 td->error = ret;
43000118
JA
1069 break;
1070 }
1071
57d753e3
JA
1072 gettimeofday(&io_u->issue_time, NULL);
1073 add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
2c83567e 1074 if (td->cur_depth < td->aio_depth) {
43000118
JA
1075 timeout = &ts;
1076 min_evts = 0;
1077 } else {
1078 timeout = NULL;
1079 min_evts = 1;
1080 }
1081
1ad72b11 1082 ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
43000118 1083 if (ret < 0) {
406e7b7c 1084 td->error = ret;
43000118
JA
1085 break;
1086 } else if (!ret)
1087 continue;
1088
f0f3411b 1089 bytes_done = ios_completed(td, ret);
43000118 1090
98168d55
JA
1091 /*
1092 * the rate is batched for now, it should work for batches
1093 * of completions except the very first one which may look
1094 * a little bursty
1095 */
2c83567e 1096 gettimeofday(&e, NULL);
43000118
JA
1097 usec = utime_since(&s, &e);
1098
9e850933 1099 rate_throttle(td, usec, bytes_done);
43000118
JA
1100
1101 if (check_min_rate(td, &e)) {
1102 td->error = ENODATA;
1103 break;
1104 }
67903a2e
JA
1105
1106 if (runtime_exceeded(td, &e))
1107 break;
765d9223
JA
1108
1109 if (td->thinktime)
d15c5195 1110 usec_sleep(td, td->thinktime);
cdf92433
JA
1111
1112 if (should_fsync(td) && td->fsync_blocks &&
1113 (td->io_blocks % td->fsync_blocks) == 0)
1114 fsync(td->fd);
43000118 1115 }
56b0eff0 1116
2c83567e
JA
1117 if (td->cur_depth)
1118 cleanup_pending_aio(td);
4ac89145
JA
1119
1120 if (should_fsync(td))
1121 fsync(td->fd);
56b0eff0
JA
1122}
1123
1124static void cleanup_aio(struct thread_data *td)
1125{
254605cd
JA
1126 io_destroy(td->aio_ctx);
1127
43000118
JA
1128 if (td->aio_events)
1129 free(td->aio_events);
43000118
JA
1130}
1131
1132static int init_aio(struct thread_data *td)
1133{
254605cd 1134 if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
43000118
JA
1135 td->error = errno;
1136 return 1;
1137 }
1138
43000118 1139 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
43000118
JA
1140 return 0;
1141}
1142
2c83567e
JA
1143static void cleanup_io_u(struct thread_data *td)
1144{
1145 struct list_head *entry, *n;
1146 struct io_u *io_u;
1147
1148 list_for_each_safe(entry, n, &td->io_u_freelist) {
1149 io_u = list_entry(entry, struct io_u, list);
1150
1151 list_del(&io_u->list);
2c83567e
JA
1152 free(io_u);
1153 }
6b71c826 1154
99c6704f
JA
1155 if (td->mem_type == MEM_MALLOC)
1156 free(td->orig_buffer);
1157 else if (td->mem_type == MEM_SHM) {
1158 struct shmid_ds sbuf;
1159
1160 shmdt(td->orig_buffer);
1161 shmctl(td->shm_id, IPC_RMID, &sbuf);
891e70f8
JA
1162 } else if (td->mem_type == MEM_MMAP)
1163 munmap(td->orig_buffer, td->orig_buffer_size);
1164 else
1165 fprintf(stderr, "Bad memory type %d\n", td->mem_type);
1166
1167 td->orig_buffer = NULL;
2c83567e
JA
1168}
1169
99c6704f 1170static int init_io_u(struct thread_data *td)
2c83567e
JA
1171{
1172 struct io_u *io_u;
891e70f8 1173 int i, max_units;
6b71c826 1174 char *p;
2c83567e
JA
1175
1176 if (!td->use_aio)
1177 max_units = 1;
1178 else
1179 max_units = td->aio_depth;
1180
891e70f8 1181 td->orig_buffer_size = td->max_bs * max_units + MASK;
99c6704f
JA
1182
1183 if (td->mem_type == MEM_MALLOC)
891e70f8 1184 td->orig_buffer = malloc(td->orig_buffer_size);
99c6704f 1185 else if (td->mem_type == MEM_SHM) {
891e70f8 1186 td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
99c6704f
JA
1187 if (td->shm_id < 0) {
1188 td->error = errno;
1189 perror("shmget");
1190 return 1;
1191 }
1192
1193 td->orig_buffer = shmat(td->shm_id, NULL, 0);
1194 if (td->orig_buffer == (void *) -1) {
1195 td->error = errno;
1196 perror("shmat");
891e70f8
JA
1197 td->orig_buffer = NULL;
1198 return 1;
1199 }
1200 } else if (td->mem_type == MEM_MMAP) {
1201 td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
1202 if (td->orig_buffer == MAP_FAILED) {
1203 td->error = errno;
1204 perror("mmap");
1205 td->orig_buffer = NULL;
99c6704f
JA
1206 return 1;
1207 }
1208 }
6b71c826 1209
2c83567e
JA
1210 INIT_LIST_HEAD(&td->io_u_freelist);
1211 INIT_LIST_HEAD(&td->io_u_busylist);
645785e5 1212 INIT_LIST_HEAD(&td->io_hist_list);
2c83567e 1213
99c6704f 1214 p = ALIGN(td->orig_buffer);
2c83567e
JA
1215 for (i = 0; i < max_units; i++) {
1216 io_u = malloc(sizeof(*io_u));
1217 memset(io_u, 0, sizeof(*io_u));
1218 INIT_LIST_HEAD(&io_u->list);
1219
7889f07b 1220 io_u->buf = p + td->max_bs * i;
2c83567e
JA
1221 list_add(&io_u->list, &td->io_u_freelist);
1222 }
99c6704f
JA
1223
1224 return 0;
2c83567e
JA
1225}
1226
02983297
JA
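/*
 * lay out the io file up front, writing zero-filled max_bs chunks
 * until file_size is reached, optionally fsyncing the result
 */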
1227static int create_file(struct thread_data *td)
1228{
7889f07b 1229 unsigned long long left;
645785e5 1230 unsigned int bs;
02983297 1231 char *b;
645785e5 1232 int r;
02983297 1233
02983297
JA
1234 /*
1235 * unless specifically asked for overwrite, let normal io extend it
1236 */
62bb4285 1237 if (td_write(td) && !td->overwrite)
02983297
JA
1238 return 0;
1239
57d753e3
JA
1240 if (!td->file_size) {
1241 fprintf(stderr, "Need size for create\n");
1242 td->error = EINVAL;
1243 return 1;
1244 }
1245
42fd89a7
JA
1246 printf("Client%d: Laying out IO file\n", td->thread_number);
1247
02983297
JA
1248 td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
1249 if (td->fd < 0) {
1250 td->error = errno;
1251 return 1;
1252 }
1253
c94deb1c
JA
1254 if (ftruncate(td->fd, td->file_size) == -1) {
1255 td->error = errno;
1256 return 1;
1257 }
1258
49d2caab 1259 td->io_size = td->file_size;
7889f07b
JA
1260 b = malloc(td->max_bs);
1261 memset(b, 0, td->max_bs);
1262
1263 left = td->file_size;
1264 while (left) {
1265 bs = td->max_bs;
1266 if (bs > left)
1267 bs = left;
02983297 1268
7889f07b 1269 r = write(td->fd, b, bs);
02983297 1270
645785e5 1271 if (r == (int) bs) {
7889f07b 1272 left -= bs;
02983297 1273 continue;
7889f07b 1274 } else {
02983297
JA
1275 if (r < 0)
1276 td->error = errno;
1277 else
1278 td->error = EIO;
1279
1280 break;
1281 }
1282 }
1283
fc097bfe
JA
1284 if (td->create_fsync)
1285 fsync(td->fd);
1286
02983297
JA
1287 close(td->fd);
1288 td->fd = -1;
1289 free(b);
1290 return 0;
1291}
1292
1293static int file_exists(struct thread_data *td)
1294{
1295 struct stat st;
1296
1297 if (stat(td->file_name, &st) != -1)
1298 return 1;
1299
1300 return errno != ENOENT;
1301}
1302
c4c8f7b3 1303static int file_size(struct thread_data *td)
02983297
JA
1304{
1305 struct stat st;
c94deb1c
JA
1306
1307 if (fstat(td->fd, &st) == -1) {
1308 td->error = errno;
1309 return 1;
1310 }
1311
c94deb1c 1312 if (td_read(td)) {
c4c8f7b3
JA
1313 if (!td->file_size || td->file_size > st.st_size)
1314 td->file_size = st.st_size;
c94deb1c
JA
1315 } else {
1316 if (!td->file_size)
1317 td->file_size = 1024 * 1024 * 1024;
c4c8f7b3
JA
1318 }
1319
1320 return 0;
1321}
1322
1323static int bdev_size(struct thread_data *td)
1324{
1325 size_t bytes;
c94deb1c 1326
c4c8f7b3
JA
1327 if (ioctl(td->fd, BLKGETSIZE64, &bytes) < 0) {
1328 td->error = errno;
1329 return 1;
c94deb1c
JA
1330 }
1331
c4c8f7b3
JA
1332 if (!td->file_size || (td->file_size > bytes))
1333 td->file_size = bytes;
1334
1335 return 0;
1336}
1337
1338static int get_file_size(struct thread_data *td)
1339{
1340 int ret;
1341
1342 if (td->filetype == FIO_TYPE_FILE)
1343 ret = file_size(td);
1344 else
1345 ret = bdev_size(td);
1346
1347 if (ret)
1348 return ret;
1349
1350 if (td->file_offset > td->file_size) {
c94deb1c
JA
1351 fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);
1352 return 1;
1353 }
1354
c4c8f7b3 1355 td->io_size = td->file_size - td->file_offset;
c94deb1c
JA
1356 if (td->io_size == 0) {
1357 fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
1358 td->error = EINVAL;
1359 return 1;
1360 }
1361
1362 return 0;
1363}
1364
6e2c38cc
JA
1365static int setup_file_mmap(struct thread_data *td)
1366{
1367 int flags;
1368
1369 if (td_read(td))
1370 flags = PROT_READ;
1371 else {
1372 flags = PROT_WRITE;
1373
7f46ef08 1374 if (td->verify != VERIFY_NONE)
6e2c38cc
JA
1375 flags |= PROT_READ;
1376 }
1377
1378 td->mmap = mmap(NULL, td->file_size, flags, MAP_SHARED, td->fd, td->file_offset);
1379 if (td->mmap == MAP_FAILED) {
1380 td->mmap = NULL;
1381 td->error = errno;
1382 return 1;
1383 }
1384
1385 if (td->invalidate_cache) {
1386 if (madvise(td->mmap, td->file_size, MADV_DONTNEED) < 0) {
1387 td->error = errno;
1388 return 1;
1389 }
1390 }
1391
1392 if (td->sequential) {
1393 if (madvise(td->mmap, td->file_size, MADV_SEQUENTIAL) < 0) {
1394 td->error = errno;
1395 return 1;
1396 }
1397 } else {
1398 if (madvise(td->mmap, td->file_size, MADV_RANDOM) < 0) {
1399 td->error = errno;
1400 return 1;
1401 }
1402 }
1403
1404 return 0;
1405}
1406
1407static int setup_file_plain(struct thread_data *td)
1408{
1409 if (td->invalidate_cache) {
1410 if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
1411 td->error = errno;
1412 return 1;
1413 }
1414 }
1415
1416 if (td->sequential) {
1417 if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
1418 td->error = errno;
1419 return 1;
1420 }
1421 } else {
1422 if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_RANDOM) < 0) {
1423 td->error = errno;
1424 return 1;
1425 }
1426 }
1427
1428 return 0;
1429}
1430
c94deb1c
JA
1431static int setup_file(struct thread_data *td)
1432{
02983297
JA
1433 int flags = 0;
1434
1435 if (!file_exists(td)) {
1436 if (!td->create_file) {
1437 td->error = ENOENT;
1438 return 1;
1439 }
1440 if (create_file(td))
1441 return 1;
1442 }
1443
1444 if (td->odirect)
1445 flags |= O_DIRECT;
1446
1447 if (td_read(td))
1448 td->fd = open(td->file_name, flags | O_RDONLY);
1449 else {
1450 if (!td->overwrite)
1451 flags |= O_TRUNC;
74b4b5fb
JA
1452 if (td->sync_io)
1453 flags |= O_SYNC;
6e2c38cc
JA
1454
1455 flags |= O_RDWR;
02983297 1456
e8457004 1457 td->fd = open(td->file_name, flags | O_CREAT, 0600);
02983297
JA
1458 }
1459
1460 if (td->fd == -1) {
1461 td->error = errno;
1462 return 1;
1463 }
1464
c94deb1c 1465 if (get_file_size(td))
49d2caab 1466 return 1;
49d2caab 1467
62bb4285 1468 if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {
c94deb1c 1469 td->error = errno;
02983297
JA
1470 return 1;
1471 }
1472
6e2c38cc
JA
1473 if (!td->use_mmap)
1474 return setup_file_plain(td);
1475 else
1476 return setup_file_mmap(td);
02983297
JA
1477}
1478
debf703a
JA
1479static int check_dev_match(dev_t dev, char *path)
1480{
1481 int major, minor;
1482 char line[256], *p;
1483 FILE *f;
1484
1485 f = fopen(path, "r");
1486 if (!f) {
1487 perror("open path");
1488 return 1;
1489 }
1490
1491 p = fgets(line, sizeof(line), f);
1492 if (!p) {
1493 fclose(f);
1494 return 1;
1495 }
1496
1497 if (sscanf(p, "%u:%u", &major, &minor) != 2) {
1498 fclose(f);
1499 return 1;
1500 }
1501
1502 if (((major << 8) | minor) == dev) {
1503 fclose(f);
1504 return 0;
1505 }
1506
1507 fclose(f);
1508 return 1;
1509}
1510
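/*
 * recurse through the sysfs tree (starting at /sys/block) looking for
 * the directory whose 'dev' file matches our device's major:minor
 */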
1511static char *find_block_dir(dev_t dev, char *path)
1512{
1513 struct dirent *dir;
1514 char *found = NULL;
1515 struct stat st;
1516 DIR *D;
1517
1518 D = opendir(path);
1519 if (!D)
1520 return NULL;
1521
1522 while ((dir = readdir(D)) != NULL) {
1523 char full_path[256];
1524
1525 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1526 continue;
1527 if (!strcmp(dir->d_name, "device"))
1528 continue;
1529
1530 sprintf(full_path, "%s/%s", path, dir->d_name);
1531
1532 if (!strcmp(dir->d_name, "dev")) {
1533 if (!check_dev_match(dev, full_path)) {
1534 found = path;
1535 break;
1536 }
1537 }
1538
1539 if (stat(full_path, &st) == -1) {
1540 perror("stat");
1541 break;
1542 }
1543
1544 if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))
1545 continue;
1546
1547 if ((found = find_block_dir(dev, full_path)) != NULL)
1548 break;
1549 }
1550
1551 closedir(D);
1552 return found;
1553}
1554
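/*
 * parse the sysfs stat file for the device and return the io_ticks
 * field, ie the number of msecs the disk spent busy with io
 */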
1555static int get_io_ticks(struct thread_data *td)
1556{
1557 int i1, i2, i3, i4, i5, i6, i7, i8, i9;
1558 unsigned long long ull1, ull2;
1559 char line[256];
1560 FILE *f;
1561 char *p;
1562
1563 f = fopen(td->disk_stat_path, "r");
1564 if (!f)
1565 return 0;
1566
1567 p = fgets(line, sizeof(line), f);
1568 if (!p) {
1569 fclose(f);
1570 return 0;
1571 }
1572
1573 if (sscanf(p, "%8u %8u %8llu %8u %8u %8u %8llu %8u %8u %8u %8u", &i1, &i2, &ull1, &i3, &i4, &i5, &ull2, &i6, &i7, &i8, &i9) != 11) {
1574 fclose(f);
1575 return 0;
1576 }
1577
1578 fclose(f);
1579 return i8;
1580}
1581
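/*
 * add the disk busy time accumulated since the previous sample, so
 * each loop contributes its share much like the rusage accounting
 */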
3d7c8c59 1582static void update_io_ticks(struct thread_data *td)
debf703a
JA
1583{
1584 unsigned long ticks;
1585
1586 ticks = get_io_ticks(td);
3d7c8c59
JA
1587 td->io_ticks += (ticks - td->start_io_ticks);
1588 td->start_io_ticks = ticks;
debf703a
JA
1589}
1590
1591static int init_disk_stats(struct thread_data *td)
1592{
1593 struct stat st;
1594 char foo[256], tmp[256];
1595 dev_t dev;
1596 char *p, *dir;
1597
1598 if (fstat(td->fd, &st) < 0) {
1599 td->error = errno;
1600 return 1;
1601 }
1602
1603 if (td->filetype == FIO_TYPE_FILE)
1604 dev = st.st_dev;
1605 else
1606 dev = st.st_rdev;
1607
1608 sprintf(foo, "/sys/block");
1609 dir = find_block_dir(dev, foo);
1610 if (!dir)
1611 return 0;
1612
1613 /*
1614 * if this is inside a partition dir, jump back to parent
1615 */
1616 sprintf(tmp, "%s/queue", dir);
1617 if (stat(tmp, &st)) {
1618 p = dirname(dir);
1619 sprintf(tmp, "%s/queue", p);
1620 if (stat(tmp, &st)) {
1621 fprintf(stderr, "unknown sysfs layout\n");
1622 return 0;
1623 }
1624 sprintf(td->disk_stat_path, "%s/stat", p);
1625 } else
 sprintf(td->disk_stat_path, "%s/stat", dir);
1626
3d7c8c59 1627 td->start_io_ticks = get_io_ticks(td);
debf703a
JA
1628 return 0;
1629}
1630
d32d9284
JA
1631static void clear_io_state(struct thread_data *td)
1632{
9d0c6ca2
JA
1633 if (!td->use_aio)
1634 lseek(td->fd, 0, SEEK_SET);
1635
d32d9284 1636 td->cur_off = 0;
49d2caab
JA
1637 td->last_bytes = 0;
1638 td->stat_io_bytes = 0;
1639 td->this_io_bytes = 0;
1640
1641 if (td->file_map)
1642 memset(td->file_map, 0, td->num_maps * sizeof(long));
d32d9284
JA
1643}
1644
f6dcd824
JA
1645static void update_rusage_stat(struct thread_data *td)
1646{
1647 if (!td->runtime)
1648 return;
1649
1650 getrusage(RUSAGE_SELF, &td->ru_end);
1651
1652 td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
1653 td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
1654 td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
1655
1656
1657 memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
1658}
1659
189873de 1660static void *thread_main(void *data)
892199bd 1661{
189873de 1662 struct thread_data *td = data;
02983297 1663 int ret = 1;
892199bd 1664
7292613b 1665 setsid();
892199bd
JA
1666 td->pid = getpid();
1667
99c6704f
JA
1668 if (init_io_u(td))
1669 goto err;
2c83567e 1670
18e0b78c
JA
1671 if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
1672 td->error = errno;
1673 goto err;
1674 }
1675
43000118
JA
1676 if (td->use_aio && init_aio(td))
1677 goto err;
1678
f737299d 1679 if (td->ioprio) {
892199bd
JA
1680 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
1681 td->error = errno;
599002b3 1682 goto err;
892199bd
JA
1683 }
1684 }
1685
1686 sem_post(&startup_sem);
1687 sem_wait(&td->mutex);
43000118 1688
fc097bfe
JA
1689 if (!td->create_serialize && setup_file(td))
1690 goto err;
1691
debf703a
JA
1692 if (init_disk_stats(td))
1693 goto err;
1694
49d2caab
JA
1695 if (init_random_state(td))
1696 goto err;
1697
293753bb 1698 while (td->loops--) {
f6dcd824
JA
1699 getrusage(RUSAGE_SELF, &td->ru_start);
1700 gettimeofday(&td->start, NULL);
1701 memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
293753bb
JA
1702
1703 if (td->ratemin)
1704 memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
7292613b 1705
d32d9284 1706 clear_io_state(td);
9d0c6ca2 1707 prune_io_piece_log(td);
fd1ae4c9 1708
b2de0ed2 1709 if (!td->use_aio)
b6794fbf 1710 do_sync_io(td);
b2de0ed2
JA
1711 else
1712 do_async_io(td);
1713
91fc5dc9
JA
1714 if (td->error)
1715 break;
1716
f6dcd824
JA
1717 td->runtime += mtime_since_now(&td->start);
1718 update_rusage_stat(td);
3d7c8c59 1719 update_io_ticks(td);
f6dcd824 1720
7f46ef08 1721 if (td->verify == VERIFY_NONE)
b2de0ed2 1722 continue;
cfc702bd 1723
b2de0ed2 1724 clear_io_state(td);
d32d9284 1725
91fc5dc9
JA
1726 if (!td->use_aio)
1727 do_sync_verify(td);
1728 else
1729 do_async_verify(td);
1730
1731 if (td->error)
1732 break;
b6794fbf 1733 }
7292613b 1734
892199bd 1735 ret = 0;
a0a9b35b
JA
1736
1737 if (td->bw_log)
1738 finish_log(td, td->bw_log, "bw");
1739 if (td->lat_log)
1740 finish_log(td, td->lat_log, "lat");
4ac89145 1741
98dd52d6 1742 if (exitall_on_terminate)
27c32a38 1743 terminate_threads(td->groupid);
98dd52d6 1744
892199bd 1745err:
7292613b
JA
1746 if (td->fd != -1) {
1747 close(td->fd);
1748 td->fd = -1;
1749 }
6e2c38cc
JA
1750 if (td->mmap)
1751 munmap(td->mmap, td->file_size);
4ac89145
JA
1752 if (td->use_aio)
1753 cleanup_aio(td);
2c83567e 1754 cleanup_io_u(td);
599002b3 1755 if (ret) {
892199bd 1756 sem_post(&startup_sem);
599002b3
JA
1757 sem_wait(&td->mutex);
1758 }
40ef7f64 1759 td_set_runstate(td, TD_EXITED);
189873de
JA
1760 return NULL;
1761
1762}
1763
1764static void *fork_main(int shm_id, int offset)
1765{
1766 struct thread_data *td;
1767 void *data;
1768
1769 data = shmat(shm_id, NULL, 0);
1770 if (data == (void *) -1) {
1771 perror("shmat");
1772 return NULL;
1773 }
1774
1775 td = data + offset * sizeof(struct thread_data);
1776 thread_main(td);
4240cfa1 1777 shmdt(data);
892199bd
JA
1778 return NULL;
1779}
1780
57d753e3
JA
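/*
 * derive min/max, mean and sample standard deviation from the running
 * sums:
 *
 *   mean = val / n
 *   dev  = sqrt((val_sq - n * mean^2) / (n - 1))
 */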
1781static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
1782 double *mean, double *dev)
1783{
1784 double n;
1785
1786 if (is->samples == 0)
1787 return 0;
1788
1789 *min = is->min_val;
1790 *max = is->max_val;
1791
1792 n = (double) is->samples;
1793 *mean = (double) is->val / n;
1794 *dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1));
1795 return 1;
1796}
1797
557e4102
JA
1798static void show_thread_status(struct thread_data *td,
1799 struct group_run_stats *rs)
892199bd
JA
1800{
1801 int prio, prio_class;
f6dcd824 1802 unsigned long min, max, bw = 0;
92b229ed 1803 double mean, dev, usr_cpu, sys_cpu;
892199bd 1804
49d2caab 1805 if (!td->io_bytes && !td->error)
213b446c
JA
1806 return;
1807
892199bd 1808 if (td->runtime)
49d2caab 1809 bw = td->io_bytes / td->runtime;
892199bd
JA
1810
1811 prio = td->ioprio & 0xff;
1812 prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
1813
f6dcd824 1814 printf("Client%d (g=%d): err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->groupid, td->error, td->io_bytes >> 20, bw, td->runtime);
fd1ae4c9 1815
57d753e3
JA
1816 if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
1817 printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
1818 if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
1819 printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
557e4102
JA
1820 if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev)) {
1821 double p_of_agg;
1822
1823 p_of_agg = mean * 100 / (double) rs->agg[td->ddir];
1824 printf(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, dev=%5.02f\n", min, max, p_of_agg, mean, dev);
1825 }
92b229ed
JA
1826
1827 if (td->runtime) {
f6dcd824
JA
1828 usr_cpu = (double) td->usr_time * 100 / (double) td->runtime;
1829 sys_cpu = (double) td->sys_time * 100 / (double) td->runtime;
92b229ed
JA
1830 } else {
1831 usr_cpu = 0;
1832 sys_cpu = 0;
1833 }
1834
f6dcd824 1835 printf(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
debf703a
JA
1836
1837 if (td->io_ticks) {
1838 double disk_util = (double) 100 * td->io_ticks / (double) td->runtime;
1839 printf(" disk : util=%3.2f%%\n", disk_util);
1840 }
892199bd
JA
1841}
1842
3f39453a 1843static void print_thread_status(struct thread_data *td, int nr_running,
8dbff0b1 1844 int t_rate, int m_rate)
3f39453a 1845{
3f39453a
JA
1846 printf("Threads now running: %d", nr_running);
1847 if (m_rate || t_rate)
1848 printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
8dbff0b1
JA
1849 printf(" : [%s]\r", run_str);
1850 fflush(stdout);
3f39453a
JA
1851}
1852
40ef7f64
JA
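/*
 * update the per-thread character in run_str when the state changes:
 * P=not created, C=created, R/r=seq/random read, W/w=seq/random write,
 * V=verifying, E=exited, _=reaped
 */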
1853static void check_str_update(struct thread_data *td, int n, int t, int m)
1854{
1855 char c = run_str[td->thread_number - 1];
1856
1857 if (td->runstate == td->old_runstate)
1858 return;
1859
1860 switch (td->runstate) {
1861 case TD_REAPED:
1862 c = '_';
1863 break;
f4bb2243
JA
1864 case TD_EXITED:
1865 c = 'E';
1866 break;
40ef7f64 1867 case TD_RUNNING:
af678352
JA
1868 if (td_read(td)) {
1869 if (td->sequential)
1870 c = 'R';
1871 else
1872 c = 'r';
1873 } else {
1874 if (td->sequential)
1875 c = 'W';
1876 else
1877 c = 'w';
1878 }
40ef7f64
JA
1879 break;
1880 case TD_VERIFYING:
1881 c = 'V';
1882 break;
1883 case TD_CREATED:
1884 c = 'C';
1885 break;
1886 case TD_NOT_CREATED:
1887 c = 'P';
1888 break;
1889 default:
1890 printf("state %d\n", td->runstate);
1891 }
1892
1893 run_str[td->thread_number - 1] = c;
1894 print_thread_status(td, n, t, m);
1895 td->old_runstate = td->runstate;
1896}
1897
213b446c 1898static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
02bdd9ba 1899{
213b446c 1900 int i;
02bdd9ba 1901
3f39453a
JA
1902 /*
1903 * reap exited threads (TD_EXITED -> TD_REAPED)
1904 */
02bdd9ba
JA
1905 for (i = 0; i < thread_number; i++) {
1906 struct thread_data *td = &threads[i];
1907
40ef7f64
JA
1908 check_str_update(td, *nr_running, *t_rate, *m_rate);
1909
213b446c
JA
1910 if (td->runstate != TD_EXITED)
1911 continue;
02bdd9ba 1912
40ef7f64 1913 td_set_runstate(td, TD_REAPED);
189873de
JA
1914
1915 if (td->use_thread) {
1916 long ret;
1917
1918 if (pthread_join(td->thread, (void *) &ret))
1919 perror("thread_join");
1920 } else
1921 waitpid(td->pid, NULL, 0);
1922
213b446c
JA
1923 (*nr_running)--;
1924 (*m_rate) -= td->ratemin;
1925 (*t_rate) -= td->rate;
40ef7f64 1926 check_str_update(td, *nr_running, *t_rate, *m_rate);
213b446c 1927 }
02bdd9ba
JA
1928}
1929
fc24389f
JA
1930static void run_threads(char *argv[])
1931{
be33abe4 1932 struct timeval genesis;
fc24389f
JA
1933 struct thread_data *td;
1934 unsigned long spent;
2a81240d 1935 int i, todo, nr_running, m_rate, t_rate, nr_started;
fc24389f 1936
fc24389f
JA
1937 printf("Starting %d threads\n", thread_number);
1938 fflush(stdout);
1939
7292613b
JA
1940 signal(SIGINT, sig_handler);
1941
fc24389f 1942 todo = thread_number;
02bdd9ba 1943 nr_running = 0;
2a81240d 1944 nr_started = 0;
213b446c 1945 m_rate = t_rate = 0;
fc24389f 1946
8bdcfab5
JA
1947 for (i = 0; i < thread_number; i++) {
1948 td = &threads[i];
1949
fc097bfe
JA
1950 if (!td->create_serialize)
1951 continue;
1952
8bdcfab5
JA
1953 /*
1954 * do file setup here so it happens sequentially,
1955 * we don't want X number of threads getting their
1956 * client data interspersed on disk
1957 */
1958 if (setup_file(td)) {
40ef7f64 1959 td_set_runstate(td, TD_REAPED);
8bdcfab5
JA
1960 todo--;
1961 }
1962 }
1963
1964 gettimeofday(&genesis, NULL);
1965
213b446c 1966 while (todo) {
3f39453a
JA
1967 /*
1968 * create threads (TD_NOT_CREATED -> TD_CREATED)
1969 */
fc24389f
JA
1970 for (i = 0; i < thread_number; i++) {
1971 td = &threads[i];
1972
02bdd9ba 1973 if (td->runstate != TD_NOT_CREATED)
fc24389f
JA
1974 continue;
1975
213b446c
JA
1976 /*
1977 * never got a chance to start, killed by other
1978 * thread for some reason
1979 */
1980 if (td->terminate) {
1981 todo--;
1982 continue;
1983 }
1984
fc24389f 1985 if (td->start_delay) {
be33abe4 1986 spent = mtime_since_now(&genesis);
fc24389f
JA
1987
1988 if (td->start_delay * 1000 > spent)
1989 continue;
1990 }
1991
2a81240d 1992 if (td->stonewall && (nr_started || nr_running))
ea6f96a2 1993 break;
2a81240d 1994
40ef7f64
JA
1995 td_set_runstate(td, TD_CREATED);
1996 check_str_update(td, nr_running, t_rate, m_rate);
fc24389f
JA
1997 sem_init(&startup_sem, 1, 1);
1998 todo--;
2a81240d 1999 nr_started++;
fc24389f 2000
189873de
JA
2001 if (td->use_thread) {
2002 if (pthread_create(&td->thread, NULL, thread_main, td)) {
2003 perror("thread_create");
2004 nr_started--;
2005 }
2006 } else {
2007 if (fork())
2008 sem_wait(&startup_sem);
2009 else {
2010 fork_main(shm_id, i);
2011 exit(0);
2012 }
fc24389f
JA
2013 }
2014 }
2015
3f39453a 2016 /*
e8457004 2017 * start created threads (TD_CREATED -> TD_RUNNING)
3f39453a 2018 */
fc24389f
JA
2019 for (i = 0; i < thread_number; i++) {
2020 struct thread_data *td = &threads[i];
2021
3f39453a
JA
2022 if (td->runstate != TD_CREATED)
2023 continue;
2024
40ef7f64 2025 td_set_runstate(td, TD_RUNNING);
3f39453a 2026 nr_running++;
2a81240d 2027 nr_started--;
3f39453a
JA
2028 m_rate += td->ratemin;
2029 t_rate += td->rate;
40ef7f64 2030 check_str_update(td, nr_running, t_rate, m_rate);
3f39453a 2031 sem_post(&td->mutex);
fc24389f
JA
2032 }
2033
e8457004
JA
2034 for (i = 0; i < thread_number; i++) {
2035 struct thread_data *td = &threads[i];
2036
b48889bb
JA
2037 if (td->runstate != TD_RUNNING &&
2038 td->runstate != TD_VERIFYING)
e8457004
JA
2039 continue;
2040
40ef7f64 2041 check_str_update(td, nr_running, t_rate, m_rate);
e8457004
JA
2042 }
2043
213b446c 2044 reap_threads(&nr_running, &t_rate, &m_rate);
02bdd9ba 2045
fc24389f
JA
2046 if (todo)
2047 usleep(100000);
2048 }
02bdd9ba
JA
2049
2050 while (nr_running) {
213b446c 2051 reap_threads(&nr_running, &t_rate, &m_rate);
02bdd9ba
JA
2052 usleep(10000);
2053 }
fc24389f
JA
2054}
2055
0d80f40d 2056static void show_group_stats(struct group_run_stats *rs, int id)
8867c0a8 2057{
0d80f40d
JA
2058 printf("\nRun status group %d:\n", id);
2059
2060 if (rs->max_run[DDIR_READ])
2061 printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[0], rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
2062 if (rs->max_run[DDIR_WRITE])
2063 printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[1], rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
2064}
2065
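/*
 * fold per-thread results into per-group aggregates (io done, min/max
 * bandwidth and runtime), then print the thread and group summaries
 */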
2066static void show_run_stats(void)
2067{
2068 struct group_run_stats *runstats, *rs;
557e4102 2069 struct thread_data *td;
8867c0a8
JA
2070 int i;
2071
0d80f40d
JA
2072 runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
2073
2074 for (i = 0; i < groupid + 1; i++) {
2075 rs = &runstats[i];
2076
f6dcd824 2077 memset(rs, 0, sizeof(*rs));
0d80f40d
JA
2078 rs->min_bw[0] = rs->min_run[0] = ~0UL;
2079 rs->min_bw[1] = rs->min_run[1] = ~0UL;
0d80f40d
JA
2080 }
2081
2082 for (i = 0; i < thread_number; i++) {
0d80f40d
JA
2083 unsigned long bw = 0;
2084
557e4102
JA
2085 td = &threads[i];
2086
2087 if (td->error)
2088 continue;
2089
0d80f40d
JA
2090 rs = &runstats[td->groupid];
2091
557e4102
JA
2092 if (td->runtime < rs->min_run[td->ddir])
2093 rs->min_run[td->ddir] = td->runtime;
2094 if (td->runtime > rs->max_run[td->ddir])
2095 rs->max_run[td->ddir] = td->runtime;
0d80f40d 2096
557e4102
JA
2097 if (td->runtime)
2098 bw = td->io_bytes / td->runtime;
2099 if (bw < rs->min_bw[td->ddir])
2100 rs->min_bw[td->ddir] = bw;
2101 if (bw > rs->max_bw[td->ddir])
2102 rs->max_bw[td->ddir] = bw;
0d80f40d 2103
557e4102 2104 rs->io_mb[td->ddir] += td->io_bytes >> 20;
0d80f40d 2105 }
9d489c62 2106
0d80f40d
JA
2107 for (i = 0; i < groupid + 1; i++) {
2108 rs = &runstats[i];
2109
2110 if (rs->max_run[0])
2111 rs->agg[0] = (rs->io_mb[0]*1024*1000) / rs->max_run[0];
2112 if (rs->max_run[1])
2113 rs->agg[1] = (rs->io_mb[1]*1024*1000) / rs->max_run[1];
0d80f40d 2114 }
557e4102
JA
2115
2116 for (i = 0; i < thread_number; i++) {
2117 td = &threads[i];
2118 rs = &runstats[td->groupid];
2119
c4c8f7b3 2120 show_thread_status(td, rs);
557e4102 2121 }
9d489c62
JA
2122
2123 for (i = 0; i < groupid + 1; i++)
2124 show_group_stats(&runstats[i], i);
0d80f40d
JA
2125}
2126
2127int main(int argc, char *argv[])
2128{
27c32a38 2129 memset(run_str, 0, sizeof(run_str));
5961d92c 2130
27c32a38 2131 if (parse_options(argc, argv))
5961d92c 2132 return 1;
7dd1389e 2133
4240cfa1
JA
2134 if (!thread_number) {
2135 printf("Nothing to do\n");
2136 return 1;
2137 }
7dd1389e 2138
fc24389f 2139 run_threads(argv);
0d80f40d 2140 show_run_stats();
fc24389f 2141
892199bd
JA
2142 return 0;
2143}