/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/unistd.h>

#include "fio.h"

#define MASK	(4095)

#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

int groupid = 0;
int thread_number = 0;
char run_str[MAX_JOBS + 1];
int shm_id = 0;
/*
 * thread life cycle
 */
enum {
	TD_NOT_CREATED = 0,
	TD_CREATED,
	TD_RUNNING,
	TD_VERIFYING,
	TD_EXITED,
	TD_REAPED,
};

/*
 * The io unit
 */
struct io_u {
	struct iocb iocb;
	struct timeval start_time;
	struct timeval issue_time;

	char *buf;
	unsigned int buflen;
	unsigned long long offset;

	struct list_head list;
};

#define should_fsync(td)	(td_write(td) && !(td)->odirect)

static sem_t startup_sem;

#define TERMINATE_ALL	(-1)

static void terminate_threads(int groupid)
{
	int i;

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (groupid == TERMINATE_ALL || groupid == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
		}
	}
}

static void sig_handler(int sig)
{
	terminate_threads(TERMINATE_ALL);
}

static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;

	return sec + usec;
}

static unsigned long utime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return utime_since(s, &t);
}

static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000;
	usec /= (double) 1000;

	return sec + usec;
}

static unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return mtime_since(s, &t);
}

static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}

static int random_map_free(struct thread_data *td, unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(td, block);
	unsigned int bit = RAND_MAP_BIT(td, block);

	return (td->file_map[idx] & (1UL << bit)) == 0;
}

static int get_next_free_block(struct thread_data *td, unsigned long long *b)
{
	int i;

	*b = 0;
	i = 0;
	while ((*b) * td->min_bs < td->io_size) {
		if (td->file_map[i] != -1UL) {
			*b += ffz(td->file_map[i]);
			return 0;
		}

		*b += BLOCKS_PER_MAP;
		i++;
	}

	return 1;
}

static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned long block = io_u->offset / td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		int idx, bit;

		if (!random_map_free(td, block))
			break;

		idx = RAND_MAP_IDX(td, block);
		bit = RAND_MAP_BIT(td, block);

		assert(idx < td->num_maps);

		td->file_map[idx] |= (1UL << bit);
		block++;
		blocks++;
	}

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
}
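/*
 * Pick the offset for the next io unit. For random io, draw a block
 * number in [0, io_size / min_bs), retrying up to 50 times if that
 * block has already been touched according to the random map, and
 * falling back to the first free block in the map. For sequential io,
 * simply continue from where the last io ended.
 */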
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
{
	unsigned long long b, rb;
	long r;

	if (!td->sequential) {
		unsigned long max_blocks = td->io_size / td->min_bs;
		int loops = 50;

		do {
			lrand48_r(&td->random_state, &r);
			b = ((max_blocks - 1) * r / (RAND_MAX + 1.0));
			rb = b + (td->file_offset / td->min_bs);
			loops--;
		} while (!random_map_free(td, rb) && loops);

		if (!loops) {
			if (get_next_free_block(td, &b))
				return 1;
		}
	} else
		b = td->last_bytes / td->min_bs;

	*offset = (b * td->min_bs) + td->file_offset;
	if (*offset > td->file_size)
		return 1;

	return 0;
}

static unsigned int get_next_buflen(struct thread_data *td)
{
	unsigned int buflen;
	long r;

	if (td->min_bs == td->max_bs)
		buflen = td->min_bs;
	else {
		lrand48_r(&td->bsrange_state, &r);
		buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
		buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
	}

	if (buflen > td->io_size - td->this_io_bytes)
		buflen = td->io_size - td->this_io_bytes;

	return buflen;
}
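/*
 * Latency and bandwidth samples are kept as running statistics: the
 * sum and the sum of squares are accumulated per sample, so that
 * calc_lat() can later derive min, max, mean and standard deviation
 * without storing every individual sample.
 */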
static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
				   unsigned long val)
{
	if (val > is->max_val)
		is->max_val = val;
	if (val < is->min_val)
		is->min_val = val;

	is->val += val;
	is->val_sq += val * val;
	is->samples++;
}

static void add_log_sample(struct thread_data *td, struct io_log *log,
			   unsigned long val)
{
	if (log->nr_samples == log->max_samples) {
		int new_size = sizeof(struct io_sample) * log->max_samples * 2;

		log->log = realloc(log->log, new_size);
		log->max_samples <<= 1;
	}

	log->log[log->nr_samples].val = val;
	log->log[log->nr_samples].time = mtime_since_now(&td->start);
	log->nr_samples++;
}

static void add_clat_sample(struct thread_data *td, unsigned long msec)
{
	add_stat_sample(td, &td->clat_stat, msec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, msec);
}

static void add_slat_sample(struct thread_data *td, unsigned long msec)
{
	add_stat_sample(td, &td->slat_stat, msec);
}

static void add_bw_sample(struct thread_data *td)
{
	unsigned long spent = mtime_since_now(&td->stat_sample_time);
	unsigned long rate;

	if (spent < td->bw_avg_time)
		return;

	rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
	add_stat_sample(td, &td->bw_stat, rate);

	if (td->bw_log)
		add_log_sample(td, td->bw_log, rate);

	gettimeofday(&td->stat_sample_time, NULL);
	td->stat_io_bytes = td->this_io_bytes;
}

/*
 * busy looping version for the last few usec
 */
static void __usec_sleep(int usec)
{
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)
		nop;
}

static void usec_sleep(struct thread_data *td, unsigned long usec)
{
	struct timespec req, rem;

	req.tv_sec = usec / 1000000;
	req.tv_nsec = (usec - req.tv_sec * 1000000) * 1000;

	do {
		if (usec < 5000) {
			__usec_sleep(usec);
			break;
		}

		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) < 0)
			break;

		if ((rem.tv_sec + rem.tv_nsec) == 0)
			break;

		req.tv_nsec = rem.tv_nsec;
		req.tv_sec = rem.tv_sec;

		usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
	} while (!td->terminate);
}
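/*
 * rate_throttle() compares the time an io actually took against the
 * time it is allowed to take at the requested rate (rate_usec_cycle
 * per min_bs of data). If the io finished early, the difference is
 * accumulated and slept off once at least 100msec of sleep is pending;
 * if it finished late, the debt is subtracted. For example, with a
 * cycle time of 250usec per block, an io covering four blocks that
 * completes in 600usec leaves 400usec to be slept off later.
 */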
static void rate_throttle(struct thread_data *td, unsigned long time_spent,
			  unsigned int bytes)
{
	unsigned long usec_cycle;

	if (!td->rate)
		return;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td, td->rate_pending_usleep);
			td->rate_pending_usleep = 0;
		}
	} else {
		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
	}
}

static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
			if (rate_quit)
				terminate_threads(td->groupid);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes;
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->start, t) >= td->timeout * 1000)
		return 1;

	return 0;
}

static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)
{
	unsigned int todo;
	double r;

	while (len) {
		drand48_r(&td->verify_state, &r);

		/*
		 * lrand48_r seems to be broken and only fills the bottom
		 * 32 bits, even on 64-bit archs with 64-bit longs
		 */
		todo = sizeof(r);
		if (todo > len)
			todo = len;

		memcpy(p, &r, todo);

		len -= todo;
		p += todo;
	}
}

static void hexdump(void *buffer, int len)
{
	unsigned char *p = buffer;
	int i;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
	printf("\n");
}

static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	unsigned long c;

	p += sizeof(*hdr);
	c = crc32(p, hdr->len - sizeof(*hdr));

	return c != hdr->crc32;
}

static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;
	int ret;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	p += sizeof(*hdr);
	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
	if (ret) {
		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
	}

	return ret;
}
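/*
 * Each verified block starts with a struct verify_header holding a
 * magic value, the block length and either a crc32 or an md5 digest of
 * the payload that follows it. verify_io_u() checks the magic and then
 * dispatches on the verify type recorded in the header.
 */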
static int verify_io_u(struct io_u *io_u)
{
	struct verify_header *hdr = (struct verify_header *) io_u->buf;
	int ret;

	if (hdr->fio_magic != FIO_HDR_MAGIC)
		return 1;

	if (hdr->verify_type == VERIFY_MD5)
		ret = verify_io_u_md5(hdr, io_u);
	else if (hdr->verify_type == VERIFY_CRC32)
		ret = verify_io_u_crc32(hdr, io_u);
	else {
		fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);
		ret = 1;
	}

	return ret;
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	hdr->crc32 = crc32(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, len);
	memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * md5 or crc32 checksum of that data.
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	struct verify_header hdr;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;
	p += sizeof(hdr);
	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	if (td->verify == VERIFY_MD5) {
		fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_MD5;
	} else {
		fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_CRC32;
	}

	memcpy(io_u->buf, &hdr, sizeof(hdr));
}

static void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}

#define queue_full(td)	(list_empty(&(td)->io_u_freelist))

static struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	if (queue_full(td))
		return NULL;

	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);
	td->cur_depth++;
	return io_u;
}

static struct io_u *get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	io_u = __get_io_u(td);
	if (!io_u)
		return NULL;

	if (get_next_offset(td, &io_u->offset)) {
		put_io_u(td, io_u);
		return NULL;
	}

	io_u->buflen = get_next_buflen(td);
	if (!io_u->buflen) {
		put_io_u(td, io_u);
		return NULL;
	}

	if (io_u->buflen + io_u->offset > td->file_size)
		io_u->buflen = td->file_size - io_u->offset;

	if (!td->sequential)
		mark_random_map(td, io_u);

	td->last_bytes += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_io_u(td, io_u);

	if (td->use_aio) {
		if (td_read(td))
			io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		else
			io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
	}

	gettimeofday(&io_u->start_time, NULL);
	return io_u;
}

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->old_runstate = td->runstate;
	td->runstate = runstate;
}

static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)
{
	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))
		return 1;

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;
	*len = ipo->len;
	free(ipo);
	return 0;
}

static void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);
		free(ipo);
	}
}
/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = malloc(sizeof(struct io_piece));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	/*
	 * for random io where the writes extend the file, it will typically
	 * be laid out with the blocks scattered as written. it's faster to
	 * read them back in that order again, so don't sort
	 */
	if (td->sequential || !td->overwrite) {
		list_add_tail(&ipo->list, &td->io_hist_list);
		return;
	}

	/*
	 * for random io, sort the list so verify will run faster
	 */
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (__ipo->offset < ipo->offset)
			break;
	}

	list_add(&ipo->list, entry);
}

static void do_sync_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u = NULL;
	int ret;

	td_set_runstate(td, TD_VERIFYING);

	io_u = __get_io_u(td);

	if (!td->odirect) {
		if (!td->use_mmap) {
			if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {
				td->error = errno;
				goto out;
			}
		} else {
			if (madvise(td->mmap, td->io_size, MADV_DONTNEED)) {
				td->error = errno;
				goto out;
			}
		}
	}

	do {
		if (td->terminate)
			break;

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		if (get_next_verify(td, &io_u->offset, &io_u->buflen))
			break;

		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
				td->error = errno;
				break;
			}
		}

		ret = read(td->fd, io_u->buf, io_u->buflen);
		if (ret < (int) io_u->buflen) {
			if (ret == -1) {
				td->error = errno;
				break;
			} else if (!ret)
				break;
			else
				io_u->buflen = ret;
		}

		if (verify_io_u(io_u))
			break;

		td->cur_off = io_u->offset + io_u->buflen;
	} while (1);

out:
	td_set_runstate(td, TD_RUNNING);
	put_io_u(td, io_u);
}

static int __do_sync_mmap(struct thread_data *td, struct io_u *io_u)
{
	unsigned long long real_off = io_u->offset - td->file_offset;

	if (td_read(td))
		memcpy(io_u->buf, td->mmap + real_off, io_u->buflen);
	else
		memcpy(td->mmap + real_off, io_u->buf, io_u->buflen);

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->odirect) {
		msync(td->mmap + real_off, io_u->buflen, MS_SYNC);
		madvise(td->mmap + real_off, io_u->buflen, MADV_DONTNEED);
	}

	return io_u->buflen;
}

static int __do_sync_rw(struct thread_data *td, struct io_u *io_u)
{
	if (td->cur_off != io_u->offset) {
		if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
			td->error = errno;
			return 1;
		}
	}

	if (td_read(td))
		return read(td->fd, io_u->buf, io_u->buflen);
	else
		return write(td->fd, io_u->buf, io_u->buflen);
}

static void sync_td(struct thread_data *td)
{
	if (!td->use_mmap)
		fsync(td->fd);
	else
		msync(td->mmap, td->file_size, MS_SYNC);
}
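/*
 * Main loop for synchronous io: grab an io_u, issue it via read/write
 * or through the mmap'ed region, account for completion latency and
 * bandwidth, throttle to the requested rate, and optionally fsync
 * every fsync_blocks blocks, until io_size bytes have been moved.
 */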
static void do_sync_io(struct thread_data *td)
{
	unsigned long msec, usec;
	struct io_u *io_u = NULL;
	struct timeval e;

	while (td->this_io_bytes < td->io_size) {
		int ret;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		if (!td->use_mmap)
			ret = __do_sync_rw(td, io_u);
		else
			ret = __do_sync_mmap(td, io_u);

		if (ret < (int) io_u->buflen) {
			if (ret == -1)
				td->error = errno;
			break;
		}

		if (td_write(td))
			log_io_piece(td, io_u);

		td->io_blocks++;
		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;
		td->cur_off = io_u->offset + io_u->buflen;

		gettimeofday(&e, NULL);

		usec = utime_since(&io_u->start_time, &e);

		rate_throttle(td, usec, io_u->buflen);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		msec = usec / 1000;
		add_clat_sample(td, msec);
		add_bw_sample(td);

		if (runtime_exceeded(td, &e))
			break;

		put_io_u(td, io_u);
		io_u = NULL;

		if (td->thinktime)
			usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			sync_td(td);
	}

	if (io_u)
		put_io_u(td, io_u);

	if (should_fsync(td))
		sync_td(td);
}

static int io_u_getevents(struct thread_data *td, int min, int max,
			  struct timespec *t)
{
	int r;

	do {
		r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
		if (r != -EAGAIN && r != -EINTR)
			break;
	} while (1);

	return r;
}

static int io_u_queue(struct thread_data *td, struct io_u *io_u)
{
	struct iocb *iocb = &io_u->iocb;
	int ret;

	do {
		ret = io_submit(td->aio_ctx, 1, &iocb);
		if (ret == 1)
			return 0;
		else if (ret == -EAGAIN)
			usleep(100);
		else if (ret == -EINTR)
			continue;
		else
			break;
	} while (1);

	return ret;
}

#define iocb_time(iocb)	((unsigned long) (iocb)->data)
#define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)

static int ios_completed(struct thread_data *td, int nr)
{
	unsigned long msec;
	struct io_u *io_u;
	struct timeval e;
	int i, bytes_done;

	gettimeofday(&e, NULL);

	for (i = 0, bytes_done = 0; i < nr; i++) {
		io_u = ev_to_iou(td->aio_events + i);

		td->io_blocks++;
		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, msec);
		add_bw_sample(td);

		if (td_write(td))
			log_io_piece(td, io_u);

		bytes_done += io_u->buflen;
		put_io_u(td, io_u);
	}

	return bytes_done;
}

static void cleanup_pending_aio(struct thread_data *td)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_getevents(td, 0, td->cur_depth, &ts);
	if (r > 0)
		ios_completed(td, r);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
		if (!r)
			put_io_u(td, io_u);
	}

	if (td->cur_depth) {
		r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
		if (r > 0)
			ios_completed(td, r);
	}
}

static int async_do_verify(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *v_io_u = *io_u;
	int ret = 0;

	if (v_io_u) {
		ret = verify_io_u(v_io_u);
		put_io_u(td, v_io_u);
		*io_u = NULL;
	}

	return ret;
}

static void do_async_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u, *v_io_u = NULL;
	int ret;

	td_set_runstate(td, TD_VERIFYING);

	do {
		if (td->terminate)
			break;

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
			put_io_u(td, io_u);
			break;
		}

		io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td->error = ret;
			break;
		}

		/*
		 * we have one pending to verify, do that while we are
		 * doing io on the next one
		 */
		if (async_do_verify(td, &v_io_u))
			break;

		ret = io_u_getevents(td, 1, 1, NULL);
		if (ret != 1) {
			if (ret < 0)
				td->error = ret;
			break;
		}

		v_io_u = ev_to_iou(td->aio_events);

		td->cur_off = v_io_u->offset + v_io_u->buflen;

		/*
		 * if we can't submit more io, we need to verify now
		 */
		if (queue_full(td) && async_do_verify(td, &v_io_u))
			break;

	} while (1);

	async_do_verify(td, &v_io_u);

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}
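/*
 * Main loop for libaio: keep submitting io_u's until the queue depth
 * is reached, then wait for at least one completion and feed the batch
 * of completed bytes into the rate throttle. Submission latency is
 * sampled when the io is queued, completion latency when the event is
 * reaped in ios_completed().
 */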
static void do_async_io(struct thread_data *td)
{
	struct timeval s, e;
	unsigned long usec;

	while (td->this_io_bytes < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int ret, min_evts = 0;
		struct io_u *io_u;
		unsigned int bytes_done;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td->error = ret;
			break;
		}

		gettimeofday(&io_u->issue_time, NULL);
		add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
		if (td->cur_depth < td->aio_depth) {
			timeout = &ts;
			min_evts = 0;
		} else {
			timeout = NULL;
			min_evts = 1;
		}

		ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
		if (ret < 0) {
			td->error = ret;
			break;
		} else if (!ret)
			continue;

		bytes_done = ios_completed(td, ret);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		if (runtime_exceeded(td, &e))
			break;

		if (td->thinktime)
			usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			fsync(td->fd);
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	if (should_fsync(td))
		fsync(td->fd);
}

static void cleanup_aio(struct thread_data *td)
{
	io_destroy(td->aio_ctx);

	if (td->aio_events)
		free(td->aio_events);
}

static int init_aio(struct thread_data *td)
{
	if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
		td->error = errno;
		return 1;
	}

	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
	return 0;
}

static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
	} else if (td->mem_type == MEM_MMAP)
		munmap(td->orig_buffer, td->orig_buffer_size);
	else
		fprintf(stderr, "Bad memory type %d\n", td->mem_type);

	td->orig_buffer = NULL;
}
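/*
 * Allocate the io unit pool and its data buffer. The buffer backing is
 * selected by mem_type (malloc, SysV shm or anonymous mmap) and sized
 * for max_bs bytes per unit plus MASK bytes of slack, so the start of
 * the pool can be aligned to a 4096 byte boundary via ALIGN().
 */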
static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	int i, max_units;
	char *p;

	if (!td->use_aio)
		max_units = 1;
	else
		max_units = td->aio_depth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(td->orig_buffer_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {
			td->error = errno;
			perror("shmget");
			return 1;
		}

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {
			td->error = errno;
			perror("shmat");
			td->orig_buffer = NULL;
			return 1;
		}
	} else if (td->mem_type == MEM_MMAP) {
		td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if (td->orig_buffer == MAP_FAILED) {
			td->error = errno;
			perror("mmap");
			td->orig_buffer = NULL;
			return 1;
		}
	}

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}

static int create_file(struct thread_data *td)
{
	unsigned long long left;
	unsigned int bs;
	char *b;
	int r;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (td_write(td) && !td->overwrite)
		return 0;

	if (!td->file_size) {
		fprintf(stderr, "Need size for create\n");
		td->error = EINVAL;
		return 1;
	}

	printf("Client%d: Laying out IO file\n", td->thread_number);

	td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (td->fd < 0) {
		td->error = errno;
		return 1;
	}

	if (ftruncate(td->fd, td->file_size) == -1) {
		td->error = errno;
		return 1;
	}

	td->io_size = td->file_size;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	left = td->file_size;
	while (left) {
		bs = td->max_bs;
		if (bs > left)
			bs = left;

		r = write(td->fd, b, bs);

		if (r == (int) bs) {
			left -= bs;
			continue;
		} else {
			if (r < 0)
				td->error = errno;
			else
				td->error = EIO;

			break;
		}
	}

	if (td->create_fsync)
		fsync(td->fd);

	close(td->fd);
	td->fd = -1;
	free(b);
	return 0;
}
static int file_exists(struct thread_data *td)
{
	struct stat st;

	if (stat(td->file_name, &st) != -1)
		return 1;

	return errno != ENOENT;
}

static int file_size(struct thread_data *td)
{
	struct stat st;

	if (fstat(td->fd, &st) == -1) {
		td->error = errno;
		return 1;
	}

	if (td_read(td)) {
		if (!td->file_size || td->file_size > st.st_size)
			td->file_size = st.st_size;
	} else {
		if (!td->file_size)
			td->file_size = 1024 * 1024 * 1024;
	}

	return 0;
}

static int bdev_size(struct thread_data *td)
{
	size_t bytes;

	if (ioctl(td->fd, BLKGETSIZE64, &bytes) < 0) {
		td->error = errno;
		return 1;
	}

	if (!td->file_size || (td->file_size > bytes))
		td->file_size = bytes;

	return 0;
}

static int get_file_size(struct thread_data *td)
{
	int ret;

	if (td->filetype == FIO_TYPE_FILE)
		ret = file_size(td);
	else
		ret = bdev_size(td);

	if (ret)
		return ret;

	if (td->file_offset > td->file_size) {
		fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);
		return 1;
	}

	td->io_size = td->file_size - td->file_offset;
	if (td->io_size == 0) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
		td->error = EINVAL;
		return 1;
	}

	return 0;
}

static int setup_file_mmap(struct thread_data *td)
{
	int flags;

	if (td_read(td))
		flags = PROT_READ;
	else {
		flags = PROT_WRITE;

		if (td->verify != VERIFY_NONE)
			flags |= PROT_READ;
	}

	td->mmap = mmap(NULL, td->file_size, flags, MAP_SHARED, td->fd, td->file_offset);
	if (td->mmap == MAP_FAILED) {
		td->mmap = NULL;
		td->error = errno;
		return 1;
	}

	if (td->invalidate_cache) {
		if (madvise(td->mmap, td->file_size, MADV_DONTNEED) < 0) {
			td->error = errno;
			return 1;
		}
	}

	if (td->sequential) {
		if (madvise(td->mmap, td->file_size, MADV_SEQUENTIAL) < 0) {
			td->error = errno;
			return 1;
		}
	} else {
		if (madvise(td->mmap, td->file_size, MADV_RANDOM) < 0) {
			td->error = errno;
			return 1;
		}
	}

	return 0;
}

static int setup_file_plain(struct thread_data *td)
{
	if (td->invalidate_cache) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
			td->error = errno;
			return 1;
		}
	}

	if (td->sequential) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
			td->error = errno;
			return 1;
		}
	} else {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_RANDOM) < 0) {
			td->error = errno;
			return 1;
		}
	}

	return 0;
}

static int setup_file(struct thread_data *td)
{
	int flags = 0;

	if (!file_exists(td)) {
		if (!td->create_file) {
			td->error = ENOENT;
			return 1;
		}
		if (create_file(td))
			return 1;
	}

	if (td->odirect)
		flags |= O_DIRECT;

	if (td_read(td))
		td->fd = open(td->file_name, flags | O_RDONLY);
	else {
		if (!td->overwrite)
			flags |= O_TRUNC;
		if (td->sync_io)
			flags |= O_SYNC;

		flags |= O_RDWR;

		td->fd = open(td->file_name, flags | O_CREAT, 0600);
	}

	if (td->fd == -1) {
		td->error = errno;
		return 1;
	}

	if (get_file_size(td))
		return 1;

	if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {
		td->error = errno;
		return 1;
	}

	if (!td->use_mmap)
		return setup_file_plain(td);
	else
		return setup_file_mmap(td);
}
static void clear_io_state(struct thread_data *td)
{
	if (!td->use_aio)
		lseek(td->fd, 0, SEEK_SET);

	td->cur_off = 0;
	td->last_bytes = 0;
	td->stat_io_bytes = 0;
	td->this_io_bytes = 0;

	if (td->file_map)
		memset(td->file_map, 0, td->num_maps * sizeof(long));
}

static void update_rusage_stat(struct thread_data *td)
{
	if (!td->runtime)
		return;

	getrusage(RUSAGE_SELF, &td->ru_end);

	td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
	td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
	td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}

static void *thread_main(void *data)
{
	struct thread_data *td = data;
	int ret = 1;

	setsid();
	td->pid = getpid();

	if (init_io_u(td))
		goto err;

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
		td->error = errno;
		goto err;
	}

	if (td->use_aio && init_aio(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td->error = errno;
			goto err;
		}
	}

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))
		goto err;

	if (init_random_state(td))
		goto err;

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		if (!td->use_aio)
			do_sync_io(td);
		else
			do_async_io(td);

		if (td->error)
			break;

		td->runtime += mtime_since_now(&td->start);
		update_rusage_stat(td);

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);

		if (!td->use_aio)
			do_sync_verify(td);
		else
			do_async_verify(td);

		if (td->error)
			break;
	}

	ret = 0;

	if (td->bw_log)
		finish_log(td, td->bw_log, "bw");
	if (td->lat_log)
		finish_log(td, td->lat_log, "lat");

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

err:
	if (td->fd != -1) {
		close(td->fd);
		td->fd = -1;
	}
	if (td->mmap)
		munmap(td->mmap, td->file_size);
	if (td->use_aio)
		cleanup_aio(td);
	cleanup_io_u(td);
	if (ret) {
		sem_post(&startup_sem);
		sem_wait(&td->mutex);
	}
	td_set_runstate(td, TD_EXITED);
	return NULL;
}

static void *fork_main(int shm_id, int offset)
{
	struct thread_data *td;
	void *data;

	data = shmat(shm_id, NULL, 0);
	if (data == (void *) -1) {
		perror("shmat");
		return NULL;
	}

	td = data + offset * sizeof(struct thread_data);
	thread_main(td);
	shmdt(data);
	return NULL;
}
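/*
 * Turn the accumulated sum and sum of squares into min/max/mean and
 * sample standard deviation: mean = sum / n and
 * dev = sqrt((sum_sq - n * mean^2) / (n - 1)), matching what
 * add_stat_sample() collected.
 */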
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1));
	return 1;
}

static void show_thread_status(struct thread_data *td,
			       struct group_run_stats *rs)
{
	int prio, prio_class;
	unsigned long min, max, bw = 0;
	double mean, dev, usr_cpu, sys_cpu;

	if (!td->io_bytes && !td->error)
		return;

	if (td->runtime)
		bw = td->io_bytes / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("Client%d (g=%d): err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->groupid, td->error, td->io_bytes >> 20, bw, td->runtime);

	if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
		printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
		printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[td->ddir];
		printf(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, dev=%5.02f\n", min, max, p_of_agg, mean, dev);
	}

	if (td->runtime) {
		usr_cpu = (double) td->usr_time * 100 / (double) td->runtime;
		sys_cpu = (double) td->sys_time * 100 / (double) td->runtime;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	printf(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
}

static void print_thread_status(struct thread_data *td, int nr_running,
				int t_rate, int m_rate)
{
	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	printf(" : [%s]\r", run_str);
	fflush(stdout);
}
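/*
 * Update this thread's character in the status line when its runstate
 * changes: P=not yet created, C=created, R/r=sequential/random read,
 * W/w=sequential/random write, V=verifying, E=exited, _=reaped.
 */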
static void check_str_update(struct thread_data *td, int n, int t, int m)
{
	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)
		return;

	switch (td->runstate) {
	case TD_REAPED:
		c = '_';
		break;
	case TD_EXITED:
		c = 'E';
		break;
	case TD_RUNNING:
		if (td_read(td)) {
			if (td->sequential)
				c = 'R';
			else
				c = 'r';
		} else {
			if (td->sequential)
				c = 'W';
			else
				c = 'w';
		}
		break;
	case TD_VERIFYING:
		c = 'V';
		break;
	case TD_CREATED:
		c = 'C';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		printf("state %d\n", td->runstate);
	}

	run_str[td->thread_number - 1] = c;
	print_thread_status(td, n, t, m);
	td->old_runstate = td->runstate;
}

static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	int i;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		check_str_update(td, *nr_running, *t_rate, *m_rate);

		if (td->runstate != TD_EXITED)
			continue;

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");
		} else
			waitpid(td->pid, NULL, 0);

		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
		check_str_update(td, *nr_running, *t_rate, *m_rate);
	}
}
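/*
 * Main driver: optionally lays out files serially, then walks all jobs
 * through the TD_NOT_CREATED -> TD_CREATED -> TD_RUNNING states,
 * honouring start delays and stonewall barriers, and keeps reaping
 * exited threads until every job has finished.
 */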
static void run_threads(char *argv[])
{
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	printf("Starting %d threads\n", thread_number);
	fflush(stdout);

	signal(SIGINT, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];

		if (!td->create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);
			todo--;
		}
	}

	gettimeofday(&genesis, NULL);

	while (todo) {
		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for (i = 0; i < thread_number; i++) {
			td = &threads[i];

			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_now(&genesis);

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			td_set_runstate(td, TD_CREATED);
			check_str_update(td, nr_running, t_rate, m_rate);
			sem_init(&startup_sem, 1, 1);
			todo--;
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					sem_wait(&startup_sem);
				else {
					fork_main(shm_id, i);
					exit(0);
				}
			}
		}

		/*
		 * start created threads (TD_CREATED -> TD_RUNNING)
		 */
		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_CREATED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			check_str_update(td, nr_running, t_rate, m_rate);
			sem_post(&td->mutex);
		}

		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_RUNNING &&
			    td->runstate != TD_VERIFYING)
				continue;

			check_str_update(td, nr_running, t_rate, m_rate);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}
}

static void show_group_stats(struct group_run_stats *rs, int id)
{
	printf("\nRun status group %d:\n", id);

	if (rs->max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[0], rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
	if (rs->max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[1], rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
}
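/*
 * Aggregate per-thread results into per-group statistics: min/max
 * runtime and bandwidth per data direction, total MiB moved, and the
 * group aggregate bandwidth used for each thread's "per" percentage.
 */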
static void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	int i;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;
	}

	for (i = 0; i < thread_number; i++) {
		unsigned long bw = 0;

		td = &threads[i];

		if (td->error)
			continue;

		rs = &runstats[td->groupid];

		if (td->runtime < rs->min_run[td->ddir])
			rs->min_run[td->ddir] = td->runtime;
		if (td->runtime > rs->max_run[td->ddir])
			rs->max_run[td->ddir] = td->runtime;

		if (td->runtime)
			bw = td->io_bytes / td->runtime;
		if (bw < rs->min_bw[td->ddir])
			rs->min_bw[td->ddir] = bw;
		if (bw > rs->max_bw[td->ddir])
			rs->max_bw[td->ddir] = bw;

		rs->io_mb[td->ddir] += td->io_bytes >> 20;
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		if (rs->max_run[0])
			rs->agg[0] = (rs->io_mb[0] * 1024 * 1000) / rs->max_run[0];
		if (rs->max_run[1])
			rs->agg[1] = (rs->io_mb[1] * 1024 * 1000) / rs->max_run[1];
	}

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];
		rs = &runstats[td->groupid];

		show_thread_status(td, rs);
	}

	for (i = 0; i < groupid + 1; i++)
		show_group_stats(&runstats[i], i);
}

int main(int argc, char *argv[])
{
	memset(run_str, 0, sizeof(run_str));

	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		printf("Nothing to do\n");
		return 1;
	}

	run_threads(argv);
	show_run_stats();

	return 0;
}