[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
 49 * check if we need to lay the file out completely again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
62 if ((td_io_unlink_file(td, f) < 0) && (errno != ENOENT)) {
63 td_verror(td, errno, "unlink");
64 return 1;
65 }
66 }
67
68 flags = O_WRONLY;
69 if (td->o.allow_create)
70 flags |= O_CREAT;
71 if (new_layout)
72 flags |= O_TRUNC;
73
74#ifdef WIN32
75 flags |= _O_BINARY;
76#endif
77
78 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
79 f->fd = open(f->file_name, flags, 0644);
80 if (f->fd < 0) {
81 int err = errno;
82
83 if (err == ENOENT && !td->o.allow_create)
84 log_err("fio: file creation disallowed by "
85 "allow_file_create=0\n");
86 else
87 td_verror(td, err, "open");
88 return 1;
89 }
90
91#ifdef CONFIG_POSIX_FALLOCATE
92 if (!td->o.fill_device) {
93 switch (td->o.fallocate_mode) {
94 case FIO_FALLOCATE_NONE:
95 break;
96 case FIO_FALLOCATE_POSIX:
97 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
98 f->file_name,
99 (unsigned long long) f->real_file_size);
100
101 r = posix_fallocate(f->fd, 0, f->real_file_size);
102 if (r > 0) {
103 log_err("fio: posix_fallocate fails: %s\n",
104 strerror(r));
105 }
106 break;
107#ifdef CONFIG_LINUX_FALLOCATE
108 case FIO_FALLOCATE_KEEP_SIZE:
109 dprint(FD_FILE,
110 "fallocate(FALLOC_FL_KEEP_SIZE) "
111 "file %s size %llu\n", f->file_name,
112 (unsigned long long) f->real_file_size);
113
114 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
115 f->real_file_size);
116 if (r != 0)
117 td_verror(td, errno, "fallocate");
118
119 break;
120#endif /* CONFIG_LINUX_FALLOCATE */
121 default:
122 log_err("fio: unknown fallocate mode: %d\n",
123 td->o.fallocate_mode);
124 assert(0);
125 }
126 }
127#endif /* CONFIG_POSIX_FALLOCATE */
128
129 if (!new_layout)
130 goto done;
131
132 /*
133 * The size will be -1ULL when fill_device is used, so don't truncate
134 * or fallocate this file, just write it
135 */
136 if (!td->o.fill_device) {
137 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
138 (unsigned long long) f->real_file_size);
139 if (ftruncate(f->fd, f->real_file_size) == -1) {
140 if (errno != EFBIG) {
141 td_verror(td, errno, "ftruncate");
142 goto err;
143 }
144 }
145 }
146
147 b = malloc(td->o.max_bs[DDIR_WRITE]);
148
149 left = f->real_file_size;
150 while (left && !td->terminate) {
151 bs = td->o.max_bs[DDIR_WRITE];
152 if (bs > left)
153 bs = left;
154
155 fill_io_buffer(td, b, bs, bs);
156
157 r = write(f->fd, b, bs);
158
159 if (r > 0) {
160 left -= r;
161 continue;
162 } else {
163 if (r < 0) {
164 int __e = errno;
165
166 if (__e == ENOSPC) {
167 if (td->o.fill_device)
168 break;
169 log_info("fio: ENOSPC on laying out "
170 "file, stopping\n");
171 break;
172 }
173 td_verror(td, errno, "write");
174 } else
175 td_verror(td, EIO, "write");
176
177 break;
178 }
179 }
180
181 if (td->terminate) {
182 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
183 td_io_unlink_file(td, f);
184 } else if (td->o.create_fsync) {
185 if (fsync(f->fd) < 0) {
186 td_verror(td, errno, "fsync");
187 goto err;
188 }
189 }
190 if (td->o.fill_device && !td_write(td)) {
191 fio_file_clear_size_known(f);
192 if (td_io_get_file_size(td, f))
193 goto err;
194 if (f->io_size > f->real_file_size)
195 f->io_size = f->real_file_size;
196 }
197
198 free(b);
199done:
200 return 0;
201err:
202 close(f->fd);
203 f->fd = -1;
204 if (b)
205 free(b);
206 return 1;
207}
208
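/*
 * Sequentially read the file's io range once into a scratch buffer, in
 * max_bs[DDIR_READ] sized chunks, to prime the page cache before the job
 * starts (the pre_read option). Pipe-like engines are skipped.
 */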
209static int pre_read_file(struct thread_data *td, struct fio_file *f)
210{
211 int ret = 0, r, did_open = 0, old_runstate;
212 unsigned long long left;
213 unsigned int bs;
214 char *b;
215
216 if (td->io_ops->flags & FIO_PIPEIO)
217 return 0;
218
219 if (!fio_file_open(f)) {
220 if (td->io_ops->open_file(td, f)) {
221 log_err("fio: cannot pre-read, failed to open file\n");
222 return 1;
223 }
224 did_open = 1;
225 }
226
227 old_runstate = td_bump_runstate(td, TD_PRE_READING);
228
229 bs = td->o.max_bs[DDIR_READ];
230 b = malloc(bs);
231 memset(b, 0, bs);
232
233 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
234 td_verror(td, errno, "lseek");
235 log_err("fio: failed to lseek pre-read file\n");
236 ret = 1;
237 goto error;
238 }
239
240 left = f->io_size;
241
242 while (left && !td->terminate) {
243 if (bs > left)
244 bs = left;
245
246 r = read(f->fd, b, bs);
247
248 if (r == (int) bs) {
249 left -= bs;
250 continue;
251 } else {
252 td_verror(td, EIO, "pre_read");
253 break;
254 }
255 }
256
257error:
258 td_restore_runstate(td, old_runstate);
259
260 if (did_open)
261 td->io_ops->close_file(td, f);
262
263 free(b);
264 return ret;
265}
266
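/*
 * Pick a file size at random between file_size_low and file_size_high,
 * rounded down to a multiple of the minimum block size.
 */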
267unsigned long long get_rand_file_size(struct thread_data *td)
268{
269 unsigned long long ret, sized;
270 uint64_t frand_max;
271 unsigned long r;
272
273 frand_max = rand_max(&td->file_size_state);
274 r = __rand(&td->file_size_state);
275 sized = td->o.file_size_high - td->o.file_size_low;
276 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
277 ret += td->o.file_size_low;
278 ret -= (ret % td->o.rw_min_bs);
279 return ret;
280}
281
282static int file_size(struct thread_data *td, struct fio_file *f)
283{
284 struct stat st;
285
286 if (stat(f->file_name, &st) == -1) {
 287 td_verror(td, errno, "stat");
288 return 1;
289 }
290
291 f->real_file_size = st.st_size;
292 return 0;
293}
294
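/*
 * Size a block device by opening it through the io engine and asking the
 * OS via blockdev_size().
 */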
295static int bdev_size(struct thread_data *td, struct fio_file *f)
296{
297 unsigned long long bytes = 0;
298 int r;
299
300 if (td->io_ops->open_file(td, f)) {
301 log_err("fio: failed opening blockdev %s for size check\n",
302 f->file_name);
303 return 1;
304 }
305
306 r = blockdev_size(f, &bytes);
307 if (r) {
308 td_verror(td, r, "blockdev_size");
309 goto err;
310 }
311
312 if (!bytes) {
313 log_err("%s: zero sized block device?\n", f->file_name);
314 goto err;
315 }
316
317 f->real_file_size = bytes;
318 td->io_ops->close_file(td, f);
319 return 0;
320err:
321 td->io_ops->close_file(td, f);
322 return 1;
323}
324
325static int char_size(struct thread_data *td, struct fio_file *f)
326{
327#ifdef FIO_HAVE_CHARDEV_SIZE
328 unsigned long long bytes = 0;
329 int r;
330
331 if (td->io_ops->open_file(td, f)) {
 332 log_err("fio: failed opening chardev %s for size check\n",
333 f->file_name);
334 return 1;
335 }
336
337 r = chardev_size(f, &bytes);
338 if (r) {
339 td_verror(td, r, "chardev_size");
340 goto err;
341 }
342
343 if (!bytes) {
344 log_err("%s: zero sized char device?\n", f->file_name);
345 goto err;
346 }
347
348 f->real_file_size = bytes;
349 td->io_ops->close_file(td, f);
350 return 0;
351err:
352 td->io_ops->close_file(td, f);
353 return 1;
354#else
355 f->real_file_size = -1ULL;
356 return 0;
357#endif
358}
359
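/*
 * Fill in ->real_file_size according to the file type and check that the
 * configured offset does not start beyond the end of the file. The result
 * is cached via the size_known flag.
 */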
360static int get_file_size(struct thread_data *td, struct fio_file *f)
361{
362 int ret = 0;
363
364 if (fio_file_size_known(f))
365 return 0;
366
367 if (f->filetype == FIO_TYPE_FILE)
368 ret = file_size(td, f);
369 else if (f->filetype == FIO_TYPE_BD)
370 ret = bdev_size(td, f);
371 else if (f->filetype == FIO_TYPE_CHAR)
372 ret = char_size(td, f);
373 else
374 f->real_file_size = -1;
375
376 if (ret)
377 return ret;
378
379 if (f->file_offset > f->real_file_size) {
380 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
381 (unsigned long long) f->file_offset,
382 (unsigned long long) f->real_file_size);
383 return 1;
384 }
385
386 fio_file_set_size_known(f);
387 return 0;
388}
389
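/*
 * Drop cached data for the given range: the engine-specific invalidate
 * hook if one exists, posix_fadvise(POSIX_FADV_DONTNEED) for regular
 * files, or a block device cache flush. Failures only produce a warning.
 */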
390static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
391 unsigned long long off,
392 unsigned long long len)
393{
394 int errval = 0, ret = 0;
395
396#ifdef CONFIG_ESX
397 return 0;
398#endif
399
400 if (len == -1ULL)
401 len = f->io_size;
402 if (off == -1ULL)
403 off = f->file_offset;
404
405 if (len == -1ULL || off == -1ULL)
406 return 0;
407
408 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
409 len);
410
411 if (td->io_ops->invalidate) {
412 ret = td->io_ops->invalidate(td, f);
413 if (ret < 0)
414 errval = ret;
415 } else if (f->filetype == FIO_TYPE_FILE) {
416 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
417 if (ret)
418 errval = ret;
419 } else if (f->filetype == FIO_TYPE_BD) {
420 int retry_count = 0;
421
422 ret = blockdev_invalidate_cache(f);
423 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
424 /*
425 * Linux multipath devices reject ioctl while
426 * the maps are being updated. That window can
427 * last tens of milliseconds; we'll try up to
428 * a quarter of a second.
429 */
430 usleep(10000);
431 ret = blockdev_invalidate_cache(f);
432 }
433 if (ret < 0 && errno == EACCES && geteuid()) {
434 if (!root_warn) {
435 log_err("fio: only root may flush block "
436 "devices. Cache flush bypassed!\n");
437 root_warn = 1;
438 }
439 ret = 0;
440 }
441 if (ret < 0)
442 errval = errno;
443 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
444 ret = 0;
445
446 /*
447 * Cache flushing isn't a fatal condition, and we know it will
448 * happen on some platforms where we don't have the proper
 449 * function to flush e.g. block device caches. So just warn and
450 * continue on our way.
451 */
452 if (errval)
453 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errval));
454
455 return 0;
456
457}
458
459int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
460{
461 if (!fio_file_open(f))
462 return 0;
463
464 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
465}
466
467int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
468{
469 int ret = 0;
470
471 dprint(FD_FILE, "fd close %s\n", f->file_name);
472
473 remove_file_hash(f);
474
475 if (close(f->fd) < 0)
476 ret = errno;
477
478 f->fd = -1;
479
480 if (f->shadow_fd != -1) {
481 close(f->shadow_fd);
482 f->shadow_fd = -1;
483 }
484
485 f->engine_data = 0;
486 return ret;
487}
488
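/*
 * Open a file, first consulting the global file hash so that jobs sharing
 * a file also share its lock. Returns whether the file was found in the
 * hash; the descriptor itself is stored in f->fd.
 */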
489int file_lookup_open(struct fio_file *f, int flags)
490{
491 struct fio_file *__f;
492 int from_hash;
493
494 __f = lookup_file_hash(f->file_name);
495 if (__f) {
496 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
497 /*
498 * racy, need the __f->lock locked
499 */
500 f->lock = __f->lock;
501 from_hash = 1;
502 } else {
503 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
504 from_hash = 0;
505 }
506
507#ifdef WIN32
508 flags |= _O_BINARY;
509#endif
510
511 f->fd = open(f->file_name, flags, 0600);
512 return from_hash;
513}
514
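/*
 * Close any descriptors stashed in ->shadow_fd (see generic_open_file())
 * to free up file descriptors when open() fails with EMFILE.
 */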
515static int file_close_shadow_fds(struct thread_data *td)
516{
517 struct fio_file *f;
518 int num_closed = 0;
519 unsigned int i;
520
521 for_each_file(td, f, i) {
522 if (f->shadow_fd == -1)
523 continue;
524
525 close(f->shadow_fd);
526 f->shadow_fd = -1;
527 num_closed++;
528 }
529
530 return num_closed;
531}
532
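/*
 * Default ->open_file() implementation: build the open(2) flags from the
 * job options (direct, atomic, sync, create), treat "-" as stdin/stdout,
 * and retry without FIO_O_NOATIME or after closing shadow fds if the
 * open fails.
 */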
533int generic_open_file(struct thread_data *td, struct fio_file *f)
534{
535 int is_std = 0;
536 int flags = 0;
537 int from_hash = 0;
538
539 dprint(FD_FILE, "fd open %s\n", f->file_name);
540
541 if (!strcmp(f->file_name, "-")) {
542 if (td_rw(td)) {
543 log_err("fio: can't read/write to stdin/out\n");
544 return 1;
545 }
546 is_std = 1;
547
548 /*
549 * move output logging to stderr, if we are writing to stdout
550 */
551 if (td_write(td))
552 f_out = stderr;
553 }
554
555 if (td_trim(td))
556 goto skip_flags;
557 if (td->o.odirect)
558 flags |= OS_O_DIRECT;
559 if (td->o.oatomic) {
560 if (!FIO_O_ATOMIC) {
561 td_verror(td, EINVAL, "OS does not support atomic IO");
562 return 1;
563 }
564 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
565 }
566 if (td->o.sync_io)
567 flags |= O_SYNC;
568 if (td->o.create_on_open && td->o.allow_create)
569 flags |= O_CREAT;
570skip_flags:
571 if (f->filetype != FIO_TYPE_FILE)
572 flags |= FIO_O_NOATIME;
573
574open_again:
575 if (td_write(td)) {
576 if (!read_only)
577 flags |= O_RDWR;
578
579 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
580 flags |= O_CREAT;
581
582 if (is_std)
583 f->fd = dup(STDOUT_FILENO);
584 else
585 from_hash = file_lookup_open(f, flags);
586 } else if (td_read(td)) {
587 if (f->filetype == FIO_TYPE_CHAR && !read_only)
588 flags |= O_RDWR;
589 else
590 flags |= O_RDONLY;
591
592 if (is_std)
593 f->fd = dup(STDIN_FILENO);
594 else
595 from_hash = file_lookup_open(f, flags);
 596 } else { /* td trim */
597 flags |= O_RDWR;
598 from_hash = file_lookup_open(f, flags);
599 }
600
601 if (f->fd == -1) {
602 char buf[FIO_VERROR_SIZE];
603 int __e = errno;
604
605 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
606 flags &= ~FIO_O_NOATIME;
607 goto open_again;
608 }
609 if (__e == EMFILE && file_close_shadow_fds(td))
610 goto open_again;
611
612 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
613
614 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
615 log_err("fio: looks like your file system does not " \
616 "support direct=1/buffered=0\n");
617 }
618
619 td_verror(td, __e, buf);
620 return 1;
621 }
622
623 if (!from_hash && f->fd != -1) {
624 if (add_file_hash(f)) {
625 int fio_unused ret;
626
627 /*
628 * Stash away descriptor for later close. This is to
629 * work-around a "feature" on Linux, where a close of
630 * an fd that has been opened for write will trigger
631 * udev to call blkid to check partitions, fs id, etc.
632 * That pollutes the device cache, which can slow down
633 * unbuffered accesses.
634 */
635 if (f->shadow_fd == -1)
636 f->shadow_fd = f->fd;
637 else {
638 /*
639 * OK to ignore, we haven't done anything
640 * with it
641 */
642 ret = generic_close_file(td, f);
643 }
644 goto open_again;
645 }
646 }
647
648 return 0;
649}
650
651int generic_get_file_size(struct thread_data *td, struct fio_file *f)
652{
653 return get_file_size(td, f);
654}
655
656/*
657 * open/close all files, so that ->real_file_size gets set
658 */
659static int get_file_sizes(struct thread_data *td)
660{
661 struct fio_file *f;
662 unsigned int i;
663 int err = 0;
664
665 for_each_file(td, f, i) {
 666 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
667 f->file_name);
668
669 if (td_io_get_file_size(td, f)) {
670 if (td->error != ENOENT) {
671 log_err("%s\n", td->verror);
672 err = 1;
673 break;
674 }
675 clear_error(td);
676 }
677
678 if (f->real_file_size == -1ULL && td->o.size)
679 f->real_file_size = td->o.size / td->o.nr_files;
680 }
681
682 return err;
683}
684
685struct fio_mount {
686 struct flist_head list;
687 const char *base;
688 char __base[256];
689 unsigned int key;
690};
691
692/*
693 * Get free number of bytes for each file on each unique mount.
694 */
695static unsigned long long get_fs_free_counts(struct thread_data *td)
696{
697 struct flist_head *n, *tmp;
698 unsigned long long ret = 0;
699 struct fio_mount *fm;
700 FLIST_HEAD(list);
701 struct fio_file *f;
702 unsigned int i;
703
704 for_each_file(td, f, i) {
705 struct stat sb;
706 char buf[256];
707
708 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
709 if (f->real_file_size != -1ULL)
710 ret += f->real_file_size;
711 continue;
712 } else if (f->filetype != FIO_TYPE_FILE)
713 continue;
714
715 buf[255] = '\0';
716 strncpy(buf, f->file_name, 255);
717
718 if (stat(buf, &sb) < 0) {
719 if (errno != ENOENT)
720 break;
721 strcpy(buf, ".");
722 if (stat(buf, &sb) < 0)
723 break;
724 }
725
726 fm = NULL;
727 flist_for_each(n, &list) {
728 fm = flist_entry(n, struct fio_mount, list);
729 if (fm->key == sb.st_dev)
730 break;
731
732 fm = NULL;
733 }
734
735 if (fm)
736 continue;
737
738 fm = calloc(1, sizeof(*fm));
739 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
740 fm->base = basename(fm->__base);
741 fm->key = sb.st_dev;
742 flist_add(&fm->list, &list);
743 }
744
745 flist_for_each_safe(n, tmp, &list) {
746 unsigned long long sz;
747
748 fm = flist_entry(n, struct fio_mount, list);
749 flist_del(&fm->list);
750
751 sz = get_fs_free_size(fm->base);
752 if (sz && sz != -1ULL)
753 ret += sz;
754
755 free(fm);
756 }
757
758 return ret;
759}
760
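/*
 * Per-file start offset: appends begin at the current end of file;
 * otherwise use start_offset plus this subjob's offset_increment, aligned
 * down to the maximum block size.
 */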
761uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
762{
763 struct thread_options *o = &td->o;
764 uint64_t offset;
765
766 if (o->file_append && f->filetype == FIO_TYPE_FILE)
767 return f->real_file_size;
768
769 offset = td->o.start_offset + td->subjob_number * td->o.offset_increment;
770 if (offset % td_max_bs(td))
771 offset -= (offset % td_max_bs(td));
772
773 return offset;
774}
775
776/*
777 * Open the files and setup files sizes, creating files if necessary.
778 */
779int setup_files(struct thread_data *td)
780{
781 unsigned long long total_size, extend_size;
782 struct thread_options *o = &td->o;
783 struct fio_file *f;
784 unsigned int i, nr_fs_extra = 0;
785 int err = 0, need_extend;
786 int old_state;
787 const unsigned int bs = td_min_bs(td);
788 uint64_t fs = 0;
789
790 dprint(FD_FILE, "setup files\n");
791
792 old_state = td_bump_runstate(td, TD_SETTING_UP);
793
794 if (o->read_iolog_file)
795 goto done;
796
797 /*
798 * if ioengine defines a setup() method, it's responsible for
799 * opening the files and setting f->real_file_size to indicate
800 * the valid range for that file.
801 */
802 if (td->io_ops->setup)
803 err = td->io_ops->setup(td);
804 else
805 err = get_file_sizes(td);
806
807 if (err)
808 goto err_out;
809
810 /*
811 * check sizes. if the files/devices do not exist and the size
812 * isn't passed to fio, abort.
813 */
814 total_size = 0;
815 for_each_file(td, f, i) {
816 f->fileno = i;
817 if (f->real_file_size == -1ULL)
818 total_size = -1ULL;
819 else
820 total_size += f->real_file_size;
821 }
822
823 if (o->fill_device)
824 td->fill_device_size = get_fs_free_counts(td);
825
826 /*
827 * device/file sizes are zero and no size given, punt
828 */
829 if ((!total_size || total_size == -1ULL) && !o->size &&
830 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
831 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
832 log_err("%s: you need to specify size=\n", o->name);
833 td_verror(td, EINVAL, "total_file_size");
834 goto err_out;
835 }
836
837 /*
838 * Calculate per-file size and potential extra size for the
839 * first files, if needed.
840 */
841 if (!o->file_size_low && o->nr_files) {
842 uint64_t all_fs;
843
844 fs = o->size / o->nr_files;
845 all_fs = fs * o->nr_files;
846
847 if (all_fs < o->size)
848 nr_fs_extra = (o->size - all_fs) / bs;
849 }
850
851 /*
852 * now file sizes are known, so we can set ->io_size. if size= is
853 * not given, ->io_size is just equal to ->real_file_size. if size
854 * is given, ->io_size is size / nr_files.
855 */
856 extend_size = total_size = 0;
857 need_extend = 0;
858 for_each_file(td, f, i) {
859 f->file_offset = get_start_offset(td, f);
860
861 if (!o->file_size_low) {
862 /*
863 * no file size range given, file size is equal to
864 * total size divided by number of files. If that is
865 * zero, set it to the real file size. If the size
866 * doesn't divide nicely with the min blocksize,
867 * make the first files bigger.
868 */
869 f->io_size = fs;
870 if (nr_fs_extra) {
871 nr_fs_extra--;
872 f->io_size += bs;
873 }
874
875 if (!f->io_size)
876 f->io_size = f->real_file_size - f->file_offset;
877 } else if (f->real_file_size < o->file_size_low ||
878 f->real_file_size > o->file_size_high) {
879 if (f->file_offset > o->file_size_low)
880 goto err_offset;
881 /*
882 * file size given. if it's fixed, use that. if it's a
883 * range, generate a random size in-between.
884 */
885 if (o->file_size_low == o->file_size_high)
886 f->io_size = o->file_size_low - f->file_offset;
887 else {
888 f->io_size = get_rand_file_size(td)
889 - f->file_offset;
890 }
891 } else
892 f->io_size = f->real_file_size - f->file_offset;
893
894 if (f->io_size == -1ULL)
895 total_size = -1ULL;
896 else {
897 if (o->size_percent)
898 f->io_size = (f->io_size * o->size_percent) / 100;
899 total_size += f->io_size;
900 }
901
902 if (f->filetype == FIO_TYPE_FILE &&
903 (f->io_size + f->file_offset) > f->real_file_size &&
904 !(td->io_ops->flags & FIO_DISKLESSIO)) {
905 if (!o->create_on_open) {
906 need_extend++;
907 extend_size += (f->io_size + f->file_offset);
908 } else
909 f->real_file_size = f->io_size + f->file_offset;
910 fio_file_set_extend(f);
911 }
912 }
913
914 if (td->o.block_error_hist) {
915 int len;
916
917 assert(td->o.nr_files == 1); /* checked in fixup_options */
918 f = td->files[0];
919 len = f->io_size / td->o.bs[DDIR_TRIM];
920 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
921 log_err("fio: cannot calculate block histogram with "
922 "%d trim blocks, maximum %d\n",
923 len, MAX_NR_BLOCK_INFOS);
924 td_verror(td, EINVAL, "block_error_hist");
925 goto err_out;
926 }
927
928 td->ts.nr_block_infos = len;
929 for (i = 0; i < len; i++)
930 td->ts.block_infos[i] =
931 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
932 } else
933 td->ts.nr_block_infos = 0;
934
935 if (!o->size || (total_size && o->size > total_size))
936 o->size = total_size;
937
938 if (o->size < td_min_bs(td)) {
939 log_err("fio: blocksize too large for data set\n");
940 goto err_out;
941 }
942
943 /*
944 * See if we need to extend some files
945 */
946 if (need_extend) {
947 temp_stall_ts = 1;
948 if (output_format & FIO_OUTPUT_NORMAL)
949 log_info("%s: Laying out IO file(s) (%u file(s) /"
950 " %lluMB)\n", o->name, need_extend,
951 extend_size >> 20);
952
953 for_each_file(td, f, i) {
954 unsigned long long old_len = -1ULL, extend_len = -1ULL;
955
956 if (!fio_file_extend(f))
957 continue;
958
959 assert(f->filetype == FIO_TYPE_FILE);
960 fio_file_clear_extend(f);
961 if (!o->fill_device) {
962 old_len = f->real_file_size;
963 extend_len = f->io_size + f->file_offset -
964 old_len;
965 }
966 f->real_file_size = (f->io_size + f->file_offset);
967 err = extend_file(td, f);
968 if (err)
969 break;
970
971 err = __file_invalidate_cache(td, f, old_len,
972 extend_len);
973
974 /*
975 * Shut up static checker
976 */
977 if (f->fd != -1)
978 close(f->fd);
979
980 f->fd = -1;
981 if (err)
982 break;
983 }
984 temp_stall_ts = 0;
985 }
986
987 if (err)
988 goto err_out;
989
990 if (!o->zone_size)
991 o->zone_size = o->size;
992
993 /*
994 * iolog already set the total io size, if we read back
995 * stored entries.
996 */
997 if (!o->read_iolog_file) {
998 if (o->io_limit)
999 td->total_io_size = o->io_limit * o->loops;
1000 else
1001 td->total_io_size = o->size * o->loops;
1002 }
1003
1004done:
1005 if (o->create_only)
1006 td->done = 1;
1007
1008 td_restore_runstate(td, old_state);
1009 return 0;
1010err_offset:
1011 log_err("%s: you need to specify valid offset=\n", o->name);
1012err_out:
1013 td_restore_runstate(td, old_state);
1014 return 1;
1015}
1016
1017int pre_read_files(struct thread_data *td)
1018{
1019 struct fio_file *f;
1020 unsigned int i;
1021
1022 dprint(FD_FILE, "pre_read files\n");
1023
1024 for_each_file(td, f, i) {
1025 pre_read_file(td, f);
1026 }
1027
1028 return 1;
1029}
1030
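/*
 * Initialize the zipf/pareto/gauss state for one file. The number of
 * ranges is the file size divided by the smallest block size, and the
 * seed is derived from the file name and thread number unless
 * rand_repeatable is disabled, in which case a per-run seed is used.
 */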
1031static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1032{
1033 unsigned int range_size, seed;
1034 unsigned long nranges;
1035 uint64_t fsize;
1036
1037 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1038 fsize = min(f->real_file_size, f->io_size);
1039
1040 nranges = (fsize + range_size - 1) / range_size;
1041
1042 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1043 if (!td->o.rand_repeatable)
1044 seed = td->rand_seeds[4];
1045
1046 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1047 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1048 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1049 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1050 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1051 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1052
1053 return 1;
1054}
1055
1056static int init_rand_distribution(struct thread_data *td)
1057{
1058 struct fio_file *f;
1059 unsigned int i;
1060 int state;
1061
1062 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1063 return 0;
1064
1065 state = td_bump_runstate(td, TD_SETTING_UP);
1066
1067 for_each_file(td, f, i)
1068 __init_rand_distribution(td, f);
1069
1070 td_restore_runstate(td, state);
1071
1072 return 1;
1073}
1074
1075/*
1076 * Check if the number of blocks exceeds the randomness capability of
 1077 * the selected generator. Tausworthe is 32-bit, the others are fully
1078 * 64-bit capable.
1079 */
1080static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1081 uint64_t blocks)
1082{
1083 if (blocks <= FRAND32_MAX)
1084 return 0;
1085 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1086 return 0;
1087
1088 /*
1089 * If the user hasn't specified a random generator, switch
1090 * to tausworthe64 with informational warning. If the user did
1091 * specify one, just warn.
1092 */
1093 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1094 f->file_name);
1095
1096 if (!fio_option_is_set(&td->o, random_generator)) {
1097 log_info("fio: Switching to tausworthe64. Use the "
1098 "random_generator= option to get rid of this "
 1099 "warning.\n");
1100 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1101 return 0;
1102 }
1103
1104 /*
 1105 * Just log this as informational, to avoid breaking scripts.
1106 */
1107 log_info("fio: Use the random_generator= option to switch to lfsr or "
1108 "tausworthe64.\n");
1109 return 0;
1110}
1111
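/*
 * Choose how random block offsets will be generated for each file: a
 * distribution table, an LFSR, or an axmap covering the file. If the map
 * cannot be allocated, fall back to running without one only when
 * softrandommap is set.
 */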
1112int init_random_map(struct thread_data *td)
1113{
1114 unsigned long long blocks;
1115 struct fio_file *f;
1116 unsigned int i;
1117
1118 if (init_rand_distribution(td))
1119 return 0;
1120 if (!td_random(td))
1121 return 0;
1122
1123 for_each_file(td, f, i) {
1124 uint64_t fsize = min(f->real_file_size, f->io_size);
1125
1126 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1127
1128 if (check_rand_gen_limits(td, f, blocks))
1129 return 1;
1130
1131 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1132 unsigned long seed;
1133
1134 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1135
1136 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1137 fio_file_set_lfsr(f);
1138 continue;
1139 }
1140 } else if (!td->o.norandommap) {
1141 f->io_axmap = axmap_new(blocks);
1142 if (f->io_axmap) {
1143 fio_file_set_axmap(f);
1144 continue;
1145 }
1146 } else if (td->o.norandommap)
1147 continue;
1148
1149 if (!td->o.softrandommap) {
1150 log_err("fio: failed allocating random map. If running"
1151 " a large number of jobs, try the 'norandommap'"
1152 " option or set 'softrandommap'. Or give"
1153 " a larger --alloc-size to fio.\n");
1154 return 1;
1155 }
1156
1157 log_info("fio: file %s failed allocating random map. Running "
1158 "job without.\n", f->file_name);
1159 }
1160
1161 return 0;
1162}
1163
1164void close_files(struct thread_data *td)
1165{
1166 struct fio_file *f;
1167 unsigned int i;
1168
1169 for_each_file(td, f, i) {
1170 if (fio_file_open(f))
1171 td_io_close_file(td, f);
1172 }
1173}
1174
1175void close_and_free_files(struct thread_data *td)
1176{
1177 struct fio_file *f;
1178 unsigned int i;
1179
1180 dprint(FD_FILE, "close files\n");
1181
1182 for_each_file(td, f, i) {
1183 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1184 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1185 td_io_unlink_file(td, f);
1186 }
1187
1188 if (fio_file_open(f))
1189 td_io_close_file(td, f);
1190
1191 remove_file_hash(f);
1192
1193 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1194 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1195 td_io_unlink_file(td, f);
1196 }
1197
1198 sfree(f->file_name);
1199 f->file_name = NULL;
1200 if (fio_file_axmap(f)) {
1201 axmap_free(f->io_axmap);
1202 f->io_axmap = NULL;
1203 }
1204 sfree(f);
1205 }
1206
1207 td->o.filename = NULL;
1208 free(td->files);
1209 free(td->file_locks);
1210 td->files_index = 0;
1211 td->files = NULL;
1212 td->file_locks = NULL;
1213 td->o.file_lock_mode = FILE_LOCK_NONE;
1214 td->o.nr_files = 0;
1215}
1216
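/*
 * Classify the file: "-" means stdin/stdout and is treated as a pipe,
 * stat() detects block, character and fifo special files, and anything
 * else is a regular file.
 */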
1217static void get_file_type(struct fio_file *f)
1218{
1219 struct stat sb;
1220
1221 if (!strcmp(f->file_name, "-"))
1222 f->filetype = FIO_TYPE_PIPE;
1223 else
1224 f->filetype = FIO_TYPE_FILE;
1225
1226 /* \\.\ is the device namespace in Windows, where every file is
1227 * a block device */
1228 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1229 f->filetype = FIO_TYPE_BD;
1230
1231 if (!stat(f->file_name, &sb)) {
1232 if (S_ISBLK(sb.st_mode))
1233 f->filetype = FIO_TYPE_BD;
1234 else if (S_ISCHR(sb.st_mode))
1235 f->filetype = FIO_TYPE_CHAR;
1236 else if (S_ISFIFO(sb.st_mode))
1237 f->filetype = FIO_TYPE_PIPE;
1238 }
1239}
1240
1241static int __is_already_allocated(const char *fname)
1242{
1243 struct flist_head *entry;
1244 char *filename;
1245
1246 if (flist_empty(&filename_list))
1247 return 0;
1248
1249 flist_for_each(entry, &filename_list) {
1250 filename = flist_entry(entry, struct file_name, list)->filename;
1251
1252 if (strcmp(filename, fname) == 0)
1253 return 1;
1254 }
1255
1256 return 0;
1257}
1258
1259static int is_already_allocated(const char *fname)
1260{
1261 int ret;
1262
1263 fio_file_hash_lock();
1264 ret = __is_already_allocated(fname);
1265 fio_file_hash_unlock();
1266 return ret;
1267}
1268
1269static void set_already_allocated(const char *fname)
1270{
1271 struct file_name *fn;
1272
1273 fn = malloc(sizeof(struct file_name));
1274 fn->filename = strdup(fname);
1275
1276 fio_file_hash_lock();
1277 if (!__is_already_allocated(fname)) {
1278 flist_add_tail(&fn->list, &filename_list);
1279 fn = NULL;
1280 }
1281 fio_file_hash_unlock();
1282
1283 if (fn) {
1284 free(fn->filename);
1285 free(fn);
1286 }
1287}
1288
1289
1290static void free_already_allocated(void)
1291{
1292 struct flist_head *entry, *tmp;
1293 struct file_name *fn;
1294
1295 if (flist_empty(&filename_list))
1296 return;
1297
1298 fio_file_hash_lock();
1299 flist_for_each_safe(entry, tmp, &filename_list) {
1300 fn = flist_entry(entry, struct file_name, list);
1301 free(fn->filename);
1302 flist_del(&fn->list);
1303 free(fn);
1304 }
1305
1306 fio_file_hash_unlock();
1307}
1308
1309static struct fio_file *alloc_new_file(struct thread_data *td)
1310{
1311 struct fio_file *f;
1312
1313 f = smalloc(sizeof(*f));
1314 if (!f) {
1315 log_err("fio: smalloc OOM\n");
1316 assert(0);
1317 return NULL;
1318 }
1319
1320 f->fd = -1;
1321 f->shadow_fd = -1;
1322 fio_file_reset(td, f);
1323 return f;
1324}
1325
1326int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1327{
1328 int cur_files = td->files_index;
1329 char file_name[PATH_MAX];
1330 struct fio_file *f;
1331 int len = 0;
1332
1333 dprint(FD_FILE, "add file %s\n", fname);
1334
1335 if (td->o.directory)
1336 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob);
1337
1338 sprintf(file_name + len, "%s", fname);
1339
 1340 /* cloned siblings reuse already-allocated files; don't add them twice */
1341 if (numjob && is_already_allocated(file_name))
1342 return 0;
1343
1344 f = alloc_new_file(td);
1345
1346 if (td->files_size <= td->files_index) {
1347 unsigned int new_size = td->o.nr_files + 1;
1348
1349 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1350
1351 td->files = realloc(td->files, new_size * sizeof(f));
1352 if (td->files == NULL) {
1353 log_err("fio: realloc OOM\n");
1354 assert(0);
1355 }
1356 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1357 td->file_locks = realloc(td->file_locks, new_size);
1358 if (!td->file_locks) {
1359 log_err("fio: realloc OOM\n");
1360 assert(0);
1361 }
1362 td->file_locks[cur_files] = FILE_LOCK_NONE;
1363 }
1364 td->files_size = new_size;
1365 }
1366 td->files[cur_files] = f;
1367 f->fileno = cur_files;
1368
1369 /*
1370 * init function, io engine may not be loaded yet
1371 */
1372 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1373 f->real_file_size = -1ULL;
1374
1375 f->file_name = smalloc_strdup(file_name);
1376 if (!f->file_name) {
1377 log_err("fio: smalloc OOM\n");
1378 assert(0);
1379 }
1380
1381 get_file_type(f);
1382
1383 switch (td->o.file_lock_mode) {
1384 case FILE_LOCK_NONE:
1385 break;
1386 case FILE_LOCK_READWRITE:
1387 f->rwlock = fio_rwlock_init();
1388 break;
1389 case FILE_LOCK_EXCLUSIVE:
1390 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1391 break;
1392 default:
1393 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1394 assert(0);
1395 }
1396
1397 td->files_index++;
1398 if (f->filetype == FIO_TYPE_FILE)
1399 td->nr_normal_files++;
1400
1401 set_already_allocated(file_name);
1402
1403 if (inc)
1404 td->o.nr_files++;
1405
1406 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1407 cur_files);
1408
1409 return cur_files;
1410}
1411
1412int add_file_exclusive(struct thread_data *td, const char *fname)
1413{
1414 struct fio_file *f;
1415 unsigned int i;
1416
1417 for_each_file(td, f, i) {
1418 if (!strcmp(f->file_name, fname))
1419 return i;
1420 }
1421
1422 return add_file(td, fname, 0, 1);
1423}
1424
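/*
 * get_file()/put_file() maintain a per-file reference count. The file is
 * optionally fsynced and closed through the io engine only when the last
 * reference is dropped.
 */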
1425void get_file(struct fio_file *f)
1426{
1427 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1428 assert(fio_file_open(f));
1429 f->references++;
1430}
1431
1432int put_file(struct thread_data *td, struct fio_file *f)
1433{
1434 int f_ret = 0, ret = 0;
1435
1436 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1437
1438 if (!fio_file_open(f)) {
1439 assert(f->fd == -1);
1440 return 0;
1441 }
1442
1443 assert(f->references);
1444 if (--f->references)
1445 return 0;
1446
1447 if (should_fsync(td) && td->o.fsync_on_close) {
1448 f_ret = fsync(f->fd);
1449 if (f_ret < 0)
1450 f_ret = errno;
1451 }
1452
1453 if (td->io_ops->close_file)
1454 ret = td->io_ops->close_file(td, f);
1455
1456 if (!ret)
1457 ret = f_ret;
1458
1459 td->nr_open_files--;
1460 fio_file_clear_open(f);
1461 assert(f->fd == -1);
1462 return ret;
1463}
1464
1465void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1466{
1467 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1468 return;
1469
1470 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1471 if (ddir == DDIR_READ)
1472 fio_rwlock_read(f->rwlock);
1473 else
1474 fio_rwlock_write(f->rwlock);
1475 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1476 fio_mutex_down(f->lock);
1477
1478 td->file_locks[f->fileno] = td->o.file_lock_mode;
1479}
1480
1481void unlock_file(struct thread_data *td, struct fio_file *f)
1482{
1483 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1484 return;
1485
1486 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1487 fio_rwlock_unlock(f->rwlock);
1488 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1489 fio_mutex_up(f->lock);
1490
1491 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1492}
1493
1494void unlock_file_all(struct thread_data *td, struct fio_file *f)
1495{
1496 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1497 return;
1498 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1499 unlock_file(td, f);
1500}
1501
1502static int recurse_dir(struct thread_data *td, const char *dirname)
1503{
1504 struct dirent *dir;
1505 int ret = 0;
1506 DIR *D;
1507
1508 D = opendir(dirname);
1509 if (!D) {
1510 char buf[FIO_VERROR_SIZE];
1511
1512 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1513 td_verror(td, errno, buf);
1514 return 1;
1515 }
1516
1517 while ((dir = readdir(D)) != NULL) {
1518 char full_path[PATH_MAX];
1519 struct stat sb;
1520
1521 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1522 continue;
1523
1524 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1525
 1526 if (lstat(full_path, &sb) == -1) {
 1527 if (errno == ENOENT)
 1528 continue; /* entry vanished between readdir() and lstat(), sb not valid */
 1529 td_verror(td, errno, "stat");
 1530 ret = 1;
 1531 break;
 1532 }
1533
1534 if (S_ISREG(sb.st_mode)) {
1535 add_file(td, full_path, 0, 1);
1536 continue;
1537 }
1538 if (!S_ISDIR(sb.st_mode))
1539 continue;
1540
1541 ret = recurse_dir(td, full_path);
1542 if (ret)
1543 break;
1544 }
1545
1546 closedir(D);
1547 return ret;
1548}
1549
1550int add_dir_files(struct thread_data *td, const char *path)
1551{
1552 int ret = recurse_dir(td, path);
1553
1554 if (!ret)
1555 log_info("fio: opendir added %d files\n", td->o.nr_files);
1556
1557 return ret;
1558}
1559
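/*
 * Duplicate the file array of 'org' into 'td', as done when a job is
 * cloned (numjobs). File names are copied into shared memory, while lock
 * objects are shared with the original files.
 */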
1560void dup_files(struct thread_data *td, struct thread_data *org)
1561{
1562 struct fio_file *f;
1563 unsigned int i;
1564
1565 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1566
1567 if (!org->files)
1568 return;
1569
1570 td->files = malloc(org->files_index * sizeof(f));
1571
1572 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1573 td->file_locks = malloc(org->files_index);
1574
1575 for_each_file(org, f, i) {
1576 struct fio_file *__f;
1577
1578 __f = alloc_new_file(td);
1579
1580 if (f->file_name) {
1581 __f->file_name = smalloc_strdup(f->file_name);
1582 if (!__f->file_name) {
1583 log_err("fio: smalloc OOM\n");
1584 assert(0);
1585 }
1586
1587 __f->filetype = f->filetype;
1588 }
1589
1590 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1591 __f->lock = f->lock;
1592 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1593 __f->rwlock = f->rwlock;
1594
1595 td->files[i] = __f;
1596 }
1597}
1598
1599/*
1600 * Returns the index that matches the filename, or -1 if not there
1601 */
1602int get_fileno(struct thread_data *td, const char *fname)
1603{
1604 struct fio_file *f;
1605 unsigned int i;
1606
1607 for_each_file(td, f, i)
1608 if (!strcmp(f->file_name, fname))
1609 return i;
1610
1611 return -1;
1612}
1613
1614/*
1615 * For log usage, where we add/open/close files automatically
1616 */
1617void free_release_files(struct thread_data *td)
1618{
1619 close_files(td);
1620 td->o.nr_files = 0;
1621 td->o.open_files = 0;
1622 td->files_index = 0;
1623 td->nr_normal_files = 0;
1624}
1625
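/*
 * Reset per-file IO state (last positions and the random axmap/LFSR) so
 * the file can be iterated again from its starting offset.
 */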
1626void fio_file_reset(struct thread_data *td, struct fio_file *f)
1627{
1628 int i;
1629
1630 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1631 f->last_pos[i] = f->file_offset;
1632 f->last_start[i] = -1ULL;
1633 }
1634
1635 if (fio_file_axmap(f))
1636 axmap_reset(f->io_axmap);
1637 else if (fio_file_lfsr(f))
1638 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1639}
1640
1641int fio_files_done(struct thread_data *td)
1642{
1643 struct fio_file *f;
1644 unsigned int i;
1645
1646 for_each_file(td, f, i)
1647 if (!fio_file_done(f))
1648 return 0;
1649
1650 return 1;
1651}
1652
1653/* free memory used in initialization phase only */
1654void filesetup_mem_free(void)
1655{
1656 free_already_allocated();
1657}