15 #include "lib/axmap.h"
19 #ifdef CONFIG_LINUX_FALLOCATE
20 #include <linux/falloc.h>
23 static FLIST_HEAD(filename_list);
26 * List entry for filename_list
29 struct flist_head list;
33 static inline void clear_error(struct thread_data *td)
39 static int native_fallocate(struct thread_data *td, struct fio_file *f)
43 success = fio_fallocate(f, 0, f->real_file_size);
44 dprint(FD_FILE, "native fallocate of file %s size %llu was "
45 "%ssuccessful\n", f->file_name,
46 (unsigned long long) f->real_file_size,
53 dprint(FD_FILE, "native fallocate is not implemented\n");
58 static void fallocate_file(struct thread_data *td, struct fio_file *f)
60 if (td->o.fill_device)
63 switch (td->o.fallocate_mode) {
64 case FIO_FALLOCATE_NATIVE:
65 native_fallocate(td, f);
67 case FIO_FALLOCATE_NONE:
69 #ifdef CONFIG_POSIX_FALLOCATE
70 case FIO_FALLOCATE_POSIX: {
73 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
75 (unsigned long long) f->real_file_size);
77 r = posix_fallocate(f->fd, 0, f->real_file_size);
79 log_err("fio: posix_fallocate fails: %s\n", strerror(r));
82 #endif /* CONFIG_POSIX_FALLOCATE */
83 #ifdef CONFIG_LINUX_FALLOCATE
84 case FIO_FALLOCATE_KEEP_SIZE: {
87 dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) "
88 "file %s size %llu\n", f->file_name,
89 (unsigned long long) f->real_file_size);
91 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size);
93 td_verror(td, errno, "fallocate");
97 #endif /* CONFIG_LINUX_FALLOCATE */
98 case FIO_FALLOCATE_TRUNCATE: {
101 dprint(FD_FILE, "ftruncate file %s size %llu\n",
103 (unsigned long long) f->real_file_size);
104 r = ftruncate(f->fd, f->real_file_size);
106 td_verror(td, errno, "ftruncate");
111 log_err("fio: unknown fallocate mode: %d\n", td->o.fallocate_mode);
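/*
 * Illustrative job-file snippet (values assumed, not taken from the
 * original source): the branches above are selected with the fallocate=
 * job option, e.g.
 *
 *   [prealloc-example]
 *   filename=/tmp/fio.prealloc
 *   size=1G
 *   fallocate=native   ; also accepts posix, keep, truncate, none
 */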
117 * Leaves f->fd open on success, caller must close
119 static int extend_file(struct thread_data *td, struct fio_file *f)
121 int new_layout = 0, unlink_file = 0, flags;
122 unsigned long long left;
123 unsigned long long bs;
127 log_err("fio: refusing extend of file due to read-only\n");
132 * check if we need to lay the file out completely again. fio
133 * does that for operations involving reads, or for writes
134 * where overwrite is set
137 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
138 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
140 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
143 if (unlink_file || new_layout) {
146 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
148 ret = td_io_unlink_file(td, f);
149 if (ret != 0 && ret != ENOENT) {
150 td_verror(td, errno, "unlink");
156 if (td->o.allow_create)
165 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
166 f->fd = open(f->file_name, flags, 0644);
170 if (err == ENOENT && !td->o.allow_create)
171 log_err("fio: file creation disallowed by "
172 "allow_file_create=0\n");
174 td_verror(td, err, "open");
178 fallocate_file(td, f);
181 * If our jobs don't require regular files initially, we're done.
187 * The size will be -1ULL when fill_device is used, so don't truncate
188 * or fallocate this file, just write it
190 if (!td->o.fill_device) {
191 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
192 (unsigned long long) f->real_file_size);
193 if (ftruncate(f->fd, f->real_file_size) == -1) {
194 if (errno != EFBIG) {
195 td_verror(td, errno, "ftruncate");
201 left = f->real_file_size;
202 bs = td->o.max_bs[DDIR_WRITE];
208 td_verror(td, errno, "malloc");
212 while (left && !td->terminate) {
218 fill_io_buffer(td, b, bs, bs);
220 r = write(f->fd, b, bs);
230 if (td->o.fill_device)
232 log_info("fio: ENOSPC on laying out "
235 td_verror(td, errno, "write");
237 td_verror(td, EIO, "write");
244 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
245 td_io_unlink_file(td, f);
246 } else if (td->o.create_fsync) {
247 if (fsync(f->fd) < 0) {
248 td_verror(td, errno, "fsync");
252 if (td->o.fill_device && !td_write(td)) {
253 fio_file_clear_size_known(f);
254 if (td_io_get_file_size(td, f))
256 if (f->io_size > f->real_file_size)
257 f->io_size = f->real_file_size;
271 static bool pre_read_file(struct thread_data *td, struct fio_file *f)
273 int r, did_open = 0, old_runstate;
274 unsigned long long left;
275 unsigned long long bs;
279 if (td_ioengine_flagged(td, FIO_PIPEIO) ||
280 td_ioengine_flagged(td, FIO_NOIO))
283 if (f->filetype == FIO_TYPE_CHAR)
286 if (!fio_file_open(f)) {
287 if (td->io_ops->open_file(td, f)) {
288 log_err("fio: cannot pre-read, failed to open file\n");
294 old_runstate = td_bump_runstate(td, TD_PRE_READING);
297 bs = td->o.max_bs[DDIR_READ];
303 td_verror(td, errno, "malloc");
309 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
310 td_verror(td, errno, "lseek");
311 log_err("fio: failed to lseek pre-read file\n");
316 while (left && !td->terminate) {
320 r = read(f->fd, b, bs);
326 td_verror(td, EIO, "pre_read");
332 td_restore_runstate(td, old_runstate);
335 td->io_ops->close_file(td, f);
341 unsigned long long get_rand_file_size(struct thread_data *td)
343 unsigned long long ret, sized;
347 frand_max = rand_max(&td->file_size_state);
348 r = __rand(&td->file_size_state);
349 sized = td->o.file_size_high - td->o.file_size_low;
350 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
351 ret += td->o.file_size_low;
352 ret -= (ret % td->o.rw_min_bs);
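/*
 * Worked example with assumed values: with file_size_low=1M,
 * file_size_high=5M and rw_min_bs=4k, sized is 4M; if r/(frand_max+1.0)
 * comes out near 0.6, ret is ~2.4M, plus file_size_low gives ~3.4M, which
 * the final line rounds down to a 4k multiple. Before that rounding the
 * result always lies in [file_size_low, file_size_high).
 */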
356 static int file_size(struct thread_data *td, struct fio_file *f)
360 if (stat(f->file_name, &st) == -1) {
361 td_verror(td, errno, "stat");
365 f->real_file_size = st.st_size;
369 static int bdev_size(struct thread_data *td, struct fio_file *f)
371 unsigned long long bytes = 0;
374 if (td->io_ops->open_file(td, f)) {
375 log_err("fio: failed opening blockdev %s for size check\n",
380 r = blockdev_size(f, &bytes);
382 td_verror(td, r, "blockdev_size");
387 log_err("%s: zero sized block device?\n", f->file_name);
391 f->real_file_size = bytes;
392 td->io_ops->close_file(td, f);
395 td->io_ops->close_file(td, f);
399 static int char_size(struct thread_data *td, struct fio_file *f)
401 #ifdef FIO_HAVE_CHARDEV_SIZE
402 unsigned long long bytes = 0;
405 if (td->io_ops->open_file(td, f)) {
406 log_err("fio: failed opening chardev %s for size check\n",
411 r = chardev_size(f, &bytes);
413 td_verror(td, r, "chardev_size");
418 log_err("%s: zero sized char device?\n", f->file_name);
422 f->real_file_size = bytes;
423 td->io_ops->close_file(td, f);
426 td->io_ops->close_file(td, f);
429 f->real_file_size = -1ULL;
434 static int get_file_size(struct thread_data *td, struct fio_file *f)
438 if (fio_file_size_known(f))
441 if (f->filetype == FIO_TYPE_FILE)
442 ret = file_size(td, f);
443 else if (f->filetype == FIO_TYPE_BLOCK)
444 ret = bdev_size(td, f);
445 else if (f->filetype == FIO_TYPE_CHAR)
446 ret = char_size(td, f);
448 f->real_file_size = -1;
449 log_info("%s: failed to get file size of %s\n", td->o.name,
451 return 1; /* avoid offset extends end error message */
455 * Leave ->real_file_size at 0, since for regular files a zero size
456 * may simply mean the initial layout hasn't been done yet.
462 * ->file_offset normally hasn't been initialized yet, so this
463 * is basically always false unless ->real_file_size is -1, but
464 * if ->real_file_size is -1 this message doesn't make sense.
465 * As a result, this message is basically useless.
467 if (f->file_offset > f->real_file_size) {
468 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
469 (unsigned long long) f->file_offset,
470 (unsigned long long) f->real_file_size);
474 fio_file_set_size_known(f);
478 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
479 unsigned long long off,
480 unsigned long long len)
482 int errval = 0, ret = 0;
491 off = f->file_offset;
493 if (len == -1ULL || off == -1ULL)
496 if (td->io_ops->invalidate) {
497 dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
499 ret = td->io_ops->invalidate(td, f);
502 } else if (td_ioengine_flagged(td, FIO_DISKLESSIO)) {
503 dprint(FD_IO, "invalidate not supported by ioengine %s\n",
505 } else if (f->filetype == FIO_TYPE_FILE) {
506 dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
507 f->file_name, off, len);
508 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
511 } else if (f->filetype == FIO_TYPE_BLOCK) {
514 dprint(FD_IO, "drop page cache %s\n", f->file_name);
515 ret = blockdev_invalidate_cache(f);
516 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
518 * Linux multipath devices reject ioctl while
519 * the maps are being updated. That window can
520 * last tens of milliseconds; we'll try up to
521 * a quarter of a second.
524 ret = blockdev_invalidate_cache(f);
526 if (ret < 0 && errno == EACCES && geteuid()) {
527 if (!fio_did_warn(FIO_WARN_ROOT_FLUSH)) {
528 log_err("fio: only root may flush block "
529 "devices. Cache flush bypassed!\n");
534 } else if (f->filetype == FIO_TYPE_CHAR ||
535 f->filetype == FIO_TYPE_PIPE) {
536 dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
540 * Cache flushing isn't a fatal condition, and we know it will
541 * happen on some platforms where we don't have the proper
542 * function to flush e.g. block device caches. So just warn and
543 * continue on our way.
546 log_info("fio: cache invalidation of %s failed: %s\n",
547 f->file_name, strerror(errval));
553 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
555 if (!fio_file_open(f))
558 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
561 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
565 dprint(FD_FILE, "fd close %s\n", f->file_name);
569 if (close(f->fd) < 0)
574 if (f->shadow_fd != -1) {
583 int file_lookup_open(struct fio_file *f, int flags)
585 struct fio_file *__f;
588 __f = lookup_file_hash(f->file_name);
590 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
594 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
602 f->fd = open(f->file_name, flags, 0600);
606 static int file_close_shadow_fds(struct thread_data *td)
612 for_each_file(td, f, i) {
613 if (f->shadow_fd == -1)
624 int generic_open_file(struct thread_data *td, struct fio_file *f)
630 dprint(FD_FILE, "fd open %s\n", f->file_name);
632 if (!strcmp(f->file_name, "-")) {
634 log_err("fio: can't read/write to stdin/out\n");
640 * move output logging to stderr, if we are writing to stdout
649 flags |= OS_O_DIRECT;
652 td_verror(td, EINVAL, "OS does not support atomic IO");
655 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
657 flags |= td->o.sync_io;
658 if (td->o.create_on_open && td->o.allow_create)
661 if (f->filetype != FIO_TYPE_FILE)
662 flags |= FIO_O_NOATIME;
669 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
673 f->fd = dup(STDOUT_FILENO);
675 from_hash = file_lookup_open(f, flags);
676 } else if (td_read(td)) {
677 if (f->filetype == FIO_TYPE_CHAR && !read_only)
683 f->fd = dup(STDIN_FILENO);
685 from_hash = file_lookup_open(f, flags);
686 } else if (td_trim(td)) {
687 assert(!td_rw(td)); /* should have matched above */
690 from_hash = file_lookup_open(f, flags);
694 char buf[FIO_VERROR_SIZE];
697 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
698 flags &= ~FIO_O_NOATIME;
701 if (__e == EMFILE && file_close_shadow_fds(td))
704 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
706 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
707 log_err("fio: looks like your file system does not "
708 "support direct=1/buffered=0\n");
711 td_verror(td, __e, buf);
715 if (!from_hash && f->fd != -1) {
716 if (add_file_hash(f)) {
720 * Stash away descriptor for later close. This is to
721 * work around a "feature" on Linux, where a close of
722 * an fd that has been opened for write will trigger
723 * udev to call blkid to check partitions, fs id, etc.
724 * That pollutes the device cache, which can slow down
725 * unbuffered accesses.
727 if (f->shadow_fd == -1)
728 f->shadow_fd = f->fd;
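/*
 * Illustrative example (fd number assumed): if the descriptor can't be
 * added to the file hash, typically because another clone already hashed
 * this file, the freshly opened descriptor, say fd 5, is parked in
 * ->shadow_fd instead of being closed right away, deferring the close
 * that would otherwise kick off the udev/blkid scan described above.
 * Parked descriptors are reaped by file_close_shadow_fds(), e.g. when a
 * later open() fails with EMFILE.
 */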
731 * OK to ignore, we haven't done anything
734 ret = generic_close_file(td, f);
744 * This function, i.e. get_file_size(), is the default .get_file_size
745 * implementation for the majority of I/O engines.
747 int generic_get_file_size(struct thread_data *td, struct fio_file *f)
749 return get_file_size(td, f);
753 * open/close all files, so that ->real_file_size gets set
755 static int get_file_sizes(struct thread_data *td)
761 for_each_file(td, f, i) {
762 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
765 if (td_io_get_file_size(td, f)) {
766 if (td->error != ENOENT) {
767 log_err("%s\n", td->verror);
775 * There are corner cases where we end up with -1 for
776 * ->real_file_size due to unsupported file type, etc.
777 * We then just set it to the size option value divided by the
778 * number of files, similar to the way file ->io_size is set.
779 * stat(2) failure doesn't set ->real_file_size to -1.
781 if (f->real_file_size == -1ULL && td->o.size)
782 f->real_file_size = td->o.size / td->o.nr_files;
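/*
 * Example with assumed values: with size=4G and nr_files=4, a file whose
 * real size could not be determined is treated as 1G here.
 */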
789 struct flist_head list;
796 * Get the number of free bytes for each file on each unique mount.
798 static unsigned long long get_fs_free_counts(struct thread_data *td)
800 struct flist_head *n, *tmp;
801 unsigned long long ret = 0;
802 struct fio_mount *fm;
807 for_each_file(td, f, i) {
811 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
812 if (f->real_file_size != -1ULL)
813 ret += f->real_file_size;
815 } else if (f->filetype != FIO_TYPE_FILE)
818 snprintf(buf, FIO_ARRAY_SIZE(buf), "%s", f->file_name);
820 if (stat(buf, &sb) < 0) {
824 if (stat(buf, &sb) < 0)
829 flist_for_each(n, &list) {
830 fm = flist_entry(n, struct fio_mount, list);
831 if (fm->key == sb.st_dev)
840 fm = calloc(1, sizeof(*fm));
841 snprintf(fm->__base, FIO_ARRAY_SIZE(fm->__base), "%s", buf);
842 fm->base = basename(fm->__base);
844 flist_add(&fm->list, &list);
847 flist_for_each_safe(n, tmp, &list) {
848 unsigned long long sz;
850 fm = flist_entry(n, struct fio_mount, list);
851 flist_del(&fm->list);
853 sz = get_fs_free_size(fm->base);
854 if (sz && sz != -1ULL)
863 uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
866 struct thread_options *o = &td->o;
867 unsigned long long align_bs;
868 unsigned long long offset;
869 unsigned long long increment;
871 if (o->file_append && f->filetype == FIO_TYPE_FILE)
872 return f->real_file_size;
874 if (o->offset_increment_percent) {
875 assert(!o->offset_increment);
876 increment = o->offset_increment_percent * f->real_file_size / 100;
879 increment = o->offset_increment;
881 if (o->start_offset_percent > 0) {
882 /* calculate the raw offset */
883 offset = (f->real_file_size * o->start_offset_percent / 100) +
884 (td->subjob_number * increment);
888 /* start_offset_percent not set */
889 offset = o->start_offset +
890 td->subjob_number * increment;
895 * if offset_align is provided, use it
897 if (fio_option_is_set(o, start_offset_align)) {
898 align_bs = o->start_offset_align;
900 /* else take the minimum block size */
901 align_bs = td_min_bs(td);
905 * block align the offset to the next available boundary, i.e.
906 * ceiling(offset / align_bs) * align_bs
908 offset = (offset / align_bs + (offset % align_bs != 0)) * align_bs;
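/*
 * Worked example with assumed values: offset=10000 with align_bs=4096
 * gives 10000/4096=2 plus a non-zero remainder, so the offset is rounded
 * up to 3*4096=12288; an already aligned offset such as 8192 is left
 * unchanged.
 */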
915 * Find the longest existing prefix of the path and return its length
917 int longest_existing_path(char *path) {
928 sprintf(buf, "%s", path);
931 buf_pos = strrchr(buf, FIO_OS_PATH_SEPARATOR);
938 *(buf_pos + 1) = '\0';
941 dwAttr = GetFileAttributesA(buf);
942 if (dwAttr != INVALID_FILE_ATTRIBUTES) {
946 if (stat(buf, &sb) == 0)
950 offset = buf_pos - buf;
958 static bool create_work_dirs(struct thread_data *td, const char *fname)
964 snprintf(path, PATH_MAX, "%s", fname);
967 offset = longest_existing_path(path);
968 end = start + offset;
969 while ((end = strchr(end, FIO_OS_PATH_SEPARATOR)) != NULL) {
976 if (fio_mkdir(path, 0700) && errno != EEXIST) {
977 log_err("fio: failed to create dir (%s): %s\n",
978 start, strerror(errno));
981 *end = FIO_OS_PATH_SEPARATOR;
984 td->flags |= TD_F_DIRS_CREATED;
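/*
 * Illustrative example (paths assumed): for filename=/data/a/b/f.0 with
 * only /data existing, the walk above creates /data/a and then /data/a/b
 * with mode 0700, resuming from the longest existing prefix reported by
 * longest_existing_path().
 */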
989 * Open the files and set up file sizes, creating files if necessary.
991 int setup_files(struct thread_data *td)
993 unsigned long long total_size, extend_size;
994 struct thread_options *o = &td->o;
996 unsigned int i, nr_fs_extra = 0;
997 int err = 0, need_extend;
999 const unsigned long long bs = td_min_bs(td);
1002 dprint(FD_FILE, "setup files\n");
1004 old_state = td_bump_runstate(td, TD_SETTING_UP);
1006 for_each_file(td, f, i) {
1007 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1008 strchr(f->file_name, FIO_OS_PATH_SEPARATOR) &&
1009 !(td->flags & TD_F_DIRS_CREATED) &&
1010 !create_work_dirs(td, f->file_name))
1015 * Find out physical size of files or devices for this thread,
1016 * before we determine I/O size and range of our targets.
1017 * If ioengine defines a setup() method, it's responsible for
1018 * opening the files and setting f->real_file_size to indicate
1019 * the valid range for that file.
1021 if (td->io_ops->setup)
1022 err = td->io_ops->setup(td);
1024 err = get_file_sizes(td);
1029 if (o->read_iolog_file)
1033 * check sizes. if the files/devices do not exist and the size
1034 * isn't passed to fio, abort.
1037 for_each_file(td, f, i) {
1039 if (f->real_file_size == -1ULL)
1042 total_size += f->real_file_size;
1046 td->fill_device_size = get_fs_free_counts(td);
1049 * device/file sizes are zero and no size given, punt
1051 if ((!total_size || total_size == -1ULL) && !o->size &&
1052 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
1053 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
1054 log_err("%s: you need to specify size=\n", o->name);
1055 td_verror(td, EINVAL, "total_file_size");
1060 * Calculate per-file size and potential extra size for the
1061 * first files, if needed (i.e. if we don't have a fixed size).
1063 if (!o->file_size_low && o->nr_files) {
1066 fs = o->size / o->nr_files;
1067 all_fs = fs * o->nr_files;
1069 if (all_fs < o->size)
1070 nr_fs_extra = (o->size - all_fs) / bs;
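/*
 * Worked example with assumed values: size=12345678, nr_files=1000 and a
 * 512-byte minimum block size give fs=12345, all_fs=12345000 and
 * nr_fs_extra=(678/512)=1, so one of the first files is later sized one
 * block larger than the rest.
 */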
1074 * now file sizes are known, so we can set ->io_size. if size= is
1075 * not given, ->io_size is just equal to ->real_file_size. if size
1076 * is given, ->io_size is size / nr_files.
1078 extend_size = total_size = 0;
1080 for_each_file(td, f, i) {
1081 f->file_offset = get_start_offset(td, f);
1084 * Update ->io_size depending on options specified.
1085 * ->file_size_low being 0 means filesize option isn't set.
1086 * A non-zero ->file_size_low equal to ->file_size_high means the
1087 * filesize option is set in fixed-size format.
1088 * A non-zero ->file_size_low different from ->file_size_high means
1089 * the filesize option is set in range format.
1091 if (!o->file_size_low) {
1093 * no file size or range given, file size is equal to
1094 * total size divided by number of files. If the size
1095 * doesn't divide nicely with the min blocksize,
1096 * make the first files bigger.
1105 * We normally don't come here for regular files, but
1106 * if the result is 0 for a regular file, set it to the
1107 * real file size. This could be the size of an existing
1108 * file, but otherwise it will be set to 0. A new file
1109 * won't be created because
1110 * ->io_size + ->file_offset equals ->real_file_size.
1113 if (f->file_offset > f->real_file_size)
1115 f->io_size = f->real_file_size - f->file_offset;
1117 log_info("fio: file %s may be ignored\n",
1120 } else if (f->real_file_size < o->file_size_low ||
1121 f->real_file_size > o->file_size_high) {
1122 if (f->file_offset > o->file_size_low)
1125 * file size given. if it's fixed, use that. if it's a
1126 * range, generate a random size in-between.
1128 if (o->file_size_low == o->file_size_high)
1129 f->io_size = o->file_size_low - f->file_offset;
1131 f->io_size = get_rand_file_size(td)
1135 f->io_size = f->real_file_size - f->file_offset;
1137 if (f->io_size == -1ULL)
1142 if (o->size_percent && o->size_percent != 100) {
1145 file_size = f->io_size + f->file_offset;
1146 f->io_size = (file_size *
1147 o->size_percent) / 100;
1148 if (f->io_size > (file_size - f->file_offset))
1149 f->io_size = file_size - f->file_offset;
1151 f->io_size -= (f->io_size % td_min_bs(td));
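/*
 * Worked example with assumed values: with a 100M file, file_offset=80M
 * and size=40%, file_size is 100M, the scaled ->io_size of 40M exceeds
 * the 20M left past the offset, so it is clamped to 20M and then trimmed
 * to a multiple of the minimum block size.
 */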
1154 io_size = f->io_size;
1155 if (o->io_size_percent && o->io_size_percent != 100) {
1156 io_size *= o->io_size_percent;
1160 total_size += io_size;
1163 if (f->filetype == FIO_TYPE_FILE &&
1164 (f->io_size + f->file_offset) > f->real_file_size) {
1165 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1166 !o->create_on_open) {
1168 extend_size += (f->io_size + f->file_offset);
1169 fio_file_set_extend(f);
1170 } else if (!td_ioengine_flagged(td, FIO_DISKLESSIO) ||
1171 (td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1172 td_ioengine_flagged(td, FIO_FAKEIO)))
1173 f->real_file_size = f->io_size + f->file_offset;
1177 if (td->o.block_error_hist) {
1180 assert(td->o.nr_files == 1); /* checked in fixup_options */
1182 len = f->io_size / td->o.bs[DDIR_TRIM];
1183 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1184 log_err("fio: cannot calculate block histogram with "
1185 "%d trim blocks, maximum %d\n",
1186 len, MAX_NR_BLOCK_INFOS);
1187 td_verror(td, EINVAL, "block_error_hist");
1191 td->ts.nr_block_infos = len;
1192 for (i = 0; i < len; i++)
1193 td->ts.block_infos[i] =
1194 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
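/*
 * Example with assumed values: an ->io_size of 1G with a 4k trim block
 * size yields 262144 histogram entries, which must not exceed
 * MAX_NR_BLOCK_INFOS for the job to be accepted.
 */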
1196 td->ts.nr_block_infos = 0;
1198 if (!o->size || (total_size && o->size > total_size))
1199 o->size = total_size;
1201 if (o->size < td_min_bs(td)) {
1202 log_err("fio: blocksize is larger than data set range\n");
1207 * See if we need to extend some files, typically needed when our
1208 * target regular files don't exist yet, but our jobs require them
1209 * initially due to read I/Os.
1213 if (output_format & FIO_OUTPUT_NORMAL) {
1214 log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1216 need_extend > 1 ? "s" : "",
1218 need_extend > 1 ? "s" : "",
1219 need_extend > 1 ? "total " : "",
1223 for_each_file(td, f, i) {
1224 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1226 if (!fio_file_extend(f))
1229 assert(f->filetype == FIO_TYPE_FILE);
1230 fio_file_clear_extend(f);
1231 if (!o->fill_device) {
1232 old_len = f->real_file_size;
1233 extend_len = f->io_size + f->file_offset -
1236 f->real_file_size = (f->io_size + f->file_offset);
1237 err = extend_file(td, f);
1241 err = __file_invalidate_cache(td, f, old_len,
1245 * Shut up static checker
1261 * iolog already set the total io size, if we read back
1264 if (!o->read_iolog_file) {
1266 td->total_io_size = o->io_size * o->loops;
1268 td->total_io_size = o->size * o->loops;
1275 td_restore_runstate(td, old_state);
1277 if (td->o.zone_mode == ZONE_MODE_ZBD) {
1278 err = zbd_setup_files(td);
1285 log_err("%s: you need to specify valid offset=\n", o->name);
1287 td_restore_runstate(td, old_state);
1291 bool pre_read_files(struct thread_data *td)
1296 dprint(FD_FILE, "pre_read files\n");
1298 for_each_file(td, f, i) {
1299 if (!pre_read_file(td, f))
1306 static void __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1308 unsigned int range_size, seed;
1312 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1313 fsize = min(f->real_file_size, f->io_size);
1315 nranges = (fsize + range_size - 1ULL) / range_size;
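/*
 * Worked example with assumed values: a 1M region with 4k minimum block
 * sizes gives range_size=4k and nranges=256; the zipf/pareto/gauss state
 * below is built over those 256 ranges.
 */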
1317 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1318 if (!td->o.rand_repeatable)
1319 seed = td->rand_seeds[4];
1321 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1322 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1323 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1324 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1325 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1326 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1329 static bool init_rand_distribution(struct thread_data *td)
1335 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM ||
1336 td->o.random_distribution == FIO_RAND_DIST_ZONED ||
1337 td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
1340 state = td_bump_runstate(td, TD_SETTING_UP);
1342 for_each_file(td, f, i)
1343 __init_rand_distribution(td, f);
1345 td_restore_runstate(td, state);
1350 * Check if the number of blocks exceeds the randomness capability of
1351 * the selected generator. Tausworthe is 32-bit, the others are fully
1354 static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1357 if (blocks <= FRAND32_MAX)
1359 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1363 * If the user hasn't specified a random generator, switch
1364 * to tausworthe64 with an informational warning. If the user did
1365 * specify one, just warn.
1367 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1370 if (!fio_option_is_set(&td->o, random_generator)) {
1371 log_info("fio: Switching to tausworthe64. Use the "
1372 "random_generator= option to get rid of this "
1374 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1379 * Just make this informational to avoid breaking scripts.
1381 log_info("fio: Use the random_generator= option to switch to lfsr or "
1386 bool init_random_map(struct thread_data *td)
1388 unsigned long long blocks;
1392 if (init_rand_distribution(td))
1397 for_each_file(td, f, i) {
1398 uint64_t fsize = min(f->real_file_size, f->io_size);
1400 if (td->o.zone_mode == ZONE_MODE_STRIDED)
1401 fsize = td->o.zone_range;
1403 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
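/*
 * Example with assumed values: a 4T file with rw_min_bs=512 gives 2^33
 * blocks, which exceeds FRAND32_MAX, so check_rand_gen_limits() below
 * either switches an unset random_generator to tausworthe64 or just logs
 * an informational warning.
 */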
1405 if (check_rand_gen_limits(td, f, blocks))
1408 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1411 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1413 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1414 fio_file_set_lfsr(f);
1417 log_err("fio: failed initializing LFSR\n");
1420 } else if (!td->o.norandommap) {
1421 f->io_axmap = axmap_new(blocks);
1423 fio_file_set_axmap(f);
1426 } else if (td->o.norandommap)
1429 if (!td->o.softrandommap) {
1430 log_err("fio: failed allocating random map. If running"
1431 " a large number of jobs, try the 'norandommap'"
1432 " option or set 'softrandommap'. Or give"
1433 " a larger --alloc-size to fio.\n");
1437 log_info("fio: file %s failed allocating random map. Running "
1438 "job without.\n", f->file_name);
1444 void close_files(struct thread_data *td)
1449 for_each_file(td, f, i) {
1450 if (fio_file_open(f))
1451 td_io_close_file(td, f);
1455 void fio_file_free(struct fio_file *f)
1457 if (fio_file_axmap(f))
1458 axmap_free(f->io_axmap);
1459 if (!fio_file_smalloc(f)) {
1463 sfree(f->file_name);
1468 void close_and_free_files(struct thread_data *td)
1473 dprint(FD_FILE, "close files\n");
1475 for_each_file(td, f, i) {
1476 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1477 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1478 td_io_unlink_file(td, f);
1481 if (fio_file_open(f))
1482 td_io_close_file(td, f);
1484 remove_file_hash(f);
1486 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1487 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1488 td_io_unlink_file(td, f);
1495 td->o.filename = NULL;
1497 free(td->file_locks);
1498 td->files_index = 0;
1500 td->file_locks = NULL;
1501 td->o.file_lock_mode = FILE_LOCK_NONE;
1505 static void get_file_type(struct fio_file *f)
1509 if (!strcmp(f->file_name, "-"))
1510 f->filetype = FIO_TYPE_PIPE;
1512 f->filetype = FIO_TYPE_FILE;
1515 /* \\.\ is the device namespace in Windows, where every file is
1517 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1518 f->filetype = FIO_TYPE_BLOCK;
1521 if (!stat(f->file_name, &sb)) {
1522 if (S_ISBLK(sb.st_mode))
1523 f->filetype = FIO_TYPE_BLOCK;
1524 else if (S_ISCHR(sb.st_mode))
1525 f->filetype = FIO_TYPE_CHAR;
1526 else if (S_ISFIFO(sb.st_mode))
1527 f->filetype = FIO_TYPE_PIPE;
1531 static bool __is_already_allocated(const char *fname, bool set)
1533 struct flist_head *entry;
1536 ret = file_bloom_exists(fname, set);
1540 flist_for_each(entry, &filename_list) {
1541 struct file_name *fn;
1543 fn = flist_entry(entry, struct file_name, list);
1545 if (!strcmp(fn->filename, fname))
1552 static bool is_already_allocated(const char *fname)
1556 fio_file_hash_lock();
1557 ret = __is_already_allocated(fname, false);
1558 fio_file_hash_unlock();
1563 static void set_already_allocated(const char *fname)
1565 struct file_name *fn;
1567 fn = malloc(sizeof(struct file_name));
1568 fn->filename = strdup(fname);
1570 fio_file_hash_lock();
1571 if (!__is_already_allocated(fname, true)) {
1572 flist_add_tail(&fn->list, &filename_list);
1575 fio_file_hash_unlock();
1583 static void free_already_allocated(void)
1585 struct flist_head *entry, *tmp;
1586 struct file_name *fn;
1588 if (flist_empty(&filename_list))
1591 fio_file_hash_lock();
1592 flist_for_each_safe(entry, tmp, &filename_list) {
1593 fn = flist_entry(entry, struct file_name, list);
1595 flist_del(&fn->list);
1599 fio_file_hash_unlock();
1602 static struct fio_file *alloc_new_file(struct thread_data *td)
1606 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1607 f = calloc(1, sizeof(*f));
1609 f = scalloc(1, sizeof(*f));
1617 fio_file_reset(td, f);
1618 if (!td_ioengine_flagged(td, FIO_NOFILEHASH))
1619 fio_file_set_smalloc(f);
1623 bool exists_and_not_regfile(const char *filename)
1627 if (lstat(filename, &sb) == -1)
1630 #ifndef WIN32 /* NOT Windows */
1631 if (S_ISREG(sb.st_mode))
1634 /* \\.\ is the device namespace in Windows, where every file
1635 * is a device node */
1636 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1643 int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1645 int cur_files = td->files_index;
1646 char file_name[PATH_MAX];
1650 dprint(FD_FILE, "add file %s\n", fname);
1652 if (td->o.directory)
1653 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1654 td->o.unique_filename);
1656 sprintf(file_name + len, "%s", fname);
1658 /* clean cloned siblings using existing files */
1659 if (numjob && is_already_allocated(file_name) &&
1660 !exists_and_not_regfile(fname))
1663 f = alloc_new_file(td);
1665 if (td->files_size <= td->files_index) {
1666 unsigned int new_size = td->o.nr_files + 1;
1668 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1670 td->files = realloc(td->files, new_size * sizeof(f));
1671 if (td->files == NULL) {
1672 log_err("fio: realloc OOM\n");
1675 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1676 td->file_locks = realloc(td->file_locks, new_size);
1677 if (!td->file_locks) {
1678 log_err("fio: realloc OOM\n");
1681 td->file_locks[cur_files] = FILE_LOCK_NONE;
1683 td->files_size = new_size;
1685 td->files[cur_files] = f;
1686 f->fileno = cur_files;
1689 * init function, io engine may not be loaded yet
1691 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1692 f->real_file_size = -1ULL;
1694 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1695 f->file_name = strdup(file_name);
1697 f->file_name = smalloc_strdup(file_name);
1699 /* can't handle smalloc failure from here */
1700 assert(f->file_name);
1704 switch (td->o.file_lock_mode) {
1705 case FILE_LOCK_NONE:
1707 case FILE_LOCK_READWRITE:
1708 f->rwlock = fio_rwlock_init();
1710 case FILE_LOCK_EXCLUSIVE:
1711 f->lock = fio_sem_init(FIO_SEM_UNLOCKED);
1714 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1720 if (td->o.numjobs > 1)
1721 set_already_allocated(file_name);
1726 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1732 int add_file_exclusive(struct thread_data *td, const char *fname)
1737 for_each_file(td, f, i) {
1738 if (!strcmp(f->file_name, fname))
1742 return add_file(td, fname, 0, 1);
1745 void get_file(struct fio_file *f)
1747 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1748 assert(fio_file_open(f));
1752 int put_file(struct thread_data *td, struct fio_file *f)
1754 int f_ret = 0, ret = 0;
1756 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1758 if (!fio_file_open(f)) {
1759 assert(f->fd == -1);
1763 assert(f->references);
1764 if (--f->references)
1767 disk_util_dec(f->du);
1769 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1770 unlock_file_all(td, f);
1772 if (should_fsync(td) && td->o.fsync_on_close) {
1773 f_ret = fsync(f->fd);
1778 if (td->io_ops->close_file)
1779 ret = td->io_ops->close_file(td, f);
1784 td->nr_open_files--;
1785 fio_file_clear_closing(f);
1786 fio_file_clear_open(f);
1787 assert(f->fd == -1);
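/*
 * Usage sketch (assumed call pattern, not from the original source):
 * callers typically bracket I/O to a file with
 *
 *	get_file(f);
 *	... queue and reap I/O against f ...
 *	put_file(td, f);
 *
 * and only the put_file() that drops ->references to zero goes on to
 * fsync (when fsync_on_close is set) and close the file.
 */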
1791 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1793 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1796 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1797 if (ddir == DDIR_READ)
1798 fio_rwlock_read(f->rwlock);
1800 fio_rwlock_write(f->rwlock);
1801 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1802 fio_sem_down(f->lock);
1804 td->file_locks[f->fileno] = td->o.file_lock_mode;
1807 void unlock_file(struct thread_data *td, struct fio_file *f)
1809 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1812 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1813 fio_rwlock_unlock(f->rwlock);
1814 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1815 fio_sem_up(f->lock);
1817 td->file_locks[f->fileno] = FILE_LOCK_NONE;
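/*
 * Usage sketch (assumed call pattern): with file_lock_mode=readwrite a
 * read takes the rwlock shared and a write takes it exclusively, while
 * file_lock_mode=exclusive always takes the semaphore, e.g.
 *
 *	lock_file(td, f, DDIR_WRITE);
 *	... issue the write ...
 *	unlock_file(td, f);
 */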
1820 void unlock_file_all(struct thread_data *td, struct fio_file *f)
1822 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1824 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1828 static bool recurse_dir(struct thread_data *td, const char *dirname)
1834 D = opendir(dirname);
1836 char buf[FIO_VERROR_SIZE];
1838 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1839 td_verror(td, errno, buf);
1843 while ((dir = readdir(D)) != NULL) {
1844 char full_path[PATH_MAX];
1847 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1850 sprintf(full_path, "%s%c%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1852 if (lstat(full_path, &sb) == -1) {
1853 if (errno != ENOENT) {
1854 td_verror(td, errno, "stat");
1860 if (S_ISREG(sb.st_mode)) {
1861 add_file(td, full_path, 0, 1);
1864 if (!S_ISDIR(sb.st_mode))
1867 ret = recurse_dir(td, full_path);
1876 int add_dir_files(struct thread_data *td, const char *path)
1878 int ret = recurse_dir(td, path);
1881 log_info("fio: opendir added %d files\n", td->o.nr_files);
1886 void dup_files(struct thread_data *td, struct thread_data *org)
1891 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1896 td->files = malloc(org->files_index * sizeof(f));
1898 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1899 td->file_locks = malloc(org->files_index);
1901 for_each_file(org, f, i) {
1902 struct fio_file *__f;
1904 __f = alloc_new_file(td);
1907 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1908 __f->file_name = strdup(f->file_name);
1910 __f->file_name = smalloc_strdup(f->file_name);
1912 /* can't handle smalloc failure from here */
1913 assert(__f->file_name);
1914 __f->filetype = f->filetype;
1917 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1918 __f->lock = f->lock;
1919 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1920 __f->rwlock = f->rwlock;
1927 * Returns the index that matches the filename, or -1 if not there
1929 int get_fileno(struct thread_data *td, const char *fname)
1934 for_each_file(td, f, i)
1935 if (!strcmp(f->file_name, fname))
1942 * For log usage, where we add/open/close files automatically
1944 void free_release_files(struct thread_data *td)
1948 td->o.open_files = 0;
1949 td->files_index = 0;
1952 void fio_file_reset(struct thread_data *td, struct fio_file *f)
1956 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1957 f->last_pos[i] = f->file_offset;
1958 f->last_start[i] = -1ULL;
1961 if (fio_file_axmap(f))
1962 axmap_reset(f->io_axmap);
1963 else if (fio_file_lfsr(f))
1964 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1966 zbd_file_reset(td, f);
1969 bool fio_files_done(struct thread_data *td)
1974 for_each_file(td, f, i)
1975 if (!fio_file_done(f))
1981 /* free memory used in initialization phase only */
1982 void filesetup_mem_free(void)
1984 free_already_allocated();
1988 * This function is for platforms which support direct I/O but not O_DIRECT.
1990 int fio_set_directio(struct thread_data *td, struct fio_file *f)
1992 #ifdef FIO_OS_DIRECTIO
1993 int ret = fio_set_odirect(f);
1996 td_verror(td, ret, "fio_set_directio");
1997 #if defined(__sun__)
1998 if (ret == ENOTTY) { /* ENOTTY suggests RAW device or ZFS */
1999 log_err("fio: doing directIO to RAW devices or ZFS not supported\n");
2001 log_err("fio: the file system does not seem to support direct IO\n");
2004 log_err("fio: the file system does not seem to support direct IO\n");
2011 log_err("fio: direct IO is not supported on this host operating system\n");