17 #include "lib/axmap.h"
19 #ifdef CONFIG_LINUX_FALLOCATE
20 #include <linux/falloc.h>
25 static FLIST_HEAD(filename_list);
27 static inline void clear_error(struct thread_data *td)
34 * Leaves f->fd open on success, caller must close
/*
 * Lay out (extend) a file to f->real_file_size: optionally unlink and
 * recreate it, pre-allocate via posix_fallocate/fallocate per the
 * configured fallocate_mode, truncate (unless fill_device), then write
 * blocks until the target size is reached.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
36 static int extend_file(struct thread_data *td, struct fio_file *f)
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
/* Extending would modify the file, which a read-only run must not do. */
44 log_err("fio: refusing extend of file due to read-only\n");
49 * check if we need to lay the file out complete again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
/* A fresh layout starts from a deleted file; ENOENT is benign here. */
60 if (unlink_file || new_layout) {
61 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
62 if ((td_io_unlink_file(td, f) < 0) && (errno != ENOENT)) {
63 td_verror(td, errno, "unlink");
69 if (td->o.allow_create)
78 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
79 f->fd = open(f->file_name, flags, 0644);
/* Distinguish "file missing and creation disabled" from other open errors. */
83 if (err == ENOENT && !td->o.allow_create)
84 log_err("fio: file creation disallowed by "
85 "allow_file_create=0\n");
87 td_verror(td, err, "open")
91 #ifdef CONFIG_POSIX_FALLOCATE
/* fill_device writes until ENOSPC, so pre-allocation is skipped for it. */
92 if (!td->o.fill_device) {
93 switch (td->o.fallocate_mode) {
94 case FIO_FALLOCATE_NONE:
96 case FIO_FALLOCATE_POSIX:
97 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
99 (unsigned long long) f->real_file_size);
101 r = posix_fallocate(f->fd, 0, f->real_file_size);
103 log_err("fio: posix_fallocate fails: %s\n",
107 #ifdef CONFIG_LINUX_FALLOCATE
108 case FIO_FALLOCATE_KEEP_SIZE:
110 "fallocate(FALLOC_FL_KEEP_SIZE) "
111 "file %s size %llu\n", f->file_name,
112 (unsigned long long) f->real_file_size);
114 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
117 td_verror(td, errno, "fallocate");
120 #endif /* CONFIG_LINUX_FALLOCATE */
122 log_err("fio: unknown fallocate mode: %d\n",
123 td->o.fallocate_mode);
127 #endif /* CONFIG_POSIX_FALLOCATE */
133 * The size will be -1ULL when fill_device is used, so don't truncate
134 * or fallocate this file, just write it
136 if (!td->o.fill_device) {
137 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
138 (unsigned long long) f->real_file_size);
/* EFBIG is tolerated here — presumably the write loop below handles it. */
139 if (ftruncate(f->fd, f->real_file_size) == -1) {
140 if (errno != EFBIG) {
141 td_verror(td, errno, "ftruncate");
/* Write loop: fill the file with max write-blocksize chunks until done
 * or the job is asked to terminate. */
147 b = malloc(td->o.max_bs[DDIR_WRITE]);
149 left = f->real_file_size;
150 while (left && !td->terminate) {
151 bs = td->o.max_bs[DDIR_WRITE];
155 fill_io_buffer(td, b, bs, bs);
157 r = write(f->fd, b, bs);
/* For fill_device, hitting ENOSPC is the expected stop condition. */
167 if (td->o.fill_device)
169 log_info("fio: ENOSPC on laying out "
173 td_verror(td, errno, "write");
175 td_verror(td, EIO, "write");
182 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
183 td_io_unlink_file(td, f);
184 } else if (td->o.create_fsync) {
185 if (fsync(f->fd) < 0) {
186 td_verror(td, errno, "fsync");
/* After fill_device layout, re-probe the real size and clamp io_size. */
190 if (td->o.fill_device && !td_write(td)) {
191 fio_file_clear_size_known(f);
192 if (td_io_get_file_size(td, f))
194 if (f->io_size > f->real_file_size)
195 f->io_size = f->real_file_size;
/*
 * Sequentially read a file once before the job runs (pre_read option),
 * opening it if needed and bumping the runstate to TD_PRE_READING.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
209 static int pre_read_file(struct thread_data *td, struct fio_file *f)
211 int ret = 0, r, did_open = 0, old_runstate;
212 unsigned long long left;
/* Pipe-style engines can't be pre-read — no seekable backing store. */
216 if (td->io_ops->flags & FIO_PIPEIO)
219 if (!fio_file_open(f)) {
220 if (td->io_ops->open_file(td, f)) {
221 log_err("fio: cannot pre-read, failed to open file\n");
227 old_runstate = td_bump_runstate(td, TD_PRE_READING);
229 bs = td->o.max_bs[DDIR_READ];
/* Start reading from the job's configured offset into the file. */
233 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
234 td_verror(td, errno, "lseek");
235 log_err("fio: failed to lseek pre-read file\n");
242 while (left && !td->terminate) {
246 r = read(f->fd, b, bs);
252 td_verror(td, EIO, "pre_read");
258 td_restore_runstate(td, old_runstate);
/* Only close if we opened the file here (did_open) — presumably guarded
 * by elided code; verify against upstream. */
261 td->io_ops->close_file(td, f);
/*
 * Pick a random file size uniformly in [file_size_low, file_size_high],
 * rounded down to a multiple of the minimum read/write blocksize.
 */
267 static unsigned long long get_rand_file_size(struct thread_data *td)
269 unsigned long long ret, sized;
272 r = __rand(&td->file_size_state);
273 sized = td->o.file_size_high - td->o.file_size_low;
/* Scale the raw random value into the configured size range. */
274 ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
275 ret += td->o.file_size_low;
/* Align down to the min blocksize so every I/O fits exactly. */
276 ret -= (ret % td->o.rw_min_bs);
/*
 * Fill in f->real_file_size for a regular file via stat(2).
 * NOTE(review): the error tag says "fstat" but the call is stat() —
 * message/code mismatch worth confirming against upstream.
 */
280 static int file_size(struct thread_data *td, struct fio_file *f)
284 if (stat(f->file_name, &st) == -1) {
285 td_verror(td, errno, "fstat");
289 f->real_file_size = st.st_size;
/*
 * Determine the size of a block device by opening it and querying
 * blockdev_size(); a zero-sized device is treated as an error.
 */
293 static int bdev_size(struct thread_data *td, struct fio_file *f)
295 unsigned long long bytes = 0;
298 if (td->io_ops->open_file(td, f)) {
299 log_err("fio: failed opening blockdev %s for size check\n",
304 r = blockdev_size(f, &bytes);
306 td_verror(td, r, "blockdev_size");
311 log_err("%s: zero sized block device?\n", f->file_name);
315 f->real_file_size = bytes;
/* Success path closes the device before returning... */
316 td->io_ops->close_file(td, f);
/* ...and so does the (elided) error path below. */
319 td->io_ops->close_file(td, f);
/*
 * Determine the size of a character device where the platform supports
 * it (FIO_HAVE_CHARDEV_SIZE); otherwise report "unknown" as -1ULL.
 * NOTE(review): the open-failure message says "blockdev" for a char
 * device — likely copy/paste from bdev_size(); confirm upstream.
 */
323 static int char_size(struct thread_data *td, struct fio_file *f)
325 #ifdef FIO_HAVE_CHARDEV_SIZE
326 unsigned long long bytes = 0;
329 if (td->io_ops->open_file(td, f)) {
330 log_err("fio: failed opening blockdev %s for size check\n",
335 r = chardev_size(f, &bytes);
337 td_verror(td, r, "chardev_size");
342 log_err("%s: zero sized char device?\n", f->file_name);
346 f->real_file_size = bytes;
347 td->io_ops->close_file(td, f);
350 td->io_ops->close_file(td, f);
/* No chardev-size support: size is simply unknown. */
353 f->real_file_size = -1ULL;
/*
 * Resolve f->real_file_size by dispatching on file type (regular file,
 * block device, char device), validate file_offset against it, and mark
 * the size as known so repeat calls are no-ops.
 */
358 static int get_file_size(struct thread_data *td, struct fio_file *f)
362 if (fio_file_size_known(f))
365 if (f->filetype == FIO_TYPE_FILE)
366 ret = file_size(td, f);
367 else if (f->filetype == FIO_TYPE_BD)
368 ret = bdev_size(td, f);
369 else if (f->filetype == FIO_TYPE_CHAR)
370 ret = char_size(td, f);
/* Other types (e.g. pipes): size is unknowable. */
372 f->real_file_size = -1;
/* An offset past the end of the file/device can never be valid. */
377 if (f->file_offset > f->real_file_size) {
378 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
379 (unsigned long long) f->file_offset,
380 (unsigned long long) f->real_file_size);
384 fio_file_set_size_known(f);
/*
 * Drop cached pages for a file range: engine-specific invalidate hook if
 * present, posix_fadvise(DONTNEED) for regular files, BLKFLSBUF-style
 * flush for block devices. Failure is non-fatal — only a warning.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
388 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
389 unsigned long long off,
390 unsigned long long len)
401 off = f->file_offset;
/* -1ULL for either bound means "nothing sensible to invalidate". */
403 if (len == -1ULL || off == -1ULL)
406 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
409 if (td->io_ops->invalidate)
410 ret = td->io_ops->invalidate(td, f);
411 else if (f->filetype == FIO_TYPE_FILE)
412 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
413 else if (f->filetype == FIO_TYPE_BD) {
414 ret = blockdev_invalidate_cache(f);
/* Non-root users can't flush block devices; degrade to a warning. */
415 if (ret < 0 && errno == EACCES && geteuid()) {
417 log_err("fio: only root may flush block "
418 "devices. Cache flush bypassed!\n");
423 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
427 * Cache flushing isn't a fatal condition, and we know it will
428 * happen on some platforms where we don't have the proper
429 * function to flush eg block device caches. So just warn and
430 * continue on our way.
433 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errno));
/*
 * Public wrapper: invalidate the whole cached range of an open file.
 * -1ULL/-1ULL tells __file_invalidate_cache() to use the file's own
 * offset/length.
 */
441 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
443 if (!fio_file_open(f))
446 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
/*
 * Default close_file hook: close f->fd and any stashed shadow fd (see
 * generic_open_file() for why shadow fds exist).
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
449 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
453 dprint(FD_FILE, "fd close %s\n", f->file_name);
457 if (close(f->fd) < 0)
462 if (f->shadow_fd != -1) {
/*
 * Open a file, first consulting the global file hash so jobs sharing a
 * filename can share state. Returns (via elided code) whether the file
 * came from the hash; opens with mode 0600 otherwise.
 */
471 int file_lookup_open(struct fio_file *f, int flags)
473 struct fio_file *__f;
476 __f = lookup_file_hash(f->file_name);
478 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
480 * racy, need the __f->lock locked
485 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
493 f->fd = open(f->file_name, flags, 0600);
/*
 * Close all stashed shadow fds for this thread — used to recover from
 * EMFILE (out of file descriptors) in generic_open_file().
 */
497 static int file_close_shadow_fds(struct thread_data *td)
503 for_each_file(td, f, i) {
504 if (f->shadow_fd == -1)
/*
 * Default open_file hook: compute open(2) flags from job options
 * (O_DIRECT, atomic I/O, create, NOATIME), handle stdin/stdout
 * pseudo-files, retry on EPERM/EMFILE, and register the fd in the
 * file hash. On hash-insert failure the fd is stashed as a shadow fd
 * to dodge udev-triggered cache pollution on close.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
515 int generic_open_file(struct thread_data *td, struct fio_file *f)
521 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdin/stdout; not every mode combination is allowed. */
523 if (!strcmp(f->file_name, "-")) {
525 log_err("fio: can't read/write to stdin/out\n");
531 * move output logging to stderr, if we are writing to stdout
540 flags |= OS_O_DIRECT;
543 td_verror(td, EINVAL, "OS does not support atomic IO");
546 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
550 if (td->o.create_on_open && td->o.allow_create)
/* NOATIME only makes sense for non-regular files here. */
553 if (f->filetype != FIO_TYPE_FILE)
554 flags |= FIO_O_NOATIME;
561 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
565 f->fd = dup(STDOUT_FILENO);
567 from_hash = file_lookup_open(f, flags);
568 } else if (td_read(td)) {
569 if (f->filetype == FIO_TYPE_CHAR && !read_only)
575 f->fd = dup(STDIN_FILENO);
577 from_hash = file_lookup_open(f, flags);
580 from_hash = file_lookup_open(f, flags);
/* Open failed: classify __e and possibly retry with adjusted flags. */
584 char buf[FIO_VERROR_SIZE];
587 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
588 flags &= ~FIO_O_NOATIME;
/* Out of fds: reclaim shadow fds and (presumably) retry. */
591 if (__e == EMFILE && file_close_shadow_fds(td))
594 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
596 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
597 log_err("fio: looks like your file system does not " \
598 "support direct=1/buffered=0\n");
601 td_verror(td, __e, buf);
605 if (!from_hash && f->fd != -1) {
606 if (add_file_hash(f)) {
610 * Stash away descriptor for later close. This is to
611 * work-around a "feature" on Linux, where a close of
612 * an fd that has been opened for write will trigger
613 * udev to call blkid to check partitions, fs id, etc.
614 * That pollutes the device cache, which can slow down
615 * unbuffered accesses.
617 if (f->shadow_fd == -1)
618 f->shadow_fd = f->fd;
621 * OK to ignore, we haven't done anything
624 ret = generic_close_file(td, f);
/* Default get_file_size hook: just delegate to the static helper. */
633 int generic_get_file_size(struct thread_data *td, struct fio_file *f)
635 return get_file_size(td, f);
639 * open/close all files, so that ->real_file_size gets set
/*
 * Probe every file's size; ENOENT is tolerated (the file will be laid
 * out later), other errors are logged. Files with unknown size fall
 * back to an even share of the job's size= option.
 */
641 static int get_file_sizes(struct thread_data *td)
647 for_each_file(td, f, i) {
648 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
651 if (td_io_get_file_size(td, f)) {
652 if (td->error != ENOENT) {
653 log_err("%s\n", td->verror);
660 if (f->real_file_size == -1ULL && td->o.size)
661 f->real_file_size = td->o.size / td->o.nr_files;
/* Trailing members of struct fio_mount (per-mountpoint tracking node). */
668 struct flist_head list;
675 * Get free number of bytes for each file on each unique mount.
/*
 * Sum free space across the distinct filesystems the job's files live
 * on (deduplicated by st_dev); block/char devices contribute their own
 * real size instead. Used to size fill_device runs.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
677 static unsigned long long get_fs_free_counts(struct thread_data *td)
679 struct flist_head *n, *tmp;
680 unsigned long long ret = 0;
681 struct fio_mount *fm;
686 for_each_file(td, f, i) {
690 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
691 if (f->real_file_size != -1ULL)
692 ret += f->real_file_size;
694 } else if (f->filetype != FIO_TYPE_FILE)
698 strncpy(buf, f->file_name, 255);
/* stat the path (falling back on the dirname, per elided code). */
700 if (stat(buf, &sb) < 0) {
704 if (stat(buf, &sb) < 0)
/* Skip filesystems we've already counted (same st_dev). */
709 flist_for_each(n, &list) {
710 fm = flist_entry(n, struct fio_mount, list);
711 if (fm->key == sb.st_dev)
720 fm = calloc(1, sizeof(*fm));
721 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
722 fm->base = basename(fm->__base);
724 flist_add(&fm->list, &list);
/* Second pass: query free bytes per unique mount and free the nodes. */
727 flist_for_each_safe(n, tmp, &list) {
728 unsigned long long sz;
730 fm = flist_entry(n, struct fio_mount, list);
731 flist_del(&fm->list);
733 sz = get_fs_size(fm->base);
734 if (sz && sz != -1ULL)
/*
 * Compute the starting offset for a file: append mode starts at the
 * current end of a regular file, otherwise it is start_offset plus a
 * per-subjob increment (offset_increment).
 */
743 uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
745 struct thread_options *o = &td->o;
747 if (o->file_append && f->filetype == FIO_TYPE_FILE)
748 return f->real_file_size;
750 return td->o.start_offset +
751 td->subjob_number * td->o.offset_increment;
755 * Open the files and setup files sizes, creating files if necessary.
/*
 * Main file-setup entry point: discover sizes (via engine setup() or
 * get_file_sizes()), validate that a total size is known, split size=
 * across nr_files, compute per-file io_size/offset, flag files that
 * need extending, lay them out via extend_file(), and finally set the
 * job's total_io_size.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
757 int setup_files(struct thread_data *td)
759 unsigned long long total_size, extend_size;
760 struct thread_options *o = &td->o;
762 unsigned int i, nr_fs_extra = 0;
763 int err = 0, need_extend;
765 const unsigned int bs = td_min_bs(td);
768 dprint(FD_FILE, "setup files\n");
770 old_state = td_bump_runstate(td, TD_SETTING_UP);
772 if (o->read_iolog_file)
776 * if ioengine defines a setup() method, it's responsible for
777 * opening the files and setting f->real_file_size to indicate
778 * the valid range for that file.
780 if (td->io_ops->setup)
781 err = td->io_ops->setup(td);
783 err = get_file_sizes(td);
789 * check sizes. if the files/devices do not exist and the size
790 * isn't passed to fio, abort.
793 for_each_file(td, f, i) {
794 if (f->real_file_size == -1ULL)
797 total_size += f->real_file_size;
801 td->fill_device_size = get_fs_free_counts(td);
804 * device/file sizes are zero and no size given, punt
806 if ((!total_size || total_size == -1ULL) && !o->size &&
807 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
808 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
809 log_err("%s: you need to specify size=\n", o->name);
810 td_verror(td, EINVAL, "total_file_size");
815 * Calculate per-file size and potential extra size for the
816 * first files, if needed.
818 if (!o->file_size_low && o->nr_files) {
821 fs = o->size / o->nr_files;
822 all_fs = fs * o->nr_files;
/* Remainder that doesn't divide evenly is spread over the first files. */
824 if (all_fs < o->size)
825 nr_fs_extra = (o->size - all_fs) / bs;
829 * now file sizes are known, so we can set ->io_size. if size= is
830 * not given, ->io_size is just equal to ->real_file_size. if size
831 * is given, ->io_size is size / nr_files.
833 extend_size = total_size = 0;
835 for_each_file(td, f, i) {
836 f->file_offset = get_start_offset(td, f);
838 if (!o->file_size_low) {
840 * no file size range given, file size is equal to
841 * total size divided by number of files. If that is
842 * zero, set it to the real file size. If the size
843 * doesn't divide nicely with the min blocksize,
844 * make the first files bigger.
853 f->io_size = f->real_file_size - f->file_offset;
854 } else if (f->real_file_size < o->file_size_low ||
855 f->real_file_size > o->file_size_high) {
856 if (f->file_offset > o->file_size_low)
859 * file size given. if it's fixed, use that. if it's a
860 * range, generate a random size in-between.
862 if (o->file_size_low == o->file_size_high)
863 f->io_size = o->file_size_low - f->file_offset;
865 f->io_size = get_rand_file_size(td)
869 f->io_size = f->real_file_size - f->file_offset;
871 if (f->io_size == -1ULL)
/* size_percent scales the usable region of each file. */
875 f->io_size = (f->io_size * o->size_percent) / 100;
876 total_size += f->io_size;
/* Regular files smaller than their io range must be extended, unless
 * the engine is diskless or files are created on open. */
879 if (f->filetype == FIO_TYPE_FILE &&
880 (f->io_size + f->file_offset) > f->real_file_size &&
881 !(td->io_ops->flags & FIO_DISKLESSIO)) {
882 if (!o->create_on_open) {
884 extend_size += (f->io_size + f->file_offset);
886 f->real_file_size = f->io_size + f->file_offset;
887 fio_file_set_extend(f);
/* Pre-size the block-info array when trim block error histograms are
 * requested (single-file jobs only, enforced in fixup_options). */
891 if (td->o.block_error_hist) {
894 assert(td->o.nr_files == 1); /* checked in fixup_options */
896 len = f->io_size / td->o.bs[DDIR_TRIM];
897 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
898 log_err("fio: cannot calculate block histogram with "
899 "%d trim blocks, maximum %d\n",
900 len, MAX_NR_BLOCK_INFOS);
901 td_verror(td, EINVAL, "block_error_hist");
905 td->ts.nr_block_infos = len;
906 for (int i = 0; i < len; i++)
907 td->ts.block_infos[i] =
908 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
910 td->ts.nr_block_infos = 0;
912 if (!o->size || (total_size && o->size > total_size))
913 o->size = total_size;
915 if (o->size < td_min_bs(td)) {
916 log_err("fio: blocksize too large for data set\n");
921 * See if we need to extend some files
925 if (output_format == FIO_OUTPUT_NORMAL)
926 log_info("%s: Laying out IO file(s) (%u file(s) /"
927 " %lluMB)\n", o->name, need_extend,
930 for_each_file(td, f, i) {
931 unsigned long long old_len = -1ULL, extend_len = -1ULL;
933 if (!fio_file_extend(f))
936 assert(f->filetype == FIO_TYPE_FILE);
937 fio_file_clear_extend(f);
938 if (!o->fill_device) {
939 old_len = f->real_file_size;
940 extend_len = f->io_size + f->file_offset -
943 f->real_file_size = (f->io_size + f->file_offset);
944 err = extend_file(td, f);
/* Invalidate the newly-written range so the job starts cache-cold. */
948 err = __file_invalidate_cache(td, f, old_len,
952 * Shut up static checker
968 o->zone_size = o->size;
971 * iolog already set the total io size, if we read back
974 if (!o->read_iolog_file) {
976 td->total_io_size = o->io_limit * o->loops;
978 td->total_io_size = o->size * o->loops;
985 td_restore_runstate(td, old_state);
988 log_err("%s: you need to specify valid offset=\n", o->name);
990 td_restore_runstate(td, old_state);
/* Pre-read every file in the job (see pre_read_file()). */
994 int pre_read_files(struct thread_data *td)
999 dprint(FD_FILE, "pre_read files\n");
1001 for_each_file(td, f, i) {
1002 pre_read_file(td, f);
/*
 * Initialize the per-file non-uniform random distribution state (zipf,
 * pareto, or gauss) over nranges blocks of min-blocksize granularity.
 * The seed is derived from the filename hash for repeatable runs, or
 * from rand_seeds[4] when rand_repeatable is off.
 */
1008 static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1010 unsigned int range_size, seed;
1011 unsigned long nranges;
1014 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1015 fsize = min(f->real_file_size, f->io_size);
/* Round up so a partial trailing range still counts. */
1017 nranges = (fsize + range_size - 1) / range_size;
1019 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1020 if (!td->o.rand_repeatable)
1021 seed = td->rand_seeds[4];
1023 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1024 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1025 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1026 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1027 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1028 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
/*
 * Set up random-distribution state for all files; a plain uniform
 * distribution needs no per-file state and returns early.
 */
1033 static int init_rand_distribution(struct thread_data *td)
1039 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1042 state = td_bump_runstate(td, TD_SETTING_UP);
1044 for_each_file(td, f, i)
1045 __init_rand_distribution(td, f);
1047 td_restore_runstate(td, state);
/*
 * Build the per-file random-offset tracking structure: an LFSR when the
 * LFSR generator is selected, otherwise an axmap bitmap — unless
 * norandommap is set. Allocation failure is fatal unless softrandommap
 * allows the job to continue without a map.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
1052 int init_random_map(struct thread_data *td)
1054 unsigned long long blocks;
1058 if (init_rand_distribution(td))
1063 for_each_file(td, f, i) {
1064 uint64_t fsize = min(f->real_file_size, f->io_size);
1066 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1068 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1071 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1073 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1074 fio_file_set_lfsr(f);
1077 } else if (!td->o.norandommap) {
1078 f->io_axmap = axmap_new(blocks);
1080 fio_file_set_axmap(f);
1083 } else if (td->o.norandommap)
/* Map allocation failed: hard error unless softrandommap is set. */
1086 if (!td->o.softrandommap) {
1087 log_err("fio: failed allocating random map. If running"
1088 " a large number of jobs, try the 'norandommap'"
1089 " option or set 'softrandommap'. Or give"
1090 " a larger --alloc-size to fio.\n");
1094 log_info("fio: file %s failed allocating random map. Running "
1095 "job without.\n", f->file_name);
/* Close every currently-open file in the job; files stay allocated. */
1101 void close_files(struct thread_data *td)
1106 for_each_file(td, f, i) {
1107 if (fio_file_open(f))
1108 td_io_close_file(td, f);
/*
 * Tear down all per-file state at job end: optionally unlink regular
 * files (unlink option), close open fds, remove hash entries, free
 * names/axmaps, and reset the thread's file bookkeeping.
 * NOTE(review): fragmentary listing — the unlink appears twice (before
 * and after close); confirm ordering against upstream.
 */
1112 void close_and_free_files(struct thread_data *td)
1117 dprint(FD_FILE, "close files\n");
1119 for_each_file(td, f, i) {
1120 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1121 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1122 td_io_unlink_file(td, f);
1125 if (fio_file_open(f))
1126 td_io_close_file(td, f);
1128 remove_file_hash(f);
1130 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1131 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1132 td_io_unlink_file(td, f);
1135 sfree(f->file_name);
1136 f->file_name = NULL;
1137 if (fio_file_axmap(f)) {
1138 axmap_free(f->io_axmap);
1144 td->o.filename = NULL;
1146 free(td->file_locks);
1147 td->files_index = 0;
1149 td->file_locks = NULL;
1150 td->o.file_lock_mode = FILE_LOCK_NONE;
/*
 * Classify a file name into FIO_TYPE_*: "-" is a pipe, Windows device
 * namespace paths are block devices, otherwise stat() decides between
 * block/char device, fifo, or (default) regular file.
 */
1154 static void get_file_type(struct fio_file *f)
1158 if (!strcmp(f->file_name, "-"))
1159 f->filetype = FIO_TYPE_PIPE;
1161 f->filetype = FIO_TYPE_FILE;
1163 /* \\.\ is the device namespace in Windows, where every file is
1165 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1166 f->filetype = FIO_TYPE_BD;
1168 if (!stat(f->file_name, &sb)) {
1169 if (S_ISBLK(sb.st_mode))
1170 f->filetype = FIO_TYPE_BD;
1171 else if (S_ISCHR(sb.st_mode))
1172 f->filetype = FIO_TYPE_CHAR;
1173 else if (S_ISFIFO(sb.st_mode))
1174 f->filetype = FIO_TYPE_PIPE;
/*
 * Linear search of the filename_list for an exact name match.
 * Caller must hold the file hash lock (see is_already_allocated()).
 */
1178 static int __is_already_allocated(const char *fname)
1180 struct flist_head *entry;
1183 if (flist_empty(&filename_list))
1186 flist_for_each(entry, &filename_list) {
1187 filename = flist_entry(entry, struct file_name, list)->filename;
1189 if (strcmp(filename, fname) == 0)
/* Locked wrapper around __is_already_allocated(). */
1196 static int is_already_allocated(const char *fname)
1200 fio_file_hash_lock();
1201 ret = __is_already_allocated(fname);
1202 fio_file_hash_unlock();
/*
 * Record fname in the global filename_list (under the hash lock) so
 * cloned sibling jobs don't re-add the same file. The node is only
 * linked in if the name isn't present already — the duplicate-path
 * cleanup of fn is presumably in elided lines.
 */
1206 static void set_already_allocated(const char *fname)
1208 struct file_name *fn;
1210 fn = malloc(sizeof(struct file_name));
1211 fn->filename = strdup(fname);
1213 fio_file_hash_lock();
1214 if (!__is_already_allocated(fname)) {
1215 flist_add_tail(&fn->list, &filename_list);
1218 fio_file_hash_unlock();
/* Free every node (and its strdup'd name) in filename_list, locked. */
1227 static void free_already_allocated(void)
1229 struct flist_head *entry, *tmp;
1230 struct file_name *fn;
1232 if (flist_empty(&filename_list))
1235 fio_file_hash_lock();
1236 flist_for_each_safe(entry, tmp, &filename_list) {
1237 fn = flist_entry(entry, struct file_name, list);
1239 flist_del(&fn->list);
1243 fio_file_hash_unlock();
/*
 * Allocate and reset a fresh fio_file from shared memory (smalloc);
 * OOM is logged and presumably returns NULL in elided code.
 */
1246 static struct fio_file *alloc_new_file(struct thread_data *td)
1250 f = smalloc(sizeof(*f));
1252 log_err("fio: smalloc OOM\n");
1259 fio_file_reset(td, f);
/*
 * Add a file to the job: build the full path (directory + name, with a
 * per-job index), skip names already claimed by cloned siblings, grow
 * the files array (and file_locks) as needed, set up locking per
 * file_lock_mode, and classify the file type.
 * NOTE(review): fragmentary listing — intermediate source lines elided.
 */
1263 int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1265 int cur_files = td->files_index;
1266 char file_name[PATH_MAX];
1270 dprint(FD_FILE, "add file %s\n", fname);
1272 if (td->o.directory)
1273 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob);
1275 sprintf(file_name + len, "%s", fname);
1277 /* clean cloned siblings using existing files */
1278 if (numjob && is_already_allocated(file_name))
1281 f = alloc_new_file(td);
/* Grow the file array one slot past the configured nr_files. */
1283 if (td->files_size <= td->files_index) {
1284 unsigned int new_size = td->o.nr_files + 1;
1286 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1288 td->files = realloc(td->files, new_size * sizeof(f));
1289 if (td->files == NULL) {
1290 log_err("fio: realloc OOM\n");
1293 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1294 td->file_locks = realloc(td->file_locks, new_size);
1295 if (!td->file_locks) {
1296 log_err("fio: realloc OOM\n");
1299 td->file_locks[cur_files] = FILE_LOCK_NONE;
1301 td->files_size = new_size;
1303 td->files[cur_files] = f;
1304 f->fileno = cur_files;
1307 * init function, io engine may not be loaded yet
1309 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1310 f->real_file_size = -1ULL;
1312 f->file_name = smalloc_strdup(file_name);
1313 if (!f->file_name) {
1314 log_err("fio: smalloc OOM\n");
/* Per-file locking primitives depend on the configured lock mode. */
1320 switch (td->o.file_lock_mode) {
1321 case FILE_LOCK_NONE:
1323 case FILE_LOCK_READWRITE:
1324 f->rwlock = fio_rwlock_init();
1326 case FILE_LOCK_EXCLUSIVE:
1327 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1330 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1335 if (f->filetype == FIO_TYPE_FILE)
1336 td->nr_normal_files++;
1338 set_already_allocated(file_name);
1343 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
/*
 * Add a file only if no file with the same name exists yet; returns the
 * existing or new file's index (elided return for the match case).
 */
1349 int add_file_exclusive(struct thread_data *td, const char *fname)
1354 for_each_file(td, f, i) {
1355 if (!strcmp(f->file_name, fname))
1359 return add_file(td, fname, 0, 1);
/* Take a reference on an already-open file. */
1362 void get_file(struct fio_file *f)
1364 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1365 assert(fio_file_open(f));
/*
 * Drop a reference on a file; on the last reference, optionally fsync
 * (fsync_on_close), invoke the engine's close hook, and mark the file
 * closed. Returns an error code from fsync/close paths.
 */
1369 int put_file(struct thread_data *td, struct fio_file *f)
1371 int f_ret = 0, ret = 0;
1373 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1375 if (!fio_file_open(f)) {
1376 assert(f->fd == -1);
1380 assert(f->references);
/* Not the last reference — nothing to close yet. */
1381 if (--f->references)
1384 if (should_fsync(td) && td->o.fsync_on_close) {
1385 f_ret = fsync(f->fd);
1390 if (td->io_ops->close_file)
1391 ret = td->io_ops->close_file(td, f);
1396 td->nr_open_files--;
1397 fio_file_clear_open(f);
1398 assert(f->fd == -1);
/*
 * Acquire the per-file lock according to file_lock_mode: read/write
 * rwlock keyed on the I/O direction, or an exclusive mutex. Records the
 * held mode in td->file_locks for unlock_file_all().
 */
1402 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1404 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1407 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1408 if (ddir == DDIR_READ)
1409 fio_rwlock_read(f->rwlock);
1411 fio_rwlock_write(f->rwlock);
1412 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1413 fio_mutex_down(f->lock);
1415 td->file_locks[f->fileno] = td->o.file_lock_mode;
/* Release the per-file lock taken by lock_file() and clear the record. */
1418 void unlock_file(struct thread_data *td, struct fio_file *f)
1420 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1423 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1424 fio_rwlock_unlock(f->rwlock);
1425 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1426 fio_mutex_up(f->lock);
1428 td->file_locks[f->fileno] = FILE_LOCK_NONE;
/* Release a file's lock if td->file_locks says this thread holds one. */
1431 void unlock_file_all(struct thread_data *td, struct fio_file *f)
1433 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1435 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
/*
 * Recursively walk a directory, adding every regular file via
 * add_file() and descending into subdirectories. "." and ".." are
 * skipped; a vanished entry (ENOENT from lstat) is tolerated.
 */
1439 static int recurse_dir(struct thread_data *td, const char *dirname)
1445 D = opendir(dirname);
1447 char buf[FIO_VERROR_SIZE];
1449 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1450 td_verror(td, errno, buf);
1454 while ((dir = readdir(D)) != NULL) {
1455 char full_path[PATH_MAX];
1458 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1461 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1463 if (lstat(full_path, &sb) == -1) {
1464 if (errno != ENOENT) {
1465 td_verror(td, errno, "stat");
1471 if (S_ISREG(sb.st_mode)) {
1472 add_file(td, full_path, 0, 1);
1475 if (!S_ISDIR(sb.st_mode))
1478 ret = recurse_dir(td, full_path);
/* Add all files under a directory tree (opendir= option). */
1487 int add_dir_files(struct thread_data *td, const char *path)
1489 int ret = recurse_dir(td, path);
1492 log_info("fio: opendir added %d files\n", td->o.nr_files);
/*
 * Clone the file list from an existing thread (job forking): duplicate
 * names and file types; lock objects are shared by pointer, not copied,
 * so sibling jobs contend on the same locks.
 */
1497 void dup_files(struct thread_data *td, struct thread_data *org)
1502 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1507 td->files = malloc(org->files_index * sizeof(f));
1509 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1510 td->file_locks = malloc(org->files_index);
1512 for_each_file(org, f, i) {
1513 struct fio_file *__f;
1515 __f = alloc_new_file(td);
1518 __f->file_name = smalloc_strdup(f->file_name);
1519 if (!__f->file_name) {
1520 log_err("fio: smalloc OOM\n");
1524 __f->filetype = f->filetype;
/* Share (not copy) the original's lock objects. */
1527 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1528 __f->lock = f->lock;
1529 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1530 __f->rwlock = f->rwlock;
1537 * Returns the index that matches the filename, or -1 if not there
1539 int get_fileno(struct thread_data *td, const char *fname)
1544 for_each_file(td, f, i)
1545 if (!strcmp(f->file_name, fname))
1552 * For log usage, where we add/open/close files automatically
/* Reset per-thread file counters after automatic (log-driven) file use. */
1554 void free_release_files(struct thread_data *td)
1558 td->o.open_files = 0;
1559 td->files_index = 0;
1560 td->nr_normal_files = 0;
/*
 * Reset a file's per-run position state: last_pos back to the start
 * offset for each data direction, and reset the random-map structure
 * (axmap or LFSR) so a new pass sees a fresh map.
 */
1563 void fio_file_reset(struct thread_data *td, struct fio_file *f)
1567 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1568 f->last_pos[i] = f->file_offset;
1569 f->last_start[i] = -1ULL;
1572 if (fio_file_axmap(f))
1573 axmap_reset(f->io_axmap);
1574 else if (fio_file_lfsr(f))
1575 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
/* True only when every file in the job is marked done. */
1578 int fio_files_done(struct thread_data *td)
1583 for_each_file(td, f, i)
1584 if (!fio_file_done(f))
1590 /* free memory used in initialization phase only */
1591 void filesetup_mem_free(void)
1593 free_already_allocated();