17 static inline void clear_error(struct thread_data *td)
24 * Leaves f->fd open on success, caller must close
/*
 * Lay out / extend a file to f->real_file_size: optionally unlink and
 * recreate it, fallocate/ftruncate it, then fill it with zeroed writes
 * of max_bs[DDIR_WRITE] bytes.  Leaves f->fd open on success (caller
 * must close).  Returns 0 on success, non-zero with td_verror() set.
 */
26 static int extend_file(struct thread_data *td, struct fio_file *f)
28 int r, new_layout = 0, unlink_file = 0, flags;
29 unsigned long long left;
/* extending implies writing to the file; refuse under read-only */
34 log_err("fio: refusing extend of file due to read-only\n");
39 * check if we need to lay the file out complete again. fio
40 * does that for operations involving reads, or for writes
41 * where overwrite is set
43 if (td_read(td) || (td_write(td) && td->o.overwrite) ||
44 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
46 if (td_write(td) && !td->o.overwrite)
/* starting over: remove any stale copy first (ENOENT is not an error) */
49 if (unlink_file || new_layout) {
50 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
51 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
52 td_verror(td, errno, "unlink");
57 flags = O_WRONLY | O_CREAT;
61 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
62 f->fd = open(f->file_name, flags, 0644);
64 td_verror(td, errno, "open");
/* pre-allocate blocks up front so the layout writes don't fragment */
68 #ifdef FIO_HAVE_FALLOCATE
69 if (td->o.fallocate && !td->o.fill_device) {
70 dprint(FD_FILE, "fallocate file %s size %llu\n", f->file_name,
/* posix_fallocate() returns the error code directly, it does not set errno */
73 r = posix_fallocate(f->fd, 0, f->real_file_size);
75 log_err("fio: posix_fallocate fails: %s\n",
85 * The size will be -1ULL when fill_device is used, so don't truncate
86 * or fallocate this file, just write it
88 if (!td->o.fill_device) {
89 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
91 if (ftruncate(f->fd, f->real_file_size) == -1) {
92 td_verror(td, errno, "ftruncate");
/* NOTE(review): malloc() result appears to be used unchecked -- confirm OOM policy */
97 b = malloc(td->o.max_bs[DDIR_WRITE]);
98 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
/* write zero-filled blocks until the full size is laid out or we are told to stop */
100 left = f->real_file_size;
101 while (left && !td->terminate) {
102 bs = td->o.max_bs[DDIR_WRITE];
106 r = write(f->fd, b, bs);
/* with fill_device, hitting ENOSPC is the expected way to find the device end */
116 if (td->o.fill_device)
118 log_info("fio: ENOSPC on laying out "
122 td_verror(td, errno, "write");
/* short write with no errno: report as generic I/O error */
124 td_verror(td, EIO, "write");
/* interrupted: don't leave a partially laid-out file behind */
131 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
132 unlink(f->file_name);
133 } else if (td->o.create_fsync) {
134 if (fsync(f->fd) < 0) {
135 td_verror(td, errno, "fsync");
/* fill_device + reads: re-probe how much was actually written and clamp io_size */
139 if (td->o.fill_device && !td_write(td)) {
140 fio_file_clear_size_known(f);
141 if (td_io_get_file_size(td, f))
143 if (f->io_size > f->real_file_size)
144 f->io_size = f->real_file_size;
/*
 * Read the file once sequentially to warm the page cache before the
 * job proper starts.  Opens the file if needed (and closes it again if
 * this function did the open), and restores the thread runstate after.
 */
156 static int pre_read_file(struct thread_data *td, struct fio_file *f)
158 int r, did_open = 0, old_runstate;
159 unsigned long long left;
/* pipes can't be pre-read: data would be consumed, not cached */
163 if (td->io_ops->flags & FIO_PIPEIO)
166 if (!fio_file_open(f)) {
167 if (td->io_ops->open_file(td, f)) {
168 log_err("fio: cannot pre-read, failed to open file\n");
174 old_runstate = td->runstate;
175 td_set_runstate(td, TD_PRE_READING);
177 bs = td->o.max_bs[DDIR_READ];
/* NOTE(review): lseek() return value looks unchecked here -- verify */
181 lseek(f->fd, f->file_offset, SEEK_SET);
184 while (left && !td->terminate) {
188 r = read(f->fd, b, bs);
194 td_verror(td, EIO, "pre_read");
199 td_set_runstate(td, old_runstate);
/* only close if we opened it above (did_open) */
202 td->io_ops->close_file(td, f);
/*
 * Pick a random per-file size in [file_size_low, file_size_high],
 * scaled from os_random_long() output and rounded down to a multiple
 * of rw_min_bs so the size is always block-aligned.
 */
207 static unsigned long long get_rand_file_size(struct thread_data *td)
209 unsigned long long ret, sized;
212 r = os_random_long(&td->file_size_state);
213 sized = td->o.file_size_high - td->o.file_size_low;
/* map r in [0, OS_RAND_MAX] onto [0, sized) via double arithmetic */
214 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
215 ret += td->o.file_size_low;
/* align down to the minimum block size */
216 ret -= (ret % td->o.rw_min_bs);
/*
 * Fill in f->real_file_size for a regular file via stat(2).
 */
220 static int file_size(struct thread_data *td, struct fio_file *f)
224 if (stat(f->file_name, &st) == -1) {
/* NOTE(review): the error tag says "fstat" but the call above is stat() -- confirm/fix */
225 td_verror(td, errno, "fstat");
229 f->real_file_size = st.st_size;
/*
 * Fill in f->real_file_size for a block device: open it, query the
 * device size via blockdev_size(), then close it again.  A zero-sized
 * device is treated as an error.
 */
233 static int bdev_size(struct thread_data *td, struct fio_file *f)
235 unsigned long long bytes;
238 if (td->io_ops->open_file(td, f)) {
239 log_err("fio: failed opening blockdev %s for size check\n",
244 r = blockdev_size(f->fd, &bytes);
246 td_verror(td, r, "blockdev_size");
251 log_err("%s: zero sized block device?\n", f->file_name);
255 f->real_file_size = bytes;
256 td->io_ops->close_file(td, f);
/* error path: still close the device before returning */
259 td->io_ops->close_file(td, f);
/*
 * Determine f->real_file_size based on file type (regular file or
 * block device), validate that file_offset lies within it, and mark
 * the size as known so repeat calls are no-ops.
 */
263 static int get_file_size(struct thread_data *td, struct fio_file *f)
267 if (fio_file_size_known(f))
270 if (f->filetype == FIO_TYPE_FILE)
271 ret = file_size(td, f);
272 else if (f->filetype == FIO_TYPE_BD)
273 ret = bdev_size(td, f);
/* char devices / pipes: no meaningful size */
275 f->real_file_size = -1;
/* an offset beyond the end of the file is a configuration error */
280 if (f->file_offset > f->real_file_size) {
281 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name,
282 f->file_offset, f->real_file_size);
286 fio_file_set_size_known(f);
/*
 * Drop cached pages for [off, off+len) of the file so subsequent IO
 * hits the backing store.  Uses madvise() for mmap'ed files,
 * posix_fadvise() for regular files and a blockdev ioctl for block
 * devices.  off/len of -1ULL mean "whole file".
 */
290 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
291 unsigned long long off,
292 unsigned long long len)
299 off = f->file_offset;
/* nothing sensible to invalidate if either bound is unknown */
301 if (len == -1ULL || off == -1ULL)
304 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
308 * FIXME: add blockdev flushing too
/* mmap'ed file: ask the kernel to discard (and optionally free) the mapping */
311 ret = madvise(f->mmap_ptr, f->mmap_sz, MADV_DONTNEED);
313 (void) madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
315 } else if (f->filetype == FIO_TYPE_FILE) {
316 ret = fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
317 } else if (f->filetype == FIO_TYPE_BD) {
318 ret = blockdev_invalidate_cache(f->fd);
/* flushing a blockdev needs root; warn once and continue without it */
319 if (ret < 0 && errno == EACCES && geteuid()) {
321 log_err("fio: only root may flush block "
322 "devices. Cache flush bypassed!\n");
/* char devices and pipes have no page cache to invalidate */
327 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
/* ret < 0: errno-style failure; ret > 0: error code returned directly */
331 td_verror(td, errno, "invalidate_cache");
333 } else if (ret > 0) {
334 td_verror(td, ret, "invalidate_cache");
/*
 * Public wrapper: invalidate the entire cached range of an open file.
 * No-op (by falling through) when the file isn't open.
 */
342 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
344 if (!fio_file_open(f))
347 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
/*
 * Default ->close_file() implementation: close(2) the descriptor and
 * report any failure.
 */
350 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
354 dprint(FD_FILE, "fd close %s\n", f->file_name);
358 if (close(f->fd) < 0)
/*
 * Open a file, consulting the global file hash first so that multiple
 * jobs using the same name share lock state.  On a hash hit the lock
 * bookkeeping is copied from the existing entry; either way the file
 * is opened with the given flags.
 */
365 static int file_lookup_open(struct fio_file *f, int flags)
367 struct fio_file *__f;
370 __f = lookup_file_hash(f->file_name);
372 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
374 * racy, need the __f->lock locked
377 f->lock_owner = __f->lock_owner;
378 f->lock_batch = __f->lock_batch;
379 f->lock_ddir = __f->lock_ddir;
382 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
386 f->fd = open(f->file_name, flags, 0600);
/*
 * Default ->open_file() implementation.  Handles the "-" pseudo-file
 * (dup of stdin/stdout), builds the open flags (O_DIRECT, O_SYNC,
 * noatime, create), retries without O_NOATIME on EPERM, and registers
 * newly opened files in the global file hash.
 */
390 int generic_open_file(struct thread_data *td, struct fio_file *f)
396 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdio; mixed read/write to it is not supported */
398 if (!strcmp(f->file_name, "-")) {
400 log_err("fio: can't read/write to stdin/out\n");
406 * move output logging to stderr, if we are writing to stdout
413 flags |= OS_O_DIRECT;
/* noatime only makes sense for non-regular files here */
416 if (f->filetype != FIO_TYPE_FILE)
417 flags |= FIO_O_NOATIME;
418 if (td->o.create_on_open)
426 if (f->filetype == FIO_TYPE_FILE)
430 f->fd = dup(STDOUT_FILENO);
432 from_hash = file_lookup_open(f, flags);
434 if (f->filetype == FIO_TYPE_CHAR && !read_only)
440 f->fd = dup(STDIN_FILENO);
442 from_hash = file_lookup_open(f, flags);
446 char buf[FIO_VERROR_SIZE];
/* EPERM with O_NOATIME: not the file owner -- retry without the flag */
449 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
450 flags &= ~FIO_O_NOATIME;
454 snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
456 td_verror(td, __e, buf);
/* newly opened (not found in hash): add it; on hash collision, undo the open */
459 if (!from_hash && f->fd != -1) {
460 if (add_file_hash(f)) {
464 * OK to ignore, we haven't done anything with it
466 ret = generic_close_file(td, f);
/*
 * Default ->get_file_size() implementation: defer to get_file_size().
 */
474 int generic_get_file_size(struct thread_data *td, struct fio_file *f)
476 return get_file_size(td, f);
480 * open/close all files, so that ->real_file_size gets set
/*
 * Probe the size of every file in the job so ->real_file_size is set.
 * ENOENT is tolerated (file may be created later); for files with no
 * determinable size, fall back to an even split of the job's size=.
 */
482 static int get_file_sizes(struct thread_data *td)
488 for_each_file(td, f, i) {
489 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
492 if (td_io_get_file_size(td, f)) {
/* missing files are fine here -- they'll be laid out later */
493 if (td->error != ENOENT) {
494 log_err("%s\n", td->verror);
500 if (f->real_file_size == -1ULL && td->o.size)
501 f->real_file_size = td->o.size / td->o.nr_files;
508 struct flist_head list;
515 * Get free number of bytes for each file on each unique mount.
/*
 * Sum the free space of each unique filesystem (keyed by st_dev)
 * backing the job's files.  Builds a temporary list of fio_mount
 * entries, one per device, then totals get_fs_size() over it.
 */
517 static unsigned long long get_fs_free_counts(struct thread_data *td)
519 struct flist_head *n, *tmp;
520 unsigned long long ret;
521 struct fio_mount *fm;
526 for_each_file(td, f, i) {
/* NOTE(review): strcpy into a fixed buffer -- verify buf is >= the file name length */
530 strcpy(buf, f->file_name);
/* if the file doesn't exist yet, fall back to stat'ing its directory */
532 if (stat(buf, &sb) < 0) {
536 if (stat(buf, &sb) < 0)
/* skip devices we've already recorded */
541 flist_for_each(n, &list) {
542 fm = flist_entry(n, struct fio_mount, list);
543 if (fm->key == sb.st_dev)
552 fm = malloc(sizeof(*fm));
553 strcpy(fm->__base, buf);
554 fm->base = basename(fm->__base);
556 flist_add(&fm->list, &list);
/* walk the list once more, freeing entries and summing free bytes */
560 flist_for_each_safe(n, tmp, &list) {
561 unsigned long long sz;
563 fm = flist_entry(n, struct fio_mount, list);
564 flist_del(&fm->list);
/* -1ULL means get_fs_size() could not determine the free space */
566 sz = get_fs_size(fm->base);
567 if (sz && sz != -1ULL)
577 * Open the files and setup files sizes, creating files if necessary.
/*
 * Main file setup entry point: determine all file sizes, compute each
 * file's ->io_size from size=/file_size_low/high, lay out (extend)
 * files that need creating, and derive the job's total size and
 * total_io_size.  Returns 0 on success.
 */
579 int setup_files(struct thread_data *td)
581 unsigned long long total_size, extend_size;
584 int err = 0, need_extend;
586 dprint(FD_FILE, "setup files\n");
/* replaying an iolog: sizes come from the log, not from here */
588 if (td->o.read_iolog_file)
592 * if ioengine defines a setup() method, it's responsible for
593 * opening the files and setting f->real_file_size to indicate
594 * the valid range for that file.
596 if (td->io_ops->setup)
597 err = td->io_ops->setup(td);
599 err = get_file_sizes(td);
605 * check sizes. if the files/devices do not exist and the size
606 * isn't passed to fio, abort.
609 for_each_file(td, f, i) {
610 if (f->real_file_size == -1ULL)
613 total_size += f->real_file_size;
616 if (td->o.fill_device)
617 td->fill_device_size = get_fs_free_counts(td);
620 * device/file sizes are zero and no size given, punt
622 if ((!total_size || total_size == -1ULL) && !td->o.size &&
623 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
624 log_err("%s: you need to specify size=\n", td->o.name);
625 td_verror(td, EINVAL, "total_file_size");
630 * now file sizes are known, so we can set ->io_size. if size= is
631 * not given, ->io_size is just equal to ->real_file_size. if size
632 * is given, ->io_size is size / nr_files.
634 extend_size = total_size = 0;
636 for_each_file(td, f, i) {
637 f->file_offset = td->o.start_offset;
639 if (!td->o.file_size_low) {
641 * no file size range given, file size is equal to
642 * total size divided by number of files. if that is
643 * zero, set it to the real file size.
645 f->io_size = td->o.size / td->o.nr_files;
647 f->io_size = f->real_file_size - f->file_offset;
648 } else if (f->real_file_size < td->o.file_size_low ||
649 f->real_file_size > td->o.file_size_high) {
/* offset beyond the requested minimum size is a config error */
650 if (f->file_offset > td->o.file_size_low)
653 * file size given. if it's fixed, use that. if it's a
654 * range, generate a random size in-between.
656 if (td->o.file_size_low == td->o.file_size_high) {
657 f->io_size = td->o.file_size_low
660 f->io_size = get_rand_file_size(td)
664 f->io_size = f->real_file_size - f->file_offset;
666 if (f->io_size == -1ULL)
669 total_size += f->io_size;
/* regular files that are smaller than requested must be extended */
671 if (f->filetype == FIO_TYPE_FILE &&
672 (f->io_size + f->file_offset) > f->real_file_size &&
673 !(td->io_ops->flags & FIO_DISKLESSIO)) {
674 if (!td->o.create_on_open) {
676 extend_size += (f->io_size + f->file_offset);
678 f->real_file_size = f->io_size + f->file_offset;
679 fio_file_set_extend(f);
683 if (!td->o.size || td->o.size > total_size)
684 td->o.size = total_size;
687 * See if we need to extend some files
692 log_info("%s: Laying out IO file(s) (%u file(s) /"
693 " %LuMB)\n", td->o.name, need_extend,
696 for_each_file(td, f, i) {
697 unsigned long long old_len = -1ULL, extend_len = -1ULL;
699 if (!fio_file_extend(f))
702 assert(f->filetype == FIO_TYPE_FILE);
703 fio_file_clear_extend(f);
/* with fill_device, the real size is discovered by writing until ENOSPC */
704 if (!td->o.fill_device) {
705 old_len = f->real_file_size;
706 extend_len = f->io_size + f->file_offset -
709 f->real_file_size = (f->io_size + f->file_offset);
710 err = extend_file(td, f);
/* drop cached pages of the newly written region */
714 err = __file_invalidate_cache(td, f, old_len,
/* zone_size defaults to the whole job size when unset */
727 if (!td->o.zone_size)
728 td->o.zone_size = td->o.size;
731 * iolog already set the total io size, if we read back
734 if (!td->o.read_iolog_file)
735 td->total_io_size = td->o.size * td->o.loops;
738 log_err("%s: you need to specify valid offset=\n", td->o.name);
/*
 * Pre-read every file in the job to warm caches.
 * NOTE(review): pre_read_file()'s return value appears ignored -- confirm
 * failures are intentionally non-fatal here.
 */
742 int pre_read_files(struct thread_data *td)
747 dprint(FD_FILE, "pre_read files\n");
749 for_each_file(td, f, i) {
750 pre_read_file(td, f);
/*
 * Allocate the per-file random-IO bitmap used to track which blocks
 * have been touched.  Skipped for norandommap or non-random jobs.  On
 * allocation failure: hard error unless softrandommap is set, in which
 * case the job continues without a map for that file.
 */
756 int init_random_map(struct thread_data *td)
758 unsigned long long blocks, num_maps;
762 if (td->o.norandommap || !td_random(td))
765 for_each_file(td, f, i) {
/* number of rw_min_bs-sized blocks, rounded up */
766 blocks = (f->real_file_size + td->o.rw_min_bs - 1) /
767 (unsigned long long) td->o.rw_min_bs;
/* one map word covers BLOCKS_PER_MAP blocks, rounded up */
768 num_maps = (blocks + BLOCKS_PER_MAP - 1) /
769 (unsigned long long) BLOCKS_PER_MAP;
770 f->file_map = smalloc(num_maps * sizeof(int));
772 f->num_maps = num_maps;
/* smalloc failed: fatal unless the user allowed a soft fallback */
775 if (!td->o.softrandommap) {
776 log_err("fio: failed allocating random map. If running"
777 " a large number of jobs, try the 'norandommap'"
778 " option or set 'softrandommap'. Or give"
779 " a larger --alloc-size to fio.\n");
783 log_info("fio: file %s failed allocating random map. Running "
784 "job without.\n", f->file_name);
/*
 * Close every currently open file in the job (files themselves are
 * kept; see close_and_free_files() for teardown).
 */
791 void close_files(struct thread_data *td)
796 for_each_file(td, f, i) {
797 if (fio_file_open(f))
798 td_io_close_file(td, f);
/*
 * Full teardown: optionally unlink regular files (unlink= option),
 * close anything still open, and release per-file resources.
 */
802 void close_and_free_files(struct thread_data *td)
807 dprint(FD_FILE, "close files\n");
809 for_each_file(td, f, i) {
810 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
811 dprint(FD_FILE, "free unlink %s\n", f->file_name);
812 unlink(f->file_name);
815 if (fio_file_open(f))
816 td_io_close_file(td, f);
/* file list is gone; clear the stale filename pointer */
827 td->o.filename = NULL;
/*
 * Classify a file as pipe, block device, char device or regular file
 * from its name ("-" == stdio pipe) and stat(2) mode bits.  Defaults
 * to FIO_TYPE_FILE when stat() fails (file may not exist yet).
 */
834 static void get_file_type(struct fio_file *f)
838 if (!strcmp(f->file_name, "-"))
839 f->filetype = FIO_TYPE_PIPE;
841 f->filetype = FIO_TYPE_FILE;
843 if (!stat(f->file_name, &sb)) {
844 if (S_ISBLK(sb.st_mode))
845 f->filetype = FIO_TYPE_BD;
846 else if (S_ISCHR(sb.st_mode))
847 f->filetype = FIO_TYPE_CHAR;
848 else if (S_ISFIFO(sb.st_mode))
849 f->filetype = FIO_TYPE_PIPE;
/*
 * Add a file to the job: allocate the fio_file, grow td->files if
 * needed, build the full path (directory= prefix + fname), set up the
 * configured file lock, and classify the file type.
 */
853 int add_file(struct thread_data *td, const char *fname)
855 int cur_files = td->files_index;
856 char file_name[PATH_MAX];
860 dprint(FD_FILE, "add file %s\n", fname);
862 f = smalloc(sizeof(*f));
864 log_err("fio: smalloc OOM\n");
/* grow the pointer array one slot at a time as files are added */
870 if (td->files_size <= td->files_index) {
871 int new_size = td->o.nr_files + 1;
873 dprint(FD_FILE, "resize file array to %d files\n", new_size);
/* sizeof(f): array of fio_file pointers, not of structs */
875 td->files = realloc(td->files, new_size * sizeof(f));
876 td->files_size = new_size;
878 td->files[cur_files] = f;
881 * init function, io engine may not be loaded yet
883 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
884 f->real_file_size = -1ULL;
/* NOTE(review): sprintf into PATH_MAX buffer -- long directory+fname could overflow; verify */
887 len = sprintf(file_name, "%s/", td->o.directory);
889 sprintf(file_name + len, "%s", fname);
890 f->file_name = smalloc_strdup(file_name);
892 log_err("fio: smalloc OOM\n");
898 switch (td->o.file_lock_mode) {
901 case FILE_LOCK_READWRITE:
902 f->lock = fio_mutex_rw_init();
904 case FILE_LOCK_EXCLUSIVE:
905 f->lock = fio_mutex_init(1);
908 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
913 if (f->filetype == FIO_TYPE_FILE)
914 td->nr_normal_files++;
916 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
922 void get_file(struct fio_file *f)
924 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
925 assert(fio_file_open(f));
/*
 * Drop a reference on a file; when the last reference goes away,
 * optionally fsync (fsync_on_close) and close via the io engine.
 * Returns the close/fsync error, if any.
 */
929 int put_file(struct thread_data *td, struct fio_file *f)
931 int f_ret = 0, ret = 0;
933 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
/* putting a file that isn't open is tolerated */
935 if (!fio_file_open(f)) {
940 assert(f->references);
944 if (should_fsync(td) && td->o.fsync_on_close)
945 f_ret = fsync(f->fd);
947 if (td->io_ops->close_file)
948 ret = td->io_ops->close_file(td, f);
954 fio_file_clear_open(f);
/*
 * Acquire the per-file lock for the given IO direction.  A thread that
 * already owns the lock may re-enter up to lockfile_batch times
 * (lock_batch countdown) without re-acquiring.
 */
959 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
961 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
/* fast path: same owner and batch budget remaining */
964 if (f->lock_owner == td && f->lock_batch--)
967 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
/* reader/writer lock: readers share, writers are exclusive */
968 if (ddir == DDIR_READ)
969 fio_mutex_down_read(f->lock);
971 fio_mutex_down_write(f->lock);
972 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
973 fio_mutex_down(f->lock);
/* record ownership and refill the batch budget */
976 f->lock_batch = td->o.lockfile_batch;
/*
 * Release the per-file lock, clearing lock_owner once the mutex value
 * shows this is the last holder for the recorded direction.
 */
980 void unlock_file(struct thread_data *td, struct fio_file *f)
982 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
987 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
988 const int is_read = f->lock_ddir == DDIR_READ;
989 int val = fio_mutex_getval(f->lock);
/* last reader (val==1) or the writer (val==-1): ownership ends here */
991 if ((is_read && val == 1) || (!is_read && val == -1))
992 f->lock_owner = NULL;
995 fio_mutex_up_read(f->lock);
997 fio_mutex_up_write(f->lock);
998 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
999 int val = fio_mutex_getval(f->lock);
1002 f->lock_owner = NULL;
1004 fio_mutex_up(f->lock);
/*
 * Release any lock this thread still holds on the file (used on
 * teardown paths); no-op when the thread is not the lock owner.
 */
1008 void unlock_file_all(struct thread_data *td, struct fio_file *f)
1010 if (f->lock_owner != td)
/*
 * Recursively walk a directory, calling add_file() for every regular
 * file found, and descending into subdirectories.  "." and ".." are
 * skipped; a file vanishing between readdir and lstat (ENOENT) is
 * tolerated.
 */
1017 static int recurse_dir(struct thread_data *td, const char *dirname)
1023 D = opendir(dirname);
1025 char buf[FIO_VERROR_SIZE];
1027 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
1028 td_verror(td, errno, buf);
1032 while ((dir = readdir(D)) != NULL) {
1033 char full_path[PATH_MAX];
1036 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
/* NOTE(review): sprintf into PATH_MAX buffer -- deep trees could overflow; verify */
1039 sprintf(full_path, "%s/%s", dirname, dir->d_name);
/* lstat so symlinks are not followed into other trees */
1041 if (lstat(full_path, &sb) == -1) {
1042 if (errno != ENOENT) {
1043 td_verror(td, errno, "stat");
1048 if (S_ISREG(sb.st_mode)) {
1049 add_file(td, full_path);
1053 if (!S_ISDIR(sb.st_mode))
1056 ret = recurse_dir(td, full_path);
/*
 * Add all regular files under 'path' (recursively) to the job and log
 * how many were picked up.
 */
1065 int add_dir_files(struct thread_data *td, const char *path)
1067 int ret = recurse_dir(td, path);
1070 log_info("fio: opendir added %d files\n", td->o.nr_files);
/*
 * Clone the file list of 'org' into 'td' (used when forking jobs):
 * fresh fio_file allocations with duplicated names and copied file
 * types.
 */
1075 void dup_files(struct thread_data *td, struct thread_data *org)
1080 dprint(FD_FILE, "dup files: %d\n", org->files_index);
/* sizeof(f): array of fio_file pointers, matching the original layout */
1085 td->files = malloc(org->files_index * sizeof(f));
1087 for_each_file(org, f, i) {
1088 struct fio_file *__f;
1090 __f = smalloc(sizeof(*__f));
1092 log_err("fio: smalloc OOM\n");
1098 __f->file_name = smalloc_strdup(f->file_name);
1099 if (!__f->file_name) {
1100 log_err("fio: smalloc OOM\n");
1104 __f->filetype = f->filetype;
1112 * Returns the index that matches the filename, or -1 if not there
1114 int get_fileno(struct thread_data *td, const char *fname)
/* linear scan; file counts are small enough that this is fine */
1119 for_each_file(td, f, i)
1120 if (!strcmp(f->file_name, fname))
1127 * For log usage, where we add/open/close files automatically
1129 void free_release_files(struct thread_data *td)
/* reset bookkeeping so the file array can be repopulated */
1132 td->files_index = 0;
1133 td->nr_normal_files = 0;