/*
 * Reset the per-thread error state on td. Body not visible in this
 * excerpt — presumably clears td->error/td->verror; confirm in full source.
 */
16 static inline void clear_error(struct thread_data *td)
/*
 * Lay out / extend file f to its target size, creating it if needed.
 * Returns 0 on success, non-zero on error (td_verror set).
 * NOTE: this excerpt is fragmentary; intervening lines are missing.
 */
23 * Leaves f->fd open on success, caller must close
25 static int extend_file(struct thread_data *td, struct fio_file *f)
27 int r, new_layout = 0, unlink_file = 0, flags;
28 unsigned long long left;
/* Refuse to create/extend when the job is read-only. */
33 log_err("fio: refusing extend of file due to read-only\n");
38 * check if we need to lay the file out complete again. fio
39 * does that for operations involving reads, or for writes
40 * where overwrite is set
42 if (td_read(td) || (td_write(td) && td->o.overwrite) ||
43 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
45 if (td_write(td) && !td->o.overwrite)
/* Fresh layout: remove any stale file first (ENOENT is fine). */
48 if (unlink_file || new_layout) {
49 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
50 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
51 td_verror(td, errno, "unlink");
56 flags = O_WRONLY | O_CREAT;
60 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
61 f->fd = open(f->file_name, flags, 0644);
63 td_verror(td, errno, "open");
/* Pre-allocate blocks when supported; skipped for fill_device since
 * real_file_size is -1ULL in that mode (see comment below). */
67 #ifdef FIO_HAVE_FALLOCATE
68 if (td->o.fallocate && !td->o.fill_device) {
69 dprint(FD_FILE, "fallocate file %s size %llu\n", f->file_name,
72 r = posix_fallocate(f->fd, 0, f->real_file_size);
74 log_err("fio: posix_fallocate fails: %s\n",
84 * The size will be -1ULL when fill_device is used, so don't truncate
85 * or fallocate this file, just write it
87 if (!td->o.fill_device) {
88 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
90 if (ftruncate(f->fd, f->real_file_size) == -1) {
91 td_verror(td, errno, "ftruncate");
/* Zero-filled write buffer sized to the largest write block size.
 * NOTE(review): malloc return is not checked in the visible lines. */
96 b = malloc(td->o.max_bs[DDIR_WRITE]);
97 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
/* Write zeros until the whole file is laid out or we are told to stop. */
99 left = f->real_file_size;
100 while (left && !td->terminate) {
101 bs = td->o.max_bs[DDIR_WRITE];
105 r = write(f->fd, b, bs);
/* fill_device mode treats ENOSPC as the normal termination condition. */
115 if (td->o.fill_device)
117 log_info("fio: ENOSPC on laying out "
121 td_verror(td, errno, "write");
/* Short/zero write with no errno: report as generic EIO. */
123 td_verror(td, EIO, "write");
/* Interrupted mid-layout: remove the partial file. */
130 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
131 unlink(f->file_name);
132 } else if (td->o.create_fsync) {
133 if (fsync(f->fd) < 0) {
134 td_verror(td, errno, "fsync");
/* fill_device + non-write job: re-probe the actual size we managed to
 * lay out and clamp io_size to it. */
138 if (td->o.fill_device && !td_write(td)) {
139 fio_file_clear_size_known(f);
140 if (td_io_get_file_size(td, f))
142 if (f->io_size > f->real_file_size)
143 f->io_size = f->real_file_size;
/*
 * Read through file f once to warm the page cache before the job runs.
 * Opens the file if needed (closing it again afterwards), and restores
 * the thread runstate when done. Fragmentary excerpt — error paths and
 * the read-loop bookkeeping are partly missing.
 */
155 static int pre_read_file(struct thread_data *td, struct fio_file *f)
157 int r, did_open = 0, old_runstate;
158 unsigned long long left;
/* Pipe-style engines cannot seek/pre-read. */
162 if (td->io_ops->flags & FIO_PIPEIO)
165 if (!fio_file_open(f)) {
166 if (td->io_ops->open_file(td, f)) {
167 log_err("fio: cannot pre-read, failed to open file\n");
/* Temporarily mark the thread as pre-reading for status reporting. */
173 old_runstate = td->runstate;
174 td_set_runstate(td, TD_PRE_READING);
176 bs = td->o.max_bs[DDIR_READ];
/* NOTE(review): lseek return value is not checked here. */
180 lseek(f->fd, f->file_offset, SEEK_SET);
183 while (left && !td->terminate) {
187 r = read(f->fd, b, bs);
193 td_verror(td, EIO, "pre_read");
198 td_set_runstate(td, old_runstate);
/* Only close if we opened it ourselves (did_open). */
201 td->io_ops->close_file(td, f);
/*
 * Pick a random file size in [file_size_low, file_size_high), rounded
 * down to a multiple of rw_min_bs.
 */
206 static unsigned long long get_rand_file_size(struct thread_data *td)
208 unsigned long long ret, sized;
211 r = os_random_long(&td->file_size_state);
212 sized = td->o.file_size_high - td->o.file_size_low;
/* Scale the random value into the [0, sized) range via double math. */
213 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
214 ret += td->o.file_size_low;
/* Align down to the minimum block size. */
215 ret -= (ret % td->o.rw_min_bs);
/*
 * Fill in f->real_file_size for a regular file via stat(2).
 * NOTE(review): the error string says "fstat" but the call is stat() —
 * the message is misleading; consider fixing in the full source.
 */
219 static int file_size(struct thread_data *td, struct fio_file *f)
223 if (stat(f->file_name, &st) == -1) {
224 td_verror(td, errno, "fstat")
228 f->real_file_size = st.st_size;
/*
 * Determine the size of a block device by opening it and querying
 * blockdev_size(). Sets f->real_file_size on success; the device is
 * closed again on both success and error paths.
 */
232 static int bdev_size(struct thread_data *td, struct fio_file *f)
234 unsigned long long bytes;
237 if (td->io_ops->open_file(td, f)) {
238 log_err("fio: failed opening blockdev %s for size check\n",
243 r = blockdev_size(f->fd, &bytes);
245 td_verror(td, r, "blockdev_size");
/* A zero-sized device is treated as an error. */
250 log_err("%s: zero sized block device?\n", f->file_name);
254 f->real_file_size = bytes;
255 td->io_ops->close_file(td, f);
/* Error path close (the success-path close is above). */
258 td->io_ops->close_file(td, f);
/*
 * Resolve f->real_file_size by file type (regular file vs block device),
 * validate file_offset against it, and mark the size as known so this
 * is only done once per file.
 */
262 static int get_file_size(struct thread_data *td, struct fio_file *f)
266 if (fio_file_size_known(f))
269 if (f->filetype == FIO_TYPE_FILE)
270 ret = file_size(td, f);
271 else if (f->filetype == FIO_TYPE_BD)
272 ret = bdev_size(td, f);
/* Other types (char/pipe): size is unknowable, use -1ULL sentinel. */
274 f->real_file_size = -1;
/* An offset beyond the end of the file is a configuration error. */
279 if (f->file_offset > f->real_file_size) {
280 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name,
281 f->file_offset, f->real_file_size);
285 fio_file_set_size_known(f);
/*
 * Drop cached pages for [off, off+len) of file f, dispatching on how the
 * file is accessed: madvise for mmap'd files, fadvise for regular files,
 * blockdev flush for block devices. off/len of -1ULL mean "whole file"
 * (normalized from f->file_offset / f->io_size in the missing lines).
 */
289 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
290 unsigned long long off,
291 unsigned long long len)
298 off = f->file_offset;
/* Still unknown range after normalization: nothing sensible to flush. */
300 if (len == -1ULL || off == -1ULL)
303 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
307 * FIXME: add blockdev flushing too
/* mmap'd file: advise the kernel the mapping is no longer needed. */
310 ret = madvise(f->mmap_ptr, f->mmap_sz, MADV_DONTNEED);
312 (void) madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
314 } else if (f->filetype == FIO_TYPE_FILE) {
315 ret = fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
316 } else if (f->filetype == FIO_TYPE_BD) {
317 ret = blockdev_invalidate_cache(f->fd);
/* Non-root users can't flush block devices; warn and continue. */
318 if (ret < 0 && errno == EACCES && geteuid()) {
320 log_err("fio: only root may flush block "
321 "devices. Cache flush bypassed!\n");
/* Char devices and pipes have no cache to invalidate. */
326 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
/* ret < 0: syscall failed, errno holds the cause; ret > 0: fadvise-style
 * error code returned directly. */
330 td_verror(td, errno, "invalidate_cache");
332 } else if (ret > 0) {
333 td_verror(td, ret, "invalidate_cache");
/*
 * Public wrapper: invalidate the cache for the whole of file f.
 * No-op (by the missing early return) if the file is not open.
 */
341 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
343 if (!fio_file_open(f))
/* -1ULL/-1ULL means "entire file" to __file_invalidate_cache(). */
346 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
/*
 * Default ->close_file implementation: close f->fd. Fragment — the
 * error handling after close() is not visible here.
 */
349 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
353 dprint(FD_FILE, "fd close %s\n", f->file_name);
357 if (close(f->fd) < 0)
/*
 * Open f, first checking the global file hash for an already-open entry
 * with the same name. If found, the lock bookkeeping is copied over
 * (the existing comment notes this copy is racy without __f->lock held).
 * Returns whether the file came from the hash; opens f->fd otherwise.
 */
364 static int file_lookup_open(struct fio_file *f, int flags)
366 struct fio_file *__f;
369 __f = lookup_file_hash(f->file_name);
371 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
373 * racy, need the __f->lock locked
376 f->lock_owner = __f->lock_owner;
377 f->lock_batch = __f->lock_batch;
378 f->lock_ddir = __f->lock_ddir;
381 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
/* Not shared: open it ourselves with conservative 0600 perms. */
385 f->fd = open(f->file_name, flags, 0600);
/*
 * Default ->open_file implementation. Builds the open(2) flags from job
 * options (O_DIRECT, noatime, create-on-open, sync flags in the missing
 * lines), special-cases "-" as stdin/stdout, retries without O_NOATIME
 * on EPERM, and registers the fd in the shared file hash.
 */
389 int generic_open_file(struct thread_data *td, struct fio_file *f)
395 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdio; refuse combinations that can't work (missing lines
 * show only part of this check). */
397 if (!strcmp(f->file_name, "-")) {
399 log_err("fio: can't read/write to stdin/out\n");
405 * move output logging to stderr, if we are writing to stdout
412 flags |= OS_O_DIRECT;
/* noatime only makes sense for real files, not devices/pipes. */
415 if (f->filetype != FIO_TYPE_FILE)
416 flags |= FIO_O_NOATIME;
417 if (td->o.create_on_open)
/* Write path: regular files may go through the hash; stdout is dup'd. */
425 if (f->filetype == FIO_TYPE_FILE)
429 f->fd = dup(STDOUT_FILENO);
431 from_hash = file_lookup_open(f, flags);
/* Read path mirrors the above with stdin. */
433 if (f->filetype == FIO_TYPE_CHAR && !read_only)
439 f->fd = dup(STDIN_FILENO);
441 from_hash = file_lookup_open(f, flags);
445 char buf[FIO_VERROR_SIZE];
/* EPERM with O_NOATIME: we lack ownership — drop the flag and retry. */
448 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
449 flags &= ~FIO_O_NOATIME;
453 snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
455 td_verror(td, __e, buf);
/* Newly opened fd: publish it in the hash so other threads can share. */
458 if (!from_hash && f->fd != -1) {
459 if (add_file_hash(f)) {
463 * OK to ignore, we haven't done anything with it
465 ret = generic_close_file(td, f);
/* Default ->get_file_size implementation: delegate to get_file_size(). */
473 int generic_get_file_size(struct thread_data *td, struct fio_file *f)
475 return get_file_size(td, f);
/*
 * Probe the size of every file in the job so ->real_file_size is set.
 * A missing file (ENOENT) is tolerated; other errors are logged. Files
 * whose size can't be determined fall back to size / nr_files.
 */
479 * open/close all files, so that ->real_file_size gets set
481 static int get_file_sizes(struct thread_data *td)
487 for_each_file(td, f, i) {
488 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
491 if (td_io_get_file_size(td, f)) {
492 if (td->error != ENOENT) {
493 log_err("%s\n", td->verror);
/* Unknown size but a total job size was given: split it evenly. */
499 if (f->real_file_size == -1ULL && td->o.size)
500 f->real_file_size = td->o.size / td->o.nr_files;
/* Field of struct fio_mount (declaration starts outside this excerpt):
 * linked-list node for the per-mount free-space scan below. */
507 struct flist_head list;
/*
 * Sum the free space of each unique filesystem (keyed by st_dev) that
 * the job's files live on. Builds a temporary list of fio_mount entries,
 * then walks it, querying get_fs_size() per mount and freeing as it goes.
 * Used by fill_device to know how much can be written.
 */
514 * Get free number of bytes for each file on each unique mount.
516 static unsigned long long get_fs_free_counts(struct thread_data *td)
518 struct flist_head *n, *tmp;
519 unsigned long long ret;
520 struct fio_mount *fm;
525 for_each_file(td, f, i) {
/* NOTE(review): strcpy into buf with no bounds check in visible lines —
 * assumes file_name fits; confirm buf is PATH_MAX sized in full source. */
529 strcpy(buf, f->file_name);
/* stat the file; on failure the missing lines presumably fall back to
 * the parent directory before giving up. */
531 if (stat(buf, &sb) < 0) {
535 if (stat(buf, &sb) < 0)
/* Skip devices we have already recorded. */
540 flist_for_each(n, &list) {
541 fm = flist_entry(n, struct fio_mount, list);
542 if (fm->key == sb.st_dev)
/* New mount: record it. NOTE(review): malloc result unchecked here. */
551 fm = malloc(sizeof(*fm));
552 strcpy(fm->__base, buf);
553 fm->base = basename(fm->__base);
555 flist_add(&fm->list, &list);
/* Second pass: tally free space and tear the list down. */
559 flist_for_each_safe(n, tmp, &list) {
560 unsigned long long sz;
562 fm = flist_entry(n, struct fio_mount, list);
563 flist_del(&fm->list);
565 sz = get_fs_size(fm->base);
566 if (sz && sz != -1ULL)
/*
 * Main file setup entry point, called once per job before it runs:
 *  1. determine each file's real size (via ioengine setup() or stat),
 *  2. validate that a usable total size exists,
 *  3. compute per-file io_size from size=/file_size_low/high options,
 *  4. lay out (extend) files that are smaller than required.
 * Returns 0 on success. Fragmentary excerpt — several error paths and
 * intermediate bookkeeping lines are missing.
 */
576 * Open the files and setup files sizes, creating files if necessary.
578 int setup_files(struct thread_data *td)
580 unsigned long long total_size, extend_size;
583 int err = 0, need_extend;
585 dprint(FD_FILE, "setup files\n");
/* Replaying an iolog: sizes come from the log, skip normal sizing. */
587 if (td->o.read_iolog_file)
591 * if ioengine defines a setup() method, it's responsible for
592 * opening the files and setting f->real_file_size to indicate
593 * the valid range for that file.
595 if (td->io_ops->setup)
596 err = td->io_ops->setup(td);
598 err = get_file_sizes(td);
604 * check sizes. if the files/devices do not exist and the size
605 * isn't passed to fio, abort.
608 for_each_file(td, f, i) {
609 if (f->real_file_size == -1ULL)
612 total_size += f->real_file_size;
615 if (td->o.fill_device)
616 td->fill_device_size = get_fs_free_counts(td);
619 * device/file sizes are zero and no size given, punt
621 if ((!total_size || total_size == -1ULL) && !td->o.size &&
622 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
623 log_err("%s: you need to specify size=\n", td->o.name);
624 td_verror(td, EINVAL, "total_file_size");
629 * now file sizes are known, so we can set ->io_size. if size= is
630 * not given, ->io_size is just equal to ->real_file_size. if size
631 * is given, ->io_size is size / nr_files.
633 extend_size = total_size = 0;
635 for_each_file(td, f, i) {
636 f->file_offset = td->o.start_offset;
638 if (!td->o.file_size_low) {
640 * no file size range given, file size is equal to
641 * total size divided by number of files. if that is
642 * zero, set it to the real file size.
644 f->io_size = td->o.size / td->o.nr_files;
646 f->io_size = f->real_file_size - f->file_offset;
647 } else if (f->real_file_size < td->o.file_size_low ||
648 f->real_file_size > td->o.file_size_high) {
649 if (f->file_offset > td->o.file_size_low)
652 * file size given. if it's fixed, use that. if it's a
653 * range, generate a random size in-between.
655 if (td->o.file_size_low == td->o.file_size_high) {
656 f->io_size = td->o.file_size_low
659 f->io_size = get_rand_file_size(td)
663 f->io_size = f->real_file_size - f->file_offset;
665 if (f->io_size == -1ULL)
668 total_size += f->io_size;
/* Regular file smaller than what the job needs: either mark it for
 * extension here, or defer to create_on_open. */
670 if (f->filetype == FIO_TYPE_FILE &&
671 (f->io_size + f->file_offset) > f->real_file_size &&
672 !(td->io_ops->flags & FIO_DISKLESSIO)) {
673 if (!td->o.create_on_open) {
675 extend_size += (f->io_size + f->file_offset);
677 f->real_file_size = f->io_size + f->file_offset;
678 fio_file_set_extend(f);
682 if (!td->o.size || td->o.size > total_size)
683 td->o.size = total_size;
686 * See if we need to extend some files
691 log_info("%s: Laying out IO file(s) (%u file(s) /"
692 " %LuMB)\n", td->o.name, need_extend,
695 for_each_file(td, f, i) {
696 unsigned long long old_len = -1ULL, extend_len = -1ULL;
698 if (!fio_file_extend(f))
701 assert(f->filetype == FIO_TYPE_FILE);
702 fio_file_clear_extend(f);
703 if (!td->o.fill_device) {
704 old_len = f->real_file_size;
705 extend_len = f->io_size + f->file_offset -
708 f->real_file_size = (f->io_size + f->file_offset);
709 err = extend_file(td, f);
/* Drop cached pages over the newly extended region. */
713 err = __file_invalidate_cache(td, f, old_len,
/* Zones default to the full size when not explicitly set. */
726 if (!td->o.zone_size)
727 td->o.zone_size = td->o.size;
730 * iolog already set the total io size, if we read back
733 if (!td->o.read_iolog_file)
734 td->total_io_size = td->o.size * td->o.loops;
737 log_err("%s: you need to specify valid offset=\n", td->o.name);
/*
 * Pre-read every file in the job to warm caches.
 * NOTE(review): pre_read_file()'s return value is ignored here —
 * failures are best-effort by design, presumably; confirm intent.
 */
741 int pre_read_files(struct thread_data *td)
746 dprint(FD_FILE, "pre_read files\n");
748 for_each_file(td, f, i) {
749 pre_read_file(td, f);
/*
 * Allocate the per-file random-access bitmap used to guarantee full
 * coverage for random workloads. Skipped when norandommap is set or the
 * job isn't random. On allocation failure, either errors out or (with
 * softrandommap) continues without a map.
 */
755 int init_random_map(struct thread_data *td)
757 unsigned long long blocks, num_maps;
761 if (td->o.norandommap || !td_random(td))
764 for_each_file(td, f, i) {
/* Round up: number of rw_min_bs-sized blocks in the file... */
765 blocks = (f->real_file_size + td->o.rw_min_bs - 1) /
766 (unsigned long long) td->o.rw_min_bs;
/* ...then number of map words needed to hold one bit per block. */
767 num_maps = (blocks + BLOCKS_PER_MAP - 1) /
768 (unsigned long long) BLOCKS_PER_MAP;
769 f->file_map = smalloc(num_maps * sizeof(int));
771 f->num_maps = num_maps;
/* Allocation failed: hard error unless softrandommap allows fallback. */
774 if (!td->o.softrandommap) {
775 log_err("fio: failed allocating random map. If running"
776 " a large number of jobs, try the 'norandommap'"
777 " option or set 'softrandommap'. Or give"
778 " a larger --alloc-size to fio.\n");
782 log_info("fio: file %s failed allocating random map. Running "
783 "job without.\n", f->file_name);
/* Close every file in the job that is currently open. */
790 void close_files(struct thread_data *td)
795 for_each_file(td, f, i) {
796 if (fio_file_open(f))
797 td_io_close_file(td, f);
/*
 * Final teardown: close all files, unlink regular files if the job's
 * unlink option is set, and release per-file resources (freeing happens
 * in lines not visible here). Clears td->o.filename at the end.
 */
801 void close_and_free_files(struct thread_data *td)
806 dprint(FD_FILE, "close files\n");
808 for_each_file(td, f, i) {
/* unlink= option: remove job-created regular files on exit. */
809 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
810 dprint(FD_FILE, "free unlink %s\n", f->file_name);
811 unlink(f->file_name);
814 if (fio_file_open(f))
815 td_io_close_file(td, f);
826 td->o.filename = NULL;
/*
 * Classify f->filetype: "-" is a pipe (stdio), otherwise default to a
 * regular file and refine via stat() mode bits (block dev, char dev,
 * fifo). A failed stat leaves the default FIO_TYPE_FILE in place.
 */
833 static void get_file_type(struct fio_file *f)
837 if (!strcmp(f->file_name, "-"))
838 f->filetype = FIO_TYPE_PIPE;
840 f->filetype = FIO_TYPE_FILE;
842 if (!stat(f->file_name, &sb)) {
843 if (S_ISBLK(sb.st_mode))
844 f->filetype = FIO_TYPE_BD;
845 else if (S_ISCHR(sb.st_mode))
846 f->filetype = FIO_TYPE_CHAR;
847 else if (S_ISFIFO(sb.st_mode))
848 f->filetype = FIO_TYPE_PIPE;
/*
 * Allocate a new fio_file for fname, append it to td->files (growing the
 * array if needed), build its full path with the job directory prefix,
 * set up its lock per file_lock_mode, and classify its type.
 * Returns the index of the added file (in lines not visible here).
 */
852 int add_file(struct thread_data *td, const char *fname)
854 int cur_files = td->files_index;
855 char file_name[PATH_MAX];
859 dprint(FD_FILE, "add file %s\n", fname);
861 f = smalloc(sizeof(*f));
863 log_err("fio: smalloc OOM\n");
/* Grow the file array one slot at a time as files are added. */
869 if (td->files_size <= td->files_index) {
870 int new_size = td->o.nr_files + 1;
872 dprint(FD_FILE, "resize file array to %d files\n", new_size);
/* NOTE(review): realloc result unchecked in visible lines. */
874 td->files = realloc(td->files, new_size * sizeof(f));
875 td->files_size = new_size;
877 td->files[cur_files] = f;
880 * init function, io engine may not be loaded yet
882 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
883 f->real_file_size = -1ULL;
/* Prefix the job directory if set.
 * NOTE(review): sprintf into a PATH_MAX buffer with no length check —
 * a very long directory+name could overflow; snprintf would be safer. */
886 len = sprintf(file_name, "%s/", td->o.directory);
888 sprintf(file_name + len, "%s", fname);
889 f->file_name = smalloc_strdup(file_name);
891 log_err("fio: smalloc OOM\n");
/* Create the lock object matching the configured locking mode. */
897 switch (td->o.file_lock_mode) {
900 case FILE_LOCK_READWRITE:
901 f->lock = fio_mutex_rw_init();
903 case FILE_LOCK_EXCLUSIVE:
904 f->lock = fio_mutex_init(1);
907 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
912 if (f->filetype == FIO_TYPE_FILE)
913 td->nr_normal_files++;
915 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
/* Take a reference on an open file (refcount bump in missing lines). */
921 void get_file(struct fio_file *f)
923 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
924 assert(fio_file_open(f));
/*
 * Drop a reference on file f; when the last reference goes away,
 * optionally fsync (fsync_on_close) and close via the ioengine, then
 * mark the file closed. Returns an error from fsync/close if any.
 */
928 int put_file(struct thread_data *td, struct fio_file *f)
930 int f_ret = 0, ret = 0;
932 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
934 if (!fio_file_open(f)) {
939 assert(f->references);
/* Honor fsync_on_close before actually closing. */
943 if (should_fsync(td) && td->o.fsync_on_close)
944 f_ret = fsync(f->fd);
946 if (td->io_ops->close_file)
947 ret = td->io_ops->close_file(td, f);
953 fio_file_clear_open(f);
/*
 * Acquire the file lock for the given I/O direction. Re-entrant for the
 * current owner while its lock_batch budget lasts; otherwise takes the
 * rw lock (read or write side by ddir) or the exclusive mutex depending
 * on file_lock_mode, then refreshes the batch counter.
 */
958 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
960 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
/* Already own it and have batch budget left: skip the lock entirely. */
963 if (f->lock_owner == td && f->lock_batch--)
966 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
967 if (ddir == DDIR_READ)
968 fio_mutex_down_read(f->lock);
970 fio_mutex_down_write(f->lock);
971 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
972 fio_mutex_down(f->lock);
975 f->lock_batch = td->o.lockfile_batch;
/*
 * Release the file lock taken by lock_file(). For the rw mode, the
 * mutex value is inspected to decide when the last holder is leaving
 * (val == 1 for the last reader, -1 for the writer) so lock_owner can
 * be cleared before the actual up.
 */
979 void unlock_file(struct thread_data *td, struct fio_file *f)
981 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
986 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
987 const int is_read = f->lock_ddir == DDIR_READ;
988 int val = fio_mutex_getval(f->lock);
/* Last reader (val 1) or the sole writer (val -1): ownership ends. */
990 if ((is_read && val == 1) || (!is_read && val == -1))
991 f->lock_owner = NULL;
994 fio_mutex_up_read(f->lock);
996 fio_mutex_up_write(f->lock);
997 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
998 int val = fio_mutex_getval(f->lock);
1001 f->lock_owner = NULL;
1003 fio_mutex_up(f->lock);
/*
 * Release all lock state this thread holds on f (no-op when td is not
 * the owner). Body largely missing from this excerpt.
 */
1007 void unlock_file_all(struct thread_data *td, struct fio_file *f)
1009 if (f->lock_owner != td)
/*
 * Recursively walk dirname, adding every regular file found to the job
 * and descending into subdirectories. "." and ".." are skipped; a file
 * that vanishes between readdir and lstat (ENOENT) is tolerated.
 */
1016 static int recurse_dir(struct thread_data *td, const char *dirname)
1022 D = opendir(dirname);
1024 char buf[FIO_VERROR_SIZE];
1026 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
1027 td_verror(td, errno, buf);
1031 while ((dir = readdir(D)) != NULL) {
1032 char full_path[PATH_MAX];
1035 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
/* NOTE(review): sprintf into PATH_MAX buffer — deep trees could
 * overflow; snprintf would be safer. */
1038 sprintf(full_path, "%s/%s", dirname, dir->d_name);
1040 if (lstat(full_path, &sb) == -1) {
1041 if (errno != ENOENT) {
1042 td_verror(td, errno, "stat");
1047 if (S_ISREG(sb.st_mode)) {
1048 add_file(td, full_path);
/* Skip anything that is neither a regular file nor a directory
 * (symlinks, devices, fifos — lstat does not follow links). */
1052 if (!S_ISDIR(sb.st_mode))
1055 ret = recurse_dir(td, full_path);
/* Add all files under path recursively; log the resulting count. */
1064 int add_dir_files(struct thread_data *td, const char *path)
1066 int ret = recurse_dir(td, path);
1069 log_info("fio: opendir added %d files\n", td->o.nr_files);
/*
 * Clone org's file list into td (used when forking jobs): allocates a
 * fresh array and deep-copies each fio_file's name and type.
 * NOTE(review): malloc result unchecked in the visible lines.
 */
1074 void dup_files(struct thread_data *td, struct thread_data *org)
1079 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1084 td->files = malloc(org->files_index * sizeof(f));
1086 for_each_file(org, f, i) {
1087 struct fio_file *__f;
1089 __f = smalloc(sizeof(*__f));
1091 log_err("fio: smalloc OOM\n");
/* Name is duplicated, not shared, so each td owns its copy. */
1097 __f->file_name = smalloc_strdup(f->file_name);
1098 if (!__f->file_name) {
1099 log_err("fio: smalloc OOM\n");
1103 __f->filetype = f->filetype;
/* Linear search of the job's files by name. */
1111 * Returns the index that matches the filename, or -1 if not there
1113 int get_fileno(struct thread_data *td, const char *fname)
1118 for_each_file(td, f, i)
1119 if (!strcmp(f->file_name, fname))
/*
 * Reset the job's file bookkeeping (used by log replay, which adds and
 * removes files dynamically). The per-file freeing presumably happens
 * in lines not visible here.
 */
1126 * For log usage, where we add/open/close files automatically
1128 void free_release_files(struct thread_data *td)
1131 td->files_index = 0;
1132 td->nr_normal_files = 0;