/*
 * Lay a file out to its full size: optionally unlink and recreate it
 * when a fresh layout is required, size it with ftruncate() (and
 * posix_fallocate() where available), then fill it with zeroed writes
 * of up to max_bs[DDIR_WRITE] bytes.
 * Returns 0 on success, non-zero on error (reported via td_verror()).
 */
17 * Leaves f->fd open on success, caller must close
19 static int extend_file(struct thread_data *td, struct fio_file *f)
21 int r, new_layout = 0, unlink_file = 0, flags;
22 unsigned long long left;
/* a read-only invocation must never create or extend files */
27 log_err("fio: refusing extend of file due to read-only\n");
32 * check if we need to lay the file out complete again. fio
33 * does that for operations involving reads, or for writes
34 * where overwrite is set
36 if (td_read(td) || (td_write(td) && td->o.overwrite) ||
37 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
/* a write job without overwrite drops the old file first */
39 if (td_write(td) && !td->o.overwrite)
42 if (unlink_file || new_layout) {
43 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
/* ENOENT is fine here: the file simply did not exist yet */
44 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
45 td_verror(td, errno, "unlink");
50 flags = O_WRONLY | O_CREAT;
54 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
55 f->fd = open(f->file_name, flags, 0644);
57 td_verror(td, errno, "open");
64 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
66 if (ftruncate(f->fd, f->real_file_size) == -1) {
67 td_verror(td, errno, "ftruncate");
71 #ifdef FIO_HAVE_FALLOCATE
72 dprint(FD_FILE, "fallocate file %s, size %llu\n", f->file_name,
74 r = posix_fallocate(f->fd, 0, f->real_file_size);
/*
 * NOTE(review): posix_fallocate() returns a positive errno value on
 * failure (it does not set errno), so strerror(-r) looks wrong — this
 * should likely be strerror(r). Confirm against the error check that
 * guards this log_err() (not visible here) before changing.
 */
76 log_err("fio: posix_fallocate fails: %s\n", strerror(-r));
/* zero-filled staging buffer used to lay the file out in chunks */
79 b = malloc(td->o.max_bs[DDIR_WRITE]);
80 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
82 left = f->real_file_size;
/* write until the whole size is laid out, or the job is stopping */
83 while (left && !td->terminate) {
84 bs = td->o.max_bs[DDIR_WRITE];
88 r = write(f->fd, b, bs);
95 td_verror(td, errno, "write");
/* short write with no errno set: report as a generic I/O error */
97 td_verror(td, EIO, "write");
/* terminated mid-layout: remove the partially laid-out file */
104 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
105 unlink(f->file_name);
106 } else if (td->o.create_fsync) {
107 if (fsync(f->fd) < 0) {
108 td_verror(td, errno, "fsync");
/*
 * Pick a random file size in [file_size_low, file_size_high),
 * rounded down to a multiple of rw_min_bs.
 */
122 static unsigned long long get_rand_file_size(struct thread_data *td)
124 unsigned long long ret, sized;
127 r = os_random_long(&td->file_size_state);
128 sized = td->o.file_size_high - td->o.file_size_low;
/* scale the raw random value into [0, sized) */
129 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
130 ret += td->o.file_size_low;
/* align the result to the minimum block size */
131 ret -= (ret % td->o.rw_min_bs);
/*
 * Probe a regular file's size via fstat() and store it in
 * f->real_file_size. Returns non-zero on fstat failure.
 */
135 static int file_size(struct thread_data *td, struct fio_file *f)
139 if (fstat(f->fd, &st) == -1) {
140 td_verror(td, errno, "fstat");
144 f->real_file_size = st.st_size;
/*
 * Probe a block device's size via the blockdev_size() helper and
 * store it in f->real_file_size. Returns non-zero on failure.
 */
148 static int bdev_size(struct thread_data *td, struct fio_file *f)
150 unsigned long long bytes;
153 r = blockdev_size(f->fd, &bytes);
/* blockdev_size() returns the error code directly, not via errno */
155 td_verror(td, r, "blockdev_size");
159 f->real_file_size = bytes;
/*
 * Determine f->real_file_size based on the file type, validate that
 * the configured offset fits inside it, and cache the result with
 * FIO_SIZE_KNOWN so repeat calls are cheap.
 */
163 static int get_file_size(struct thread_data *td, struct fio_file *f)
/* already probed once — nothing to do */
167 if (f->flags & FIO_SIZE_KNOWN)
170 if (f->filetype == FIO_TYPE_FILE)
171 ret = file_size(td, f);
172 else if (f->filetype == FIO_TYPE_BD)
173 ret = bdev_size(td, f);
/* char devices / pipes have no meaningful size */
175 f->real_file_size = -1;
/* an offset beyond the end of the file is a configuration error */
180 if (f->file_offset > f->real_file_size) {
181 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name,
182 f->file_offset, f->real_file_size);
186 f->flags |= FIO_SIZE_KNOWN;
/*
 * Drop cached pages for a range of the file, using the mechanism
 * appropriate to the file type: madvise() for mmap'ed files,
 * posix_fadvise(DONTNEED) for regular files, and a blockdev flush
 * ioctl for block devices. Char devices and pipes are a no-op.
 */
190 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
191 unsigned long long off,
192 unsigned long long len)
/* presumably off/len of -1 mean "whole io range" — confirm at the
 * (unseen) fixup code above this assignment */
199 off = f->file_offset;
201 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
205 * FIXME: add blockdev flushing too
208 ret = madvise(f->mmap, len, MADV_DONTNEED);
209 else if (f->filetype == FIO_TYPE_FILE) {
210 ret = fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
211 } else if (f->filetype == FIO_TYPE_BD) {
212 ret = blockdev_invalidate_cache(f->fd);
/* flushing a block device needs root; degrade gracefully without it */
213 if (ret < 0 && errno == EACCES && geteuid()) {
215 log_err("fio: only root may flush block "
216 "devices. Cache flush bypassed!\n");
221 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
/* ret < 0: errno-style failure */
225 td_verror(td, errno, "invalidate_cache");
/* ret > 0: the error code was returned directly (fadvise-style) */
227 } else if (ret > 0) {
228 td_verror(td, ret, "invalidate_cache");
/*
 * Invalidate the cache for the whole file; a no-op when the file is
 * not currently open.
 */
236 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
238 if (!(f->flags & FIO_FILE_OPEN))
/* -1/-1 requests the full range (fixed up in __file_invalidate_cache) */
241 return __file_invalidate_cache(td, f, -1, -1);
/*
 * Default close_file hook: close the descriptor and report any
 * failure from close().
 */
244 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
248 dprint(FD_FILE, "fd close %s\n", f->file_name);
252 if (close(f->fd) < 0)
/*
 * Open a file, first checking the global file hash so multiple jobs
 * touching the same name can share lock state. Returns whether the
 * file was found in the hash; opens a fresh fd otherwise.
 */
259 static int file_lookup_open(struct fio_file *f, int flags)
261 struct fio_file *__f;
264 __f = lookup_file_hash(f->file_name);
266 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
/* copy of shared lock state is acknowledged racy by the original */
268 * racy, need the __f->lock locked
271 f->lock_owner = __f->lock_owner;
272 f->lock_batch = __f->lock_batch;
273 f->lock_ddir = __f->lock_ddir;
276 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
280 f->fd = open(f->file_name, flags, 0600);
/*
 * Default open_file hook. Handles the special "-" name (stdin/stdout
 * via dup()), builds open(2) flags (O_DIRECT, O_NOATIME where
 * applicable), retries without O_NOATIME on EPERM, probes the file
 * size, and registers newly opened files in the global file hash.
 */
284 int generic_open_file(struct thread_data *td, struct fio_file *f)
290 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdio; that requires a pure read or pure write workload */
292 if (!strcmp(f->file_name, "-")) {
294 log_err("fio: can't read/write to stdin/out\n");
300 * move output logging to stderr, if we are writing to stdout
307 flags |= OS_O_DIRECT;
/* noatime only makes sense for non-regular files here */
310 if (f->filetype != FIO_TYPE_FILE)
311 flags |= FIO_O_NOATIME;
318 if (f->filetype == FIO_TYPE_FILE)
/* write-to-stdout case: duplicate the existing descriptor */
322 f->fd = dup(STDOUT_FILENO);
324 from_hash = file_lookup_open(f, flags);
326 if (f->filetype == FIO_TYPE_CHAR && !read_only)
/* read-from-stdin case */
332 f->fd = dup(STDIN_FILENO);
334 from_hash = file_lookup_open(f, flags);
338 char buf[FIO_VERROR_SIZE];
/* O_NOATIME needs privileges; retry the open without it on EPERM */
341 if (errno == EPERM && (flags & FIO_O_NOATIME)) {
342 flags &= ~FIO_O_NOATIME;
346 snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
348 td_verror(td, __e, buf);
351 if (get_file_size(td, f))
/* register fresh fds in the hash so other jobs can share them */
354 if (!from_hash && f->fd != -1) {
355 if (add_file_hash(f)) {
359 * OK to ignore, we haven't done anything with it
361 ret = generic_close_file(td, f);
/*
 * Open every file belonging to the job. If the fd limit is hit
 * (EMFILE), clamp o.open_files to what we managed to open; on any
 * other failure, close everything opened so far and return the error.
 */
372 int open_files(struct thread_data *td)
378 dprint(FD_FILE, "open files\n");
380 for_each_file(td, f, i) {
381 err = td_io_open_file(td, f);
/* out of descriptors: settle for the files we already have open */
383 if (td->error == EMFILE) {
384 log_err("fio: limited open files to: %d\n",
386 td->o.open_files = td->nr_open_files;
393 if (td->o.open_files == td->nr_open_files)
/* error path: undo the opens done so far */
400 for_each_file(td, f, i)
401 td_io_close_file(td, f);
407 * open/close all files, so that ->real_file_size gets set
409 static int get_file_sizes(struct thread_data *td)
415 for_each_file(td, f, i) {
416 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
/* a missing file (ENOENT) is tolerated here: it may be created later */
419 if (td->io_ops->open_file(td, f)) {
420 if (td->error != ENOENT) {
421 log_err("%s\n", td->verror);
426 if (td->io_ops->close_file)
427 td->io_ops->close_file(td, f);
/* size still unknown but size= given: split it evenly across files */
430 if (f->real_file_size == -1ULL && td->o.size)
431 f->real_file_size = td->o.size / td->o.nr_files;
438 * Open the files and setup files sizes, creating files if necessary.
440 int setup_files(struct thread_data *td)
442 unsigned long long total_size, extend_size;
445 int err = 0, need_extend;
447 dprint(FD_FILE, "setup files\n");
/* replaying an iolog: sizes come from the log, skip normal setup */
449 if (td->o.read_iolog_file)
453 * if ioengine defines a setup() method, it's responsible for
454 * opening the files and setting f->real_file_size to indicate
455 * the valid range for that file.
457 if (td->io_ops->setup)
458 err = td->io_ops->setup(td);
460 err = get_file_sizes(td);
466 * check sizes. if the files/devices do not exist and the size
467 * isn't passed to fio, abort.
470 for_each_file(td, f, i) {
471 if (f->real_file_size == -1ULL)
474 total_size += f->real_file_size;
478 * device/file sizes are zero and no size given, punt
480 if ((!total_size || total_size == -1ULL) && !td->o.size &&
481 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
482 log_err("%s: you need to specify size=\n", td->o.name);
483 td_verror(td, EINVAL, "total_file_size");
488 * now file sizes are known, so we can set ->io_size. if size= is
489 * not given, ->io_size is just equal to ->real_file_size. if size
490 * is given, ->io_size is size / nr_files.
492 extend_size = total_size = 0;
494 for_each_file(td, f, i) {
495 f->file_offset = td->o.start_offset;
497 if (!td->o.file_size_low) {
499 * no file size range given, file size is equal to
500 * total size divided by number of files. if that is
501 * zero, set it to the real file size.
503 f->io_size = td->o.size / td->o.nr_files;
505 f->io_size = f->real_file_size - f->file_offset;
/* the real size falls outside the requested [low, high] range */
506 } else if (f->real_file_size < td->o.file_size_low ||
507 f->real_file_size > td->o.file_size_high) {
508 if (f->file_offset > td->o.file_size_low)
511 * file size given. if it's fixed, use that. if it's a
512 * range, generate a random size in-between.
514 if (td->o.file_size_low == td->o.file_size_high) {
515 f->io_size = td->o.file_size_low
518 f->io_size = get_rand_file_size(td)
522 f->io_size = f->real_file_size - f->file_offset;
524 if (f->io_size == -1ULL)
527 total_size += f->io_size;
/* regular files smaller than the io range must be laid out/extended,
 * unless the engine is diskless and never touches real storage */
529 if (f->filetype == FIO_TYPE_FILE &&
530 (f->io_size + f->file_offset) > f->real_file_size &&
531 !(td->io_ops->flags & FIO_DISKLESSIO)) {
533 extend_size += (f->io_size + f->file_offset);
534 f->flags |= FIO_FILE_EXTEND;
538 if (!td->o.size || td->o.size > total_size)
539 td->o.size = total_size;
542 * See if we need to extend some files
547 log_info("%s: Laying out IO file(s) (%u file(s) /"
548 " %LuMiB)\n", td->o.name, need_extend,
551 for_each_file(td, f, i) {
552 unsigned long long old_len, extend_len;
554 if (!(f->flags & FIO_FILE_EXTEND))
557 assert(f->filetype == FIO_TYPE_FILE);
558 f->flags &= ~FIO_FILE_EXTEND;
559 old_len = f->real_file_size;
560 extend_len = f->io_size + f->file_offset - old_len;
561 f->real_file_size = (f->io_size + f->file_offset);
562 err = extend_file(td, f);
/* drop cache for the newly written extension range */
566 err = __file_invalidate_cache(td, f, old_len,
/* no zone size given: treat the whole io size as one zone */
579 if (!td->o.zone_size)
580 td->o.zone_size = td->o.size;
583 * iolog already set the total io size, if we read back
586 if (!td->o.read_iolog_file)
587 td->total_io_size = td->o.size * td->o.loops;
590 log_err("%s: you need to specify valid offset=\n", td->o.name);
/*
 * Allocate the per-file random-access bitmap used to guarantee full
 * coverage for random workloads. Skipped entirely for sequential
 * jobs or when norandommap is set; with softrandommap, an allocation
 * failure degrades to running without the map instead of erroring.
 */
594 int init_random_map(struct thread_data *td)
596 unsigned long long blocks, num_maps;
600 if (td->o.norandommap || !td_random(td))
603 for_each_file(td, f, i) {
/* number of rw_min_bs-sized blocks, rounded up */
604 blocks = (f->real_file_size + td->o.rw_min_bs - 1) /
605 (unsigned long long) td->o.rw_min_bs;
/* number of map words needed to cover those blocks, rounded up */
606 num_maps = (blocks + BLOCKS_PER_MAP - 1) /
607 (unsigned long long) BLOCKS_PER_MAP;
608 f->file_map = smalloc(num_maps * sizeof(int));
610 f->num_maps = num_maps;
/* hard failure unless the user allowed a soft fallback */
613 if (!td->o.softrandommap) {
614 log_err("fio: failed allocating random map. If running"
615 " a large number of jobs, try the 'norandommap'"
616 " option or set 'softrandommap'. Or give"
617 " a larger --alloc-size to fio.\n");
621 log_info("fio: file %s failed allocating random map. Running "
622 "job without.\n", f->file_name);
/* Close every file belonging to the job via the engine's close hook. */
629 void close_files(struct thread_data *td)
634 for_each_file(td, f, i)
635 td_io_close_file(td, f);
/*
 * Final teardown: close every file, unlink regular files when the
 * unlink option is set, and release the job's file bookkeeping.
 */
638 void close_and_free_files(struct thread_data *td)
643 dprint(FD_FILE, "close files\n");
645 for_each_file(td, f, i) {
/* unlink= only removes real files, never devices or pipes */
646 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
647 dprint(FD_FILE, "free unlink %s\n", f->file_name);
648 unlink(f->file_name);
651 td_io_close_file(td, f);
664 td->o.filename = NULL;
/*
 * Classify the file by name/stat: "-" is a pipe (stdio), otherwise
 * default to a regular file and refine via lstat() into block device,
 * char device, or FIFO.
 */
671 static void get_file_type(struct fio_file *f)
675 if (!strcmp(f->file_name, "-"))
676 f->filetype = FIO_TYPE_PIPE;
/* default: regular file, possibly corrected below */
678 f->filetype = FIO_TYPE_FILE;
680 if (!lstat(f->file_name, &sb)) {
681 if (S_ISBLK(sb.st_mode))
682 f->filetype = FIO_TYPE_BD;
683 else if (S_ISCHR(sb.st_mode))
684 f->filetype = FIO_TYPE_CHAR;
685 else if (S_ISFIFO(sb.st_mode))
686 f->filetype = FIO_TYPE_PIPE;
/*
 * Append a new fio_file for `fname` to the job's file table: allocate
 * the entry from shared memory, grow the files array, build the full
 * path (directory prefix + name), set up file locking per the job's
 * lock mode, and classify the file type.
 */
690 int add_file(struct thread_data *td, const char *fname)
692 int cur_files = td->files_index;
693 char file_name[PATH_MAX];
697 dprint(FD_FILE, "add file %s\n", fname);
/* file entries live in shared memory so forked jobs can see them */
699 f = smalloc(sizeof(*f));
701 log_err("fio: smalloc OOM\n");
707 dprint(FD_FILE, "resize file array to %d files\n", cur_files + 1);
709 td->files = realloc(td->files, (cur_files + 1) * sizeof(f));
710 td->files[cur_files] = f;
713 * init function, io engine may not be loaded yet
715 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
716 f->real_file_size = -1ULL;
/* prepend the job directory, if one was configured */
719 len = sprintf(file_name, "%s/", td->o.directory);
721 sprintf(file_name + len, "%s", fname);
722 f->file_name = smalloc_strdup(file_name);
724 log_err("fio: smalloc OOM\n");
730 switch (td->o.file_lock_mode) {
733 case FILE_LOCK_READWRITE:
734 f->lock = fio_mutex_rw_init();
736 case FILE_LOCK_EXCLUSIVE:
737 f->lock = fio_mutex_init(1);
740 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
745 if (f->filetype == FIO_TYPE_FILE)
746 td->nr_normal_files++;
748 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
/* Take a reference on an already-open file. */
754 void get_file(struct fio_file *f)
756 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
757 assert(f->flags & FIO_FILE_OPEN);
/*
 * Drop a reference on an open file; when the last reference goes,
 * optionally fsync (fsync_on_close) and close it via the engine's
 * close_file hook, clearing FIO_FILE_OPEN.
 */
761 int put_file(struct thread_data *td, struct fio_file *f)
763 int f_ret = 0, ret = 0;
765 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
767 if (!(f->flags & FIO_FILE_OPEN))
/* put without a matching get is a programming error */
770 assert(f->references);
774 if (should_fsync(td) && td->o.fsync_on_close)
775 f_ret = fsync(f->fd);
777 if (td->io_ops->close_file)
778 ret = td->io_ops->close_file(td, f);
784 f->flags &= ~FIO_FILE_OPEN;
/*
 * Acquire the per-file lock for the given direction. READWRITE mode
 * takes a read or write lock depending on ddir; EXCLUSIVE mode takes
 * a plain mutex. A thread that already owns the lock can batch
 * lockfile_batch operations before re-acquiring.
 */
788 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
790 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
/* already ours and batch budget remains: skip the lock round-trip */
793 if (f->lock_owner == td && f->lock_batch--)
796 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
797 if (ddir == DDIR_READ)
798 fio_mutex_down_read(f->lock);
800 fio_mutex_down_write(f->lock);
801 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
802 fio_mutex_down(f->lock);
/* refill the batch budget for the new owner */
805 f->lock_batch = td->o.lockfile_batch;
/*
 * Release the per-file lock. Ownership is cleared only when the mutex
 * value indicates we were the last holder (1 for a single reader /
 * released exclusive, -1 for the writer in rw mode).
 */
809 void unlock_file(struct thread_data *td, struct fio_file *f)
811 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
816 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
817 const int is_read = f->lock_ddir == DDIR_READ;
818 int val = fio_mutex_getval(f->lock);
/* last reader out, or the single writer: nobody owns the lock now */
820 if ((is_read && val == 1) || (!is_read && val == -1))
821 f->lock_owner = NULL;
824 fio_mutex_up_read(f->lock);
826 fio_mutex_up_write(f->lock);
827 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
828 int val = fio_mutex_getval(f->lock);
831 f->lock_owner = NULL;
833 fio_mutex_up(f->lock);
/* Release the file lock, but only if this thread is the owner. */
837 void unlock_file_all(struct thread_data *td, struct fio_file *f)
839 if (f->lock_owner != td)
/*
 * Recursively walk `dirname`, adding every regular file found to the
 * job's file list and descending into subdirectories. Skips "." and
 * ".." and tolerates entries vanishing (ENOENT) during the walk.
 */
846 static int recurse_dir(struct thread_data *td, const char *dirname)
852 D = opendir(dirname);
854 char buf[FIO_VERROR_SIZE];
856 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
857 td_verror(td, errno, buf);
861 while ((dir = readdir(D)) != NULL) {
862 char full_path[PATH_MAX];
865 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
868 sprintf(full_path, "%s/%s", dirname, dir->d_name);
/* entry may have been removed between readdir() and lstat() */
870 if (lstat(full_path, &sb) == -1) {
871 if (errno != ENOENT) {
872 td_verror(td, errno, "stat");
877 if (S_ISREG(sb.st_mode)) {
878 add_file(td, full_path);
/* anything that is neither a regular file nor a directory is skipped */
882 if (!S_ISDIR(sb.st_mode))
885 ret = recurse_dir(td, full_path);
/* Add every regular file under `path` (recursively) to the job. */
894 int add_dir_files(struct thread_data *td, const char *path)
896 int ret = recurse_dir(td, path);
899 log_info("fio: opendir added %d files\n", td->o.nr_files);
/*
 * Give a new thread its own copy of another thread's file table:
 * allocate a fresh array and deep-copy each fio_file (name and type)
 * from shared memory.
 */
904 void dup_files(struct thread_data *td, struct thread_data *org)
909 dprint(FD_FILE, "dup files: %d\n", org->files_index);
914 td->files = malloc(org->files_index * sizeof(f));
916 for_each_file(org, f, i) {
917 struct fio_file *__f;
/* entries come from shared memory, same as add_file() */
919 __f = smalloc(sizeof(*__f));
921 log_err("fio: smalloc OOM\n");
926 __f->file_name = smalloc_strdup(f->file_name);
927 if (!__f->file_name) {
928 log_err("fio: smalloc OOM\n");
932 __f->filetype = f->filetype;
940 * Returns the index that matches the filename, or -1 if not there
942 int get_fileno(struct thread_data *td, const char *fname)
/* linear scan over the job's file table */
947 for_each_file(td, f, i)
948 if (!strcmp(f->file_name, fname))
955 * For log usage, where we add/open/close files automatically
957 void free_release_files(struct thread_data *td)
/* reset the regular-file count along with the released table */
961 td->nr_normal_files = 0;