/*
 * Lay out (or extend) file 'f' up to f->real_file_size, creating it if
 * necessary.  On error, td_verror() records the cause and non-zero is
 * returned.  NOTE(review): this listing is elided; several original
 * lines (returns, close(2) cleanup, the final sub-blocksize write
 * clamp) are not visible here.
 */
16 static int extend_file(struct thread_data *td, struct fio_file *f)
18 int r, new_layout = 0, unlink_file = 0, flags;
19 unsigned long long left;
/* extending implies writing -- refuse on read-only jobs */
24 log_err("fio: refusing extend of file due to read-only\n");
29 * check if we need to lay the file out complete again. fio
30 * does that for operations involving reads, or for writes
31 * where overwrite is set
33 if (td_read(td) || (td_write(td) && td->o.overwrite))
35 if (td_write(td) && !td->o.overwrite)
/* fresh layout starts from an unlinked file; ENOENT is not an error */
38 if (unlink_file || new_layout) {
39 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
40 td_verror(td, errno, "unlink");
45 flags = O_WRONLY | O_CREAT;
49 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
50 f->fd = open(f->file_name, flags, 0644);
52 td_verror(td, errno, "open");
/* no full layout needed: just set the size with ftruncate() */
59 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
61 if (ftruncate(f->fd, f->real_file_size) == -1) {
62 td_verror(td, errno, "ftruncate");
/* preallocate blocks up front before writing the layout */
66 dprint(FD_FILE, "fallocate file %s, size %llu\n", f->file_name,
68 if (posix_fallocate(f->fd, 0, f->real_file_size) < 0) {
69 td_verror(td, errno, "posix_fallocate");
/* zero-filled buffer of the max write block size, used for layout */
73 b = malloc(td->o.max_bs[DDIR_WRITE]);
74 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
76 left = f->real_file_size;
/* write zeroes until the full size is laid out, or we are asked to stop */
77 while (left && !td->terminate) {
78 bs = td->o.max_bs[DDIR_WRITE];
82 r = write(f->fd, b, bs);
89 td_verror(td, errno, "write");
/* short write with no errno: report as generic EIO */
91 td_verror(td, EIO, "write");
/* optionally make the layout durable before the real job starts */
99 else if (td->o.create_fsync) {
100 if (fsync(f->fd) < 0) {
101 td_verror(td, errno, "fsync");
/*
 * Pick a file size uniformly in [file_size_low, file_size_high),
 * rounded down to a multiple of the minimum block size.
 */
117 static unsigned long long get_rand_file_size(struct thread_data *td)
119 unsigned long long ret;
122 r = os_random_long(&td->file_size_state);
/* scale the random long into the configured size range */
123 ret = td->o.file_size_low + (unsigned long long) ((double) (td->o.file_size_high - td->o.file_size_low) * (r / (RAND_MAX + 1.0)));
/* align down to rw_min_bs so the block size always divides the file size */
124 ret -= (ret % td->o.rw_min_bs);
/*
 * Fill in f->real_file_size for a regular file via fstat(2).
 * Non-zero return on error with td_verror() set (return lines elided).
 */
128 static int file_size(struct thread_data *td, struct fio_file *f)
132 if (fstat(f->fd, &st) == -1) {
133 td_verror(td, errno, "fstat");
137 f->real_file_size = st.st_size;
/*
 * Fill in f->real_file_size for a block device, using the OS helper
 * blockdev_size().  Non-zero return on error (return lines elided).
 */
141 static int bdev_size(struct thread_data *td, struct fio_file *f)
143 unsigned long long bytes;
146 r = blockdev_size(f->fd, &bytes);
148 td_verror(td, r, "blockdev_size");
152 f->real_file_size = bytes;
/*
 * Determine f->real_file_size once per file, dispatching on file type.
 * Result is cached via the FIO_SIZE_KNOWN flag; also validates that
 * file_offset does not point past the end of the file.
 */
156 static int get_file_size(struct thread_data *td, struct fio_file *f)
/* already computed earlier -- nothing to do */
160 if (f->flags & FIO_SIZE_KNOWN)
163 if (f->filetype == FIO_TYPE_FILE)
164 ret = file_size(td, f);
165 else if (f->filetype == FIO_TYPE_BD)
166 ret = bdev_size(td, f);
/* other types (char dev, pipe): size is unknown, mark as -1 */
168 f->real_file_size = -1;
/* an offset beyond the end of the file can never be valid */
173 if (f->file_offset > f->real_file_size) {
174 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
178 f->flags |= FIO_SIZE_KNOWN;
/*
 * Drop any cached pages for this file so the job starts cold: madvise()
 * for mmap'ed files, fadvise() for regular files, and
 * blockdev_invalidate_cache() for block devices.  Char devices and
 * pipes have nothing to invalidate.
 */
182 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
186 dprint(FD_IO, "invalidate cache (%d)\n", td->o.odirect)
192 * FIXME: add blockdev flushing too
195 ret = madvise(f->mmap, f->io_size, MADV_DONTNEED);
196 else if (f->filetype == FIO_TYPE_FILE)
197 ret = fadvise(f->fd, f->file_offset, f->io_size, POSIX_FADV_DONTNEED);
198 else if (f->filetype == FIO_TYPE_BD) {
199 ret = blockdev_invalidate_cache(f->fd);
/* flushing a block device needs root: warn once and carry on */
200 if (ret < 0 && errno == EACCES && geteuid()) {
202 log_err("fio: only root may flush block devices. Cache flush bypassed!\n");
207 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
/* any remaining failure is reported through td_verror() */
211 td_verror(td, errno, "invalidate_cache");
/*
 * Default ->close_file() hook: close the descriptor.
 * (elided: fd reset and error propagation are not visible here)
 */
218 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
222 dprint(FD_FILE, "fd close %s\n", f->file_name);
226 if (close(f->fd) < 0)
/*
 * Open a file, reusing an already-open descriptor from the global file
 * hash when another thread has the same name open (dup(2) of that fd).
 * The return value tells the caller whether the fd came from the hash
 * -- presumably so only fresh fds get inserted; TODO confirm, the
 * return lines are elided.
 */
233 static int file_lookup_open(struct fio_file *f, int flags)
235 struct fio_file *__f;
238 __f = lookup_file_hash(f->file_name);
240 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
242 * racy, need the __f->lock locked
/* inherit lock bookkeeping from the hashed file (known racy, see above) */
245 f->lock_owner = __f->lock_owner;
246 f->lock_batch = __f->lock_batch;
247 f->lock_ddir = __f->lock_ddir;
248 f->fd = dup(__f->fd);
252 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
/* not shared with anyone: open a fresh descriptor */
253 f->fd = open(f->file_name, flags, 0600);
/*
 * Default ->open_file() hook: build open(2) flags from job options,
 * open the file (or dup stdin/stdout for "-"), record open errors,
 * fetch the file size, and publish freshly opened fds in the shared
 * file hash.  NOTE(review): listing is elided; the flag construction
 * and several error paths are only partially visible.
 */
260 int generic_open_file(struct thread_data *td, struct fio_file *f)
266 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdin/stdout; rejected for job types that can't use a pipe */
268 if (!strcmp(f->file_name, "-")) {
270 log_err("fio: can't read/write to stdin/out\n");
276 * move output logging to stderr, if we are writing to stdout
283 flags |= OS_O_DIRECT;
286 if (f->filetype != FIO_TYPE_FILE)
295 if (f->filetype == FIO_TYPE_FILE)
/* writing to stdout: dup the fd instead of opening by name */
299 f->fd = dup(STDOUT_FILENO);
301 from_hash = file_lookup_open(f, flags);
303 if (f->filetype == FIO_TYPE_CHAR && !read_only)
/* reading from stdin: dup the fd instead of opening by name */
309 f->fd = dup(STDIN_FILENO);
311 from_hash = file_lookup_open(f, flags);
/* open failed: build a descriptive verror message */
315 char buf[FIO_VERROR_SIZE];
/* NOTE(review): O_NOATIME needs ownership/CAP_FOWNER; presumably the
 * open is retried without it on EPERM -- the retry lines are elided */
318 if (errno == EPERM && (flags & O_NOATIME)) {
323 snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
325 td_verror(td, __e, buf);
328 if (get_file_size(td, f))
/* freshly opened fd: publish it in the hash for other threads */
331 if (!from_hash && f->fd != -1) {
332 if (add_file_hash(f)) {
336 * OK to ignore, we haven't done anything with it
338 ret = generic_close_file(td, f);
/*
 * Open every file of the job via td_io_open_file().  On EMFILE the job
 * is clamped to however many files we actually managed to open.
 * NOTE(review): the closing loop at the bottom presumably serves the
 * error/cleanup path -- the branch structure around it is elided.
 */
349 int open_files(struct thread_data *td)
355 dprint(FD_FILE, "open files\n");
357 for_each_file(td, f, i) {
358 err = td_io_open_file(td, f);
/* hit the per-process fd limit: shrink o.open_files and stop */
360 if (td->error == EMFILE) {
361 log_err("fio: limited open files to: %d\n", td->nr_open_files);
362 td->o.open_files = td->nr_open_files;
369 if (td->o.open_files == td->nr_open_files)
376 for_each_file(td, f, i)
377 td_io_close_file(td, f);
383 * open/close all files, so that ->real_file_size gets set
/*
 * Probe each file's size by opening it through the io engine and
 * closing it again.  ENOENT is tolerated (files may be laid out
 * later); files whose size is still unknown get an even share of
 * the job's total size.
 */
385 static int get_file_sizes(struct thread_data *td)
391 for_each_file(td, f, i) {
392 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i, f->file_name);
394 if (td->io_ops->open_file(td, f)) {
/* a missing file is fine here -- it will be created later */
395 if (td->error != ENOENT) {
396 log_err("%s\n", td->verror);
401 if (td->io_ops->close_file)
402 td->io_ops->close_file(td, f);
/* size still unknown: split the requested total size evenly */
405 if (f->real_file_size == -1ULL && td->o.size)
406 f->real_file_size = td->o.size / td->o.nr_files;
413 * Open the files and setup files sizes, creating files if necessary.
/*
 * Main file-setup entry point: determine real sizes, derive per-file
 * ->io_size from size=/filesize= options, and lay out regular files
 * that are smaller than the requested I/O range.  NOTE(review): this
 * listing is elided; error-return lines and some branch closers are
 * not visible.
 */
415 int setup_files(struct thread_data *td)
417 unsigned long long total_size, extend_size;
420 int err = 0, need_extend;
422 dprint(FD_FILE, "setup files\n");
425 * if ioengine defines a setup() method, it's responsible for
426 * opening the files and setting f->real_file_size to indicate
427 * the valid range for that file.
429 if (td->io_ops->setup)
430 err = td->io_ops->setup(td);
432 err = get_file_sizes(td);
438 * check sizes. if the files/devices do not exist and the size
439 * isn't passed to fio, abort.
442 for_each_file(td, f, i) {
443 if (f->real_file_size == -1ULL)
446 total_size += f->real_file_size;
450 * device/file sizes are zero and no size given, punt
452 if ((!total_size || total_size == -1ULL) && !td->o.size &&
453 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
454 log_err("%s: you need to specify size=\n", td->o.name);
455 td_verror(td, EINVAL, "total_file_size");
460 * now file sizes are known, so we can set ->io_size. if size= is
461 * not given, ->io_size is just equal to ->real_file_size. if size
462 * is given, ->io_size is size / nr_files.
464 extend_size = total_size = 0;
466 for_each_file(td, f, i) {
467 f->file_offset = td->o.start_offset;
469 if (!td->o.file_size_low) {
471 * no file size range given, file size is equal to
472 * total size divided by number of files. if that is
473 * zero, set it to the real file size.
475 f->io_size = td->o.size / td->o.nr_files;
477 f->io_size = f->real_file_size - f->file_offset;
478 } else if (f->real_file_size < td->o.file_size_low ||
479 f->real_file_size > td->o.file_size_high) {
480 if (f->file_offset > td->o.file_size_low)
483 * file size given. if it's fixed, use that. if it's a
484 * range, generate a random size in-between.
486 if (td->o.file_size_low == td->o.file_size_high)
487 f->io_size = td->o.file_size_low - f->file_offset;
489 f->io_size = get_rand_file_size(td) - f->file_offset;
491 f->io_size = f->real_file_size - f->file_offset;
493 if (f->io_size == -1ULL)
496 total_size += f->io_size;
/* regular files that are too small must be grown before the run */
498 if (f->filetype == FIO_TYPE_FILE &&
499 (f->io_size + f->file_offset) > f->real_file_size &&
500 !(td->io_ops->flags & FIO_DISKLESSIO)) {
502 extend_size += (f->io_size + f->file_offset);
503 f->flags |= FIO_FILE_EXTEND;
507 if (!td->o.size || td->o.size > total_size)
508 td->o.size = total_size;
511 * See if we need to extend some files
515 log_info("%s: Laying out IO file(s) (%u file(s) / %LuMiB)\n",
516 td->o.name, need_extend, extend_size >> 20);
518 for_each_file(td, f, i) {
519 if (!(f->flags & FIO_FILE_EXTEND))
/* only regular files can be laid out on disk */
522 assert(f->filetype == FIO_TYPE_FILE);
523 f->flags &= ~FIO_FILE_EXTEND;
524 f->real_file_size = (f->io_size + f->file_offset);
525 err = extend_file(td, f);
/* no zoning requested: one zone covering the whole size */
535 if (!td->o.zone_size)
536 td->o.zone_size = td->o.size;
539 * iolog already set the total io size, if we read back
542 if (!td->o.read_iolog_file)
543 td->total_io_size = td->o.size * td->o.loops;
546 log_err("%s: you need to specify valid offset=\n", td->o.name);
/*
 * Allocate the per-file random map used to track which blocks have
 * been touched by random I/O.  Skipped when norandommap is set or the
 * job is not doing random I/O at all.
 */
550 int init_random_map(struct thread_data *td)
552 unsigned long long blocks, num_maps;
556 if (td->o.norandommap || !td_random(td))
559 for_each_file(td, f, i) {
/* number of rw_min_bs-sized blocks, rounded up */
560 blocks = (f->real_file_size + td->o.rw_min_bs - 1) / (unsigned long long) td->o.rw_min_bs;
/* one bit per block, BLOCKS_PER_MAP bits per map word */
561 num_maps = (blocks + BLOCKS_PER_MAP-1)/ (unsigned long long) BLOCKS_PER_MAP;
562 f->file_map = smalloc(num_maps * sizeof(long));
564 log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
567 f->num_maps = num_maps;
/* Close every file belonging to the job through the io engine. */
573 void close_files(struct thread_data *td)
578 for_each_file(td, f, i)
579 td_io_close_file(td, f);
/*
 * Close all files and release per-file state; honors the unlink
 * option for regular files.  (elided: frees of the name strings and
 * the file array are not visible here)
 */
582 void close_and_free_files(struct thread_data *td)
587 dprint(FD_FILE, "close files\n");
589 for_each_file(td, f, i) {
/* unlink= set: remove regular files once the job is done with them */
590 if (td->o.unlink && f->filetype == FIO_TYPE_FILE)
591 unlink(f->file_name);
593 td_io_close_file(td, f);
605 td->o.filename = NULL;
/*
 * Classify f->file_name: "-" is a pipe (stdin/stdout), otherwise
 * default to a regular file and refine via lstat() for block/char
 * devices and FIFOs.
 */
612 static void get_file_type(struct fio_file *f)
616 if (!strcmp(f->file_name, "-"))
617 f->filetype = FIO_TYPE_PIPE;
619 f->filetype = FIO_TYPE_FILE;
/* lstat() may fail (file not created yet) -- keep the default then */
621 if (!lstat(f->file_name, &sb)) {
622 if (S_ISBLK(sb.st_mode))
623 f->filetype = FIO_TYPE_BD;
624 else if (S_ISCHR(sb.st_mode))
625 f->filetype = FIO_TYPE_CHAR;
626 else if (S_ISFIFO(sb.st_mode))
627 f->filetype = FIO_TYPE_PIPE;
/*
 * Append a new fio_file for 'fname' to td->files: allocate it from
 * shared memory (smalloc), build the full path with o.directory
 * prefixed, set up its lock per file_lock_mode, and classify its
 * type.  NOTE(review): allocation-failure handling and the return
 * value lines are elided.
 */
631 int add_file(struct thread_data *td, const char *fname)
633 int cur_files = td->files_index;
634 char file_name[PATH_MAX];
638 dprint(FD_FILE, "add file %s\n", fname);
640 f = smalloc(sizeof(*f));
643 dprint(FD_FILE, "resize file array to %d files\n", cur_files + 1);
/* grow the pointer array by one slot */
645 td->files = realloc(td->files, (cur_files + 1) * sizeof(f));
646 td->files[cur_files] = f;
649 * init function, io engine may not be loaded yet
651 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
652 f->real_file_size = -1ULL;
/* prefix the job directory, if any, to the file name */
655 len = sprintf(file_name, "%s/", td->o.directory);
657 sprintf(file_name + len, "%s", fname);
658 f->file_name = smalloc_strdup(file_name);
662 switch (td->o.file_lock_mode) {
665 case FILE_LOCK_READWRITE:
666 f->lock = fio_mutex_rw_init();
668 case FILE_LOCK_EXCLUSIVE:
669 f->lock = fio_mutex_init(1);
672 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
677 if (f->filetype == FIO_TYPE_FILE)
678 td->nr_normal_files++;
680 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name, cur_files);
/* Take a reference on a file that must already be FIO_FILE_OPEN. */
685 void get_file(struct fio_file *f)
687 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
688 assert(f->flags & FIO_FILE_OPEN);
/*
 * Drop a reference on 'f'.  When the reference count reaches zero the
 * file is fsync'ed (if fsync_on_close is set) and closed through the
 * io engine; the FIO_FILE_OPEN flag is cleared.  Returns any error
 * from the fsync/close (return plumbing elided).
 */
692 int put_file(struct thread_data *td, struct fio_file *f)
694 int f_ret = 0, ret = 0;
696 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
698 if (!(f->flags & FIO_FILE_OPEN))
701 assert(f->references);
/* last reference dropped: optionally flush, then close */
705 if (should_fsync(td) && td->o.fsync_on_close)
706 f_ret = fsync(f->fd);
708 if (td->io_ops->close_file)
709 ret = td->io_ops->close_file(td, f);
715 f->flags &= ~FIO_FILE_OPEN;
/*
 * Acquire the per-file lock for the given I/O direction, honoring the
 * configured file_lock_mode.  Repeated locks by the same owner are
 * batched via lock_batch to cut lock traffic.
 */
719 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
721 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
/* we already hold it and still have batch credit: skip the lock op */
724 if (f->lock_owner == td && f->lock_batch--)
727 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
728 if (ddir == DDIR_READ)
729 fio_mutex_down_read(f->lock);
731 fio_mutex_down_write(f->lock);
732 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
733 fio_mutex_down(f->lock);
/* refill the batch counter for subsequent lock_file() calls */
736 f->lock_batch = td->o.lockfile_batch;
/*
 * Release the per-file lock taken by lock_file().  For read/write
 * locks, ownership is cleared only when this release leaves the lock
 * fully free again (judged via fio_mutex_getval()).
 */
740 void unlock_file(struct thread_data *td, struct fio_file *f)
742 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
747 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
748 const int is_read = f->lock_ddir == DDIR_READ;
749 int val = fio_mutex_getval(f->lock);
/* last reader (val == 1) or sole writer (val == -1): lock goes free */
751 if ((is_read && val == 1) || (!is_read && val == -1))
752 f->lock_owner = NULL;
755 fio_mutex_up_read(f->lock);
757 fio_mutex_up_write(f->lock);
758 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
759 int val = fio_mutex_getval(f->lock);
762 f->lock_owner = NULL;
764 fio_mutex_up(f->lock);
/*
 * Release any lock this thread may still hold on 'f'; a no-op when
 * the thread is not the current lock owner.  (body largely elided)
 */
768 void unlock_file_all(struct thread_data *td, struct fio_file *f)
770 if (f->lock_owner != td)
/*
 * Recursively walk 'dirname', calling add_file() for every regular
 * file found and descending into subdirectories.  Returns non-zero
 * on error with td_verror() set (closedir/return lines elided).
 */
777 static int recurse_dir(struct thread_data *td, const char *dirname)
783 D = opendir(dirname);
785 char buf[FIO_VERROR_SIZE];
787 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
788 td_verror(td, errno, buf);
792 while ((dir = readdir(D)) != NULL) {
793 char full_path[PATH_MAX];
/* skip the self/parent entries */
796 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
799 sprintf(full_path, "%s/%s", dirname, dir->d_name);
/* entry can vanish between readdir() and lstat() -- ignore ENOENT */
801 if (lstat(full_path, &sb) == -1) {
802 if (errno != ENOENT) {
803 td_verror(td, errno, "stat");
808 if (S_ISREG(sb.st_mode)) {
809 add_file(td, full_path);
/* anything that is neither a regular file nor a directory is skipped */
813 if (!S_ISDIR(sb.st_mode))
816 if ((ret = recurse_dir(td, full_path)) != 0)
/* Add all regular files under 'path' (recursively) to the job. */
824 int add_dir_files(struct thread_data *td, const char *path)
826 int ret = recurse_dir(td, path);
829 log_info("fio: opendir added %d files\n", td->o.nr_files);
/*
 * Clone the file list of 'org' into 'td' (used when duplicating job
 * threads).  Each fio_file is allocated from shared memory with its
 * own copy of the file name string.  NOTE(review): the struct-copy
 * line between smalloc and strdup is elided -- presumably a shallow
 * copy of *f; confirm against the full source.
 */
834 void dup_files(struct thread_data *td, struct thread_data *org)
839 dprint(FD_FILE, "dup files: %d\n", org->files_index);
844 td->files = malloc(org->files_index * sizeof(f));
846 for_each_file(org, f, i) {
847 struct fio_file *__f;
849 __f = smalloc(sizeof(*__f));
/* file names must not be shared -- each copy owns its own string */
852 __f->file_name = smalloc_strdup(f->file_name);
859 * Returns the index that matches the filename, or -1 if not there
861 int get_fileno(struct thread_data *td, const char *fname)
/* linear scan over the job's file table */
866 for_each_file(td, f, i)
867 if (!strcmp(f->file_name, fname))
874 * For log usage, where we add/open/close files automatically
876 void free_release_files(struct thread_data *td)
880 td->nr_normal_files = 0;