/*
 * Lay a file out on disk so it covers ->real_file_size bytes, creating
 * and pre-filling it if needed.  Returns 0 on success, non-zero on error
 * (details reported through td_verror()).
 * NOTE(review): this view is elided — error-path returns, close/fsync
 * handling and cleanup lines are not visible here.
 */
14 static int extend_file(struct thread_data *td, struct fio_file *f)
16 int r, new_layout = 0, unlink_file = 0, flags;
17 unsigned long long left;
/* A read-only job must never create or extend files. */
22 log_err("fio: refusing extend of file due to read-only\n");
27 * check if we need to lay the file out complete again. fio
28 * does that for operations involving reads, or for writes
29 * where overwrite is set
31 if (td_read(td) || (td_write(td) && td->o.overwrite))
33 if (td_write(td) && !td->o.overwrite)
36 if (unlink_file || new_layout) {
/* ENOENT is fine here: the file simply did not exist yet. */
37 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
38 td_verror(td, errno, "unlink");
43 flags = O_WRONLY | O_CREAT;
47 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
48 f->fd = open(f->file_name, flags, 0644);
50 td_verror(td, errno, "open");
57 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
59 if (ftruncate(f->fd, f->real_file_size) == -1) {
60 td_verror(td, errno, "ftruncate");
64 dprint(FD_FILE, "fallocate file %s, size %llu\n", f->file_name,
/*
 * NOTE(review): posix_fallocate() returns a positive error number on
 * failure and does NOT set errno, so the "< 0" test can never fire and
 * the errno handed to td_verror() is stale.  Should be:
 *   r = posix_fallocate(...); if (r) td_verror(td, r, ...);
 */
66 if (posix_fallocate(f->fd, 0, f->real_file_size) < 0) {
67 td_verror(td, errno, "posix_fallocate");
/*
 * Zero-filled scratch buffer, one max write block in size.
 * NOTE(review): malloc() result is used unchecked — verify an OOM
 * check exists in the elided lines.
 */
71 b = malloc(td->o.max_bs[DDIR_WRITE]);
72 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
/* Pre-fill the whole file in max_bs chunks until done or terminated. */
74 left = f->real_file_size;
75 while (left && !td->terminate) {
76 bs = td->o.max_bs[DDIR_WRITE];
80 r = write(f->fd, b, bs);
87 td_verror(td, errno, "write");
/* Short write without errno: report it as a generic I/O error. */
89 td_verror(td, EIO, "write");
97 else if (td->o.create_fsync)
/*
 * Pick a random file size within [file_size_low, file_size_high],
 * rounded down to a multiple of the minimum block size.
 */
111 static unsigned long long get_rand_file_size(struct thread_data *td)
113 unsigned long long ret;
116 r = os_random_long(&td->file_size_state);
/* Scale the raw random value linearly into the configured size range. */
117 ret = td->o.file_size_low + (unsigned long long) ((double) (td->o.file_size_high - td->o.file_size_low) * (r / (RAND_MAX + 1.0)));
/* Align the result down to rw_min_bs. */
118 ret -= (ret % td->o.rw_min_bs);
/*
 * Fill in ->real_file_size for a regular file using fstat(2).
 * Returns 0 on success, non-zero on error.
 */
122 static int file_size(struct thread_data *td, struct fio_file *f)
126 if (fstat(f->fd, &st) == -1) {
127 td_verror(td, errno, "fstat");
131 f->real_file_size = st.st_size;
/*
 * Fill in ->real_file_size for a block device via the OS helper.
 * blockdev_size() appears to return an error code directly (it is passed
 * to td_verror() as the error, not errno).
 */
135 static int bdev_size(struct thread_data *td, struct fio_file *f)
137 unsigned long long bytes;
140 r = blockdev_size(f->fd, &bytes);
142 td_verror(td, r, "blockdev_size");
146 f->real_file_size = bytes;
/*
 * Determine and cache the size of a file/device.  Dispatches on file
 * type; unknown types get real_file_size = -1.  Once determined,
 * FIO_SIZE_KNOWN is set so repeat calls are cheap.
 */
150 static int get_file_size(struct thread_data *td, struct fio_file *f)
154 if (f->flags & FIO_SIZE_KNOWN)
157 if (f->filetype == FIO_TYPE_FILE)
158 ret = file_size(td, f);
159 else if (f->filetype == FIO_TYPE_BD)
160 ret = bdev_size(td, f);
162 f->real_file_size = -1;
/* A configured offset past the end of the file is a hard config error. */
167 if (f->file_offset > f->real_file_size) {
/* NOTE(review): "%Lu" is a nonstandard length modifier (glibc-ism);
 * "%llu" would be portable. */
168 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
172 f->flags |= FIO_SIZE_KNOWN;
/*
 * Drop any cached pages for this file so subsequent I/O hits the device.
 * Strategy depends on file type: madvise for mmap'ed files, fadvise for
 * regular files, a blockdev ioctl for block devices; char devices and
 * pipes have nothing to invalidate.
 */
176 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
180 dprint(FD_IO, "invalidate cache (%d)\n", td->o.odirect);
186 * FIXME: add blockdev flushing too
189 ret = madvise(f->mmap, f->io_size, MADV_DONTNEED);
190 else if (f->filetype == FIO_TYPE_FILE)
191 ret = fadvise(f->fd, f->file_offset, f->io_size, POSIX_FADV_DONTNEED);
192 else if (f->filetype == FIO_TYPE_BD) {
193 ret = blockdev_invalidate_cache(f->fd);
/* Non-root users cannot flush block devices; warn and continue
 * rather than failing the job. */
194 if (ret < 0 && errno == EACCES && geteuid()) {
196 log_err("fio: only root may flush block devices. Cache flush bypassed!\n");
201 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
205 td_verror(td, errno, "invalidate_cache");
/*
 * Default close hook for io engines: plain close(2) on f->fd.
 * 'td' is unused (fio_unused).
 */
212 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
216 dprint(FD_FILE, "fd close %s\n", f->file_name);
217 if (close(f->fd) < 0)
/*
 * Default open hook for io engines.  Handles the special "-" filename
 * (stdin/stdout via dup), O_DIRECT and other flag setup, and a helpful
 * hint when O_NOATIME is refused with EPERM.  On success the file size
 * is fetched as well.
 * NOTE(review): elided view — the read/write branch structure and some
 * flag handling are not fully visible here.
 */
224 int generic_open_file(struct thread_data *td, struct fio_file *f)
229 dprint(FD_FILE, "fd open %s\n", f->file_name);
/* "-" means stdio; only valid for jobs that can use a pipe-like fd. */
231 if (!strcmp(f->file_name, "-")) {
233 log_err("fio: can't read/write to stdin/out\n");
239 * move output logging to stderr, if we are writing to stdout
246 flags |= OS_O_DIRECT;
249 if (f->filetype != FIO_TYPE_FILE)
258 if (f->filetype == FIO_TYPE_FILE)
262 f->fd = dup(STDOUT_FILENO);
264 f->fd = open(f->file_name, flags, 0600)
/*
 * Open every file belonging to this thread.  If the fd limit (EMFILE)
 * is hit, shrink o.open_files to what we actually managed to open and
 * carry on.  On unrecoverable error all files are closed again.
 */
300 int open_files(struct thread_data *td)
306 dprint(FD_FILE, "open files\n");
308 for_each_file(td, f, i) {
309 err = td_io_open_file(td, f);
/* Hit the per-process fd limit: cap open_files at what we got. */
311 if (td->error == EMFILE) {
312 log_err("fio: limited open files to: %d\n", td->nr_open_files);
313 td->o.open_files = td->nr_open_files;
/* Stop early once the requested number of files is open. */
320 if (td->o.open_files == td->nr_open_files)
/* Error path: undo any opens done so far. */
327 for_each_file(td, f, i)
328 td_io_close_file(td, f);
334 * open/close all files, so that ->real_file_size gets set
336 static int get_file_sizes(struct thread_data *td)
342 for_each_file(td, f, i) {
/* ENOENT just means the file doesn't exist yet (it may be created
 * later by layout); any other open error is logged. */
343 if (td->io_ops->open_file(td, f)) {
344 if (td->error != ENOENT) {
345 log_err("%s\n", td->verror);
350 if (td->io_ops->close_file)
351 td->io_ops->close_file(td, f);
/* Size still unknown: fall back to an even split of the job size. */
354 if (f->real_file_size == -1ULL && td->o.size)
355 f->real_file_size = td->o.size / td->o.nr_files;
362 * Open the files and setup files sizes, creating files if necessary.
364 int setup_files(struct thread_data *td)
366 unsigned long long total_size, extend_size;
369 int err = 0, need_extend;
371 dprint(FD_FILE, "setup files\n");
374 * if ioengine defines a setup() method, it's responsible for
375 * opening the files and setting f->real_file_size to indicate
376 * the valid range for that file.
378 if (td->io_ops->setup)
379 err = td->io_ops->setup(td);
381 err = get_file_sizes(td);
387 * check sizes. if the files/devices do not exist and the size
388 * isn't passed to fio, abort.
391 for_each_file(td, f, i) {
392 if (f->real_file_size == -1ULL)
395 total_size += f->real_file_size;
399 * device/file sizes are zero and no size given, punt
401 if ((!total_size || total_size == -1ULL) && !td->o.size &&
402 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
403 log_err("%s: you need to specify size=\n", td->o.name);
404 td_verror(td, EINVAL, "total_file_size");
409 * now file sizes are known, so we can set ->io_size. if size= is
410 * not given, ->io_size is just equal to ->real_file_size. if size
411 * is given, ->io_size is size / nr_files.
413 extend_size = total_size = 0;
415 for_each_file(td, f, i) {
416 f->file_offset = td->o.start_offset;
418 if (!td->o.file_size_low) {
420 * no file size range given, file size is equal to
421 * total size divided by number of files. if that is
422 * zero, set it to the real file size.
424 f->io_size = td->o.size / td->o.nr_files;
426 f->io_size = f->real_file_size - f->file_offset;
/* File exists but is outside the requested size range. */
427 } else if (f->real_file_size < td->o.file_size_low ||
428 f->real_file_size > td->o.file_size_high) {
429 if (f->file_offset > td->o.file_size_low)
432 * file size given. if it's fixed, use that. if it's a
433 * range, generate a random size in-between.
435 if (td->o.file_size_low == td->o.file_size_high)
436 f->io_size = td->o.file_size_low - f->file_offset;
438 f->io_size = get_rand_file_size(td) - f->file_offset;
440 f->io_size = f->real_file_size - f->file_offset;
442 if (f->io_size == -1ULL)
445 total_size += f->io_size;
/* Regular files smaller than the requested I/O span must be laid
 * out first — mark them for the extend pass below (unless the
 * engine does no real disk I/O). */
447 if (f->filetype == FIO_TYPE_FILE &&
448 (f->io_size + f->file_offset) > f->real_file_size &&
449 !(td->io_ops->flags & FIO_DISKLESSIO)) {
451 extend_size += (f->io_size + f->file_offset);
452 f->flags |= FIO_FILE_EXTEND;
456 if (!td->o.size || td->o.size > total_size)
457 td->o.size = total_size;
460 * See if we need to extend some files
464 log_info("%s: Laying out IO file(s) (%u file(s) / %LuMiB)\n",
465 td->o.name, need_extend, extend_size >> 20);
467 for_each_file(td, f, i) {
468 if (!(f->flags & FIO_FILE_EXTEND))
471 assert(f->filetype == FIO_TYPE_FILE);
472 f->flags &= ~FIO_FILE_EXTEND;
473 f->real_file_size = (f->io_size + f->file_offset);
474 err = extend_file(td, f);
/* No zone size configured: treat the whole file as one zone. */
484 if (!td->o.zone_size)
485 td->o.zone_size = td->o.size;
488 * iolog already set the total io size, if we read back
491 if (!td->o.read_iolog_file)
492 td->total_io_size = td->o.size * td->o.loops;
495 log_err("%s: you need to specify valid offset=\n", td->o.name);
/*
 * Allocate the per-file random-coverage bitmap used by random I/O to
 * track which blocks have been touched.  Skipped when norandommap is
 * set or the job is not random.
 */
499 int init_random_map(struct thread_data *td)
501 unsigned long long blocks, num_maps;
505 if (td->o.norandommap || !td_random(td))
508 for_each_file(td, f, i) {
/* Number of rw_min_bs-sized blocks, rounded up. */
509 blocks = (f->real_file_size + td->o.rw_min_bs - 1) / (unsigned long long) td->o.rw_min_bs;
510 num_maps = (blocks + BLOCKS_PER_MAP-1)/ (unsigned long long) BLOCKS_PER_MAP;
/* NOTE(review): num_maps * sizeof(long) could overflow size_t for
 * pathologically huge files — consider calloc(num_maps, sizeof(long)),
 * which both checks the multiply and zeroes the map. */
511 f->file_map = malloc(num_maps * sizeof(long));
513 log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
516 f->num_maps = num_maps;
517 memset(f->file_map, 0, num_maps * sizeof(long));
/*
 * Close every file belonging to this thread (files stay allocated).
 */
523 void close_files(struct thread_data *td)
528 for_each_file(td, f, i)
529 td_io_close_file(td, f);
/*
 * Close all files and release their resources; optionally unlink
 * regular files when the 'unlink' option is set.  Also clears
 * o.filename since the file list is gone after this.
 */
532 void close_and_free_files(struct thread_data *td)
537 dprint(FD_FILE, "close files\n");
539 for_each_file(td, f, i) {
540 if (td->o.unlink && f->filetype == FIO_TYPE_FILE)
541 unlink(f->file_name);
543 td_io_close_file(td, f);
554 td->o.filename = NULL;
/*
 * Classify a file by name: "-" is a pipe (stdio), otherwise default to
 * a regular file and refine via lstat() for block/char devices and
 * FIFOs.  A failed lstat() leaves the default (FIO_TYPE_FILE), which
 * covers files that do not exist yet.
 */
560 static void get_file_type(struct fio_file *f)
564 if (!strcmp(f->file_name, "-"))
565 f->filetype = FIO_TYPE_PIPE;
567 f->filetype = FIO_TYPE_FILE;
569 if (!lstat(f->file_name, &sb)) {
570 if (S_ISBLK(sb.st_mode))
571 f->filetype = FIO_TYPE_BD;
572 else if (S_ISCHR(sb.st_mode))
573 f->filetype = FIO_TYPE_CHAR;
574 else if (S_ISFIFO(sb.st_mode))
575 f->filetype = FIO_TYPE_PIPE;
/*
 * Append a new fio_file for 'fname' to td->files, prefixing the
 * configured directory if one is set.  Returns the (0-based) index of
 * the new file, judging by cur_files — confirm against the elided
 * return statement.
 */
579 int add_file(struct thread_data *td, const char *fname)
581 int cur_files = td->files_index;
582 char file_name[PATH_MAX];
586 dprint(FD_FILE, "add file %s\n", fname);
/* NOTE(review): malloc/realloc results are used unchecked here. */
588 f = malloc(sizeof(*f));
589 memset(f, 0, sizeof(*f));
592 td->files = realloc(td->files, (cur_files + 1) * sizeof(f));
594 td->files[cur_files] = f;
597 * init function, io engine may not be loaded yet
599 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
600 f->real_file_size = -1ULL;
/* NOTE(review): unbounded sprintf into a PATH_MAX buffer — a long
 * directory+fname can overflow; snprintf would be safer. */
603 len = sprintf(file_name, "%s/", td->o.directory);
605 sprintf(file_name + len, "%s", fname);
606 f->file_name = strdup(file_name);
611 if (f->filetype == FIO_TYPE_FILE)
612 td->nr_normal_files++;
/*
 * Take a reference on an already-open file.  Asserts the file is open;
 * the refcount increment itself is in an elided line.
 */
617 void get_file(struct fio_file *f)
619 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
620 assert(f->flags & FIO_FILE_OPEN);
/*
 * Drop a reference on a file; when the last reference goes away the
 * file is optionally fsync'ed (fsync_on_close) and closed via the
 * engine's close hook.  Returns the close result, judging by 'ret' —
 * the decrement and final return are in elided lines.
 */
624 int put_file(struct thread_data *td, struct fio_file *f)
628 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
/* Putting a file that is not open is a no-op. */
630 if (!(f->flags & FIO_FILE_OPEN))
633 assert(f->references);
637 if (should_fsync(td) && td->o.fsync_on_close)
640 if (td->io_ops->close_file)
641 ret = td->io_ops->close_file(td, f);
644 f->flags &= ~FIO_FILE_OPEN;
/* Acquire the per-file lock — body not visible in this view. */
648 void lock_file(struct thread_data *td, struct fio_file *f)
/* Release the per-file lock — body not visible in this view. */
652 void unlock_file(struct fio_file *f)
/*
 * Walk 'dirname' recursively, calling add_file() for every regular
 * file found.  Returns 0 on success, non-zero on error.
 * NOTE(review): closedir() is not visible in this elided view —
 * confirm it exists on all exit paths.
 */
656 static int recurse_dir(struct thread_data *td, const char *dirname)
662 D = opendir(dirname);
664 char buf[FIO_VERROR_SIZE];
666 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
667 td_verror(td, errno, buf);
671 while ((dir = readdir(D)) != NULL) {
672 char full_path[PATH_MAX];
/* Skip the self/parent entries to avoid infinite recursion. */
675 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
/* NOTE(review): unbounded sprintf into PATH_MAX — deep trees could
 * overflow; snprintf would be safer. */
678 sprintf(full_path, "%s/%s", dirname, dir->d_name);
/* ENOENT means the entry vanished between readdir and lstat: ignore. */
680 if (lstat(full_path, &sb) == -1) {
681 if (errno != ENOENT) {
682 td_verror(td, errno, "stat");
687 if (S_ISREG(sb.st_mode)) {
688 add_file(td, full_path);
/* Anything that is neither regular file nor directory is skipped. */
692 if (!S_ISDIR(sb.st_mode))
695 if ((ret = recurse_dir(td, full_path)) != 0)
/*
 * Populate the job's file list from all regular files under 'path'.
 * Logs how many files were added on success.
 */
703 int add_dir_files(struct thread_data *td, const char *path)
705 int ret = recurse_dir(td, path);
708 log_info("fio: opendir added %d files\n", td->o.nr_files);
713 void dup_files(struct thread_data *td, struct thread_data *org)
722 bytes = org->files_index * sizeof(f);
723 td->files = malloc(bytes);
724 memcpy(td->files, org->files, bytes);
726 for_each_file(td, f, i) {
727 struct fio_file *__f;
729 __f = malloc(sizeof(*__f));
730 memset(f, 0, sizeof(*__f));
733 __f->file_name = strdup(f->file_name);
740 * Returns the index that matches the filename, or -1 if not there
742 int get_fileno(struct thread_data *td, const char *fname)
/* Linear scan over the file list comparing names exactly. */
747 for_each_file(td, f, i)
748 if (!strcmp(f->file_name, fname))
755 * For log usage, where we add/open/close files automatically
757 void free_release_files(struct thread_data *td)
/* Reset the regular-file count; the per-file teardown is in elided
 * lines of this function. */
761 td->nr_normal_files = 0;