[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
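/*
 * Illustrative example (values assumed, not taken from any job file):
 * laying out a 1 GiB file with max_bs=128k makes the write loop below
 * issue 1 GiB / 128 KiB = 8192 sequential 128 KiB writes, each filled
 * via fill_io_buffer(), before the optional create_fsync.
 */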
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * check if we need to lay out the file completely again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 int ret;
62
63 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
64
65 ret = td_io_unlink_file(td, f);
66 if (ret != 0 && ret != ENOENT) {
67 td_verror(td, errno, "unlink");
68 return 1;
69 }
70 }
71
72 flags = O_WRONLY;
73 if (td->o.allow_create)
74 flags |= O_CREAT;
75 if (new_layout)
76 flags |= O_TRUNC;
77
78#ifdef WIN32
79 flags |= _O_BINARY;
80#endif
81
82 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
83 f->fd = open(f->file_name, flags, 0644);
84 if (f->fd < 0) {
85 int err = errno;
86
87 if (err == ENOENT && !td->o.allow_create)
88 log_err("fio: file creation disallowed by "
89 "allow_file_create=0\n");
90 else
91 td_verror(td, err, "open");
92 return 1;
93 }
94
95#ifdef CONFIG_POSIX_FALLOCATE
96 if (!td->o.fill_device) {
97 switch (td->o.fallocate_mode) {
98 case FIO_FALLOCATE_NONE:
99 break;
100 case FIO_FALLOCATE_POSIX:
101 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
102 f->file_name,
103 (unsigned long long) f->real_file_size);
104
105 r = posix_fallocate(f->fd, 0, f->real_file_size);
106 if (r > 0) {
107 log_err("fio: posix_fallocate fails: %s\n",
108 strerror(r));
109 }
110 break;
111#ifdef CONFIG_LINUX_FALLOCATE
112 case FIO_FALLOCATE_KEEP_SIZE:
113 dprint(FD_FILE,
114 "fallocate(FALLOC_FL_KEEP_SIZE) "
115 "file %s size %llu\n", f->file_name,
116 (unsigned long long) f->real_file_size);
117
118 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
119 f->real_file_size);
120 if (r != 0)
121 td_verror(td, errno, "fallocate");
122
123 break;
124#endif /* CONFIG_LINUX_FALLOCATE */
125 default:
126 log_err("fio: unknown fallocate mode: %d\n",
127 td->o.fallocate_mode);
128 assert(0);
129 }
130 }
131#endif /* CONFIG_POSIX_FALLOCATE */
132
133 if (!new_layout)
134 goto done;
135
136 /*
137 * The size will be -1ULL when fill_device is used, so don't truncate
138 * or fallocate this file, just write it
139 */
140 if (!td->o.fill_device) {
141 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
142 (unsigned long long) f->real_file_size);
143 if (ftruncate(f->fd, f->real_file_size) == -1) {
144 if (errno != EFBIG) {
145 td_verror(td, errno, "ftruncate");
146 goto err;
147 }
148 }
149 }
150
151 b = malloc(td->o.max_bs[DDIR_WRITE]);
152
153 left = f->real_file_size;
154 while (left && !td->terminate) {
155 bs = td->o.max_bs[DDIR_WRITE];
156 if (bs > left)
157 bs = left;
158
159 fill_io_buffer(td, b, bs, bs);
160
161 r = write(f->fd, b, bs);
162
163 if (r > 0) {
164 left -= r;
165 continue;
166 } else {
167 if (r < 0) {
168 int __e = errno;
169
170 if (__e == ENOSPC) {
171 if (td->o.fill_device)
172 break;
173 log_info("fio: ENOSPC on laying out "
174 "file, stopping\n");
175 break;
176 }
177 td_verror(td, errno, "write");
178 } else
179 td_verror(td, EIO, "write");
180
181 break;
182 }
183 }
184
185 if (td->terminate) {
186 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
187 td_io_unlink_file(td, f);
188 } else if (td->o.create_fsync) {
189 if (fsync(f->fd) < 0) {
190 td_verror(td, errno, "fsync");
191 goto err;
192 }
193 }
194 if (td->o.fill_device && !td_write(td)) {
195 fio_file_clear_size_known(f);
196 if (td_io_get_file_size(td, f))
197 goto err;
198 if (f->io_size > f->real_file_size)
199 f->io_size = f->real_file_size;
200 }
201
202 free(b);
203done:
204 return 0;
205err:
206 close(f->fd);
207 f->fd = -1;
208 if (b)
209 free(b);
210 return 1;
211}
212
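/*
 * Read the file once to warm the page cache before the job starts
 * (pre_read=1). Opens the file if it is not already open, seeks to
 * ->file_offset and reads ->io_size bytes sequentially in max_bs[READ]
 * sized chunks. Skipped for pipe-like engines (FIO_PIPEIO).
 */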
213static int pre_read_file(struct thread_data *td, struct fio_file *f)
214{
215 int ret = 0, r, did_open = 0, old_runstate;
216 unsigned long long left;
217 unsigned int bs;
218 char *b;
219
220 if (td_ioengine_flagged(td, FIO_PIPEIO))
221 return 0;
222
223 if (!fio_file_open(f)) {
224 if (td->io_ops->open_file(td, f)) {
225 log_err("fio: cannot pre-read, failed to open file\n");
226 return 1;
227 }
228 did_open = 1;
229 }
230
231 old_runstate = td_bump_runstate(td, TD_PRE_READING);
232
233 bs = td->o.max_bs[DDIR_READ];
234 b = malloc(bs);
235 memset(b, 0, bs);
236
237 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
238 td_verror(td, errno, "lseek");
239 log_err("fio: failed to lseek pre-read file\n");
240 ret = 1;
241 goto error;
242 }
243
244 left = f->io_size;
245
246 while (left && !td->terminate) {
247 if (bs > left)
248 bs = left;
249
250 r = read(f->fd, b, bs);
251
252 if (r == (int) bs) {
253 left -= bs;
254 continue;
255 } else {
256 td_verror(td, EIO, "pre_read");
257 break;
258 }
259 }
260
261error:
262 td_restore_runstate(td, old_runstate);
263
264 if (did_open)
265 td->io_ops->close_file(td, f);
266
267 free(b);
268 return ret;
269}
270
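/*
 * Pick a file size uniformly in [file_size_low, file_size_high), rounded
 * down to a multiple of rw_min_bs. Illustrative example (values assumed):
 * with file_size_low=1m, file_size_high=4m, rw_min_bs=4k and a random
 * fraction of ~0.5, this yields 1 MiB + 1.5 MiB = 2.5 MiB (2621440 bytes),
 * which is already 4 KiB aligned.
 */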
271unsigned long long get_rand_file_size(struct thread_data *td)
272{
273 unsigned long long ret, sized;
274 uint64_t frand_max;
275 unsigned long r;
276
277 frand_max = rand_max(&td->file_size_state);
278 r = __rand(&td->file_size_state);
279 sized = td->o.file_size_high - td->o.file_size_low;
280 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
281 ret += td->o.file_size_low;
282 ret -= (ret % td->o.rw_min_bs);
283 return ret;
284}
285
286static int file_size(struct thread_data *td, struct fio_file *f)
287{
288 struct stat st;
289
290 if (stat(f->file_name, &st) == -1) {
291 td_verror(td, errno, "stat");
292 return 1;
293 }
294
295 f->real_file_size = st.st_size;
296 return 0;
297}
298
299static int bdev_size(struct thread_data *td, struct fio_file *f)
300{
301 unsigned long long bytes = 0;
302 int r;
303
304 if (td->io_ops->open_file(td, f)) {
305 log_err("fio: failed opening blockdev %s for size check\n",
306 f->file_name);
307 return 1;
308 }
309
310 r = blockdev_size(f, &bytes);
311 if (r) {
312 td_verror(td, r, "blockdev_size");
313 goto err;
314 }
315
316 if (!bytes) {
317 log_err("%s: zero sized block device?\n", f->file_name);
318 goto err;
319 }
320
321 f->real_file_size = bytes;
322 td->io_ops->close_file(td, f);
323 return 0;
324err:
325 td->io_ops->close_file(td, f);
326 return 1;
327}
328
329static int char_size(struct thread_data *td, struct fio_file *f)
330{
331#ifdef FIO_HAVE_CHARDEV_SIZE
332 unsigned long long bytes = 0;
333 int r;
334
335 if (td->io_ops->open_file(td, f)) {
336 log_err("fio: failed opening chardev %s for size check\n",
337 f->file_name);
338 return 1;
339 }
340
341 r = chardev_size(f, &bytes);
342 if (r) {
343 td_verror(td, r, "chardev_size");
344 goto err;
345 }
346
347 if (!bytes) {
348 log_err("%s: zero sized char device?\n", f->file_name);
349 goto err;
350 }
351
352 f->real_file_size = bytes;
353 td->io_ops->close_file(td, f);
354 return 0;
355err:
356 td->io_ops->close_file(td, f);
357 return 1;
358#else
359 f->real_file_size = -1ULL;
360 return 0;
361#endif
362}
363
364static int get_file_size(struct thread_data *td, struct fio_file *f)
365{
366 int ret = 0;
367
368 if (fio_file_size_known(f))
369 return 0;
370
371 if (f->filetype == FIO_TYPE_FILE)
372 ret = file_size(td, f);
373 else if (f->filetype == FIO_TYPE_BD)
374 ret = bdev_size(td, f);
375 else if (f->filetype == FIO_TYPE_CHAR)
376 ret = char_size(td, f);
377 else
378 f->real_file_size = -1;
379
380 if (ret)
381 return ret;
382
383 if (f->file_offset > f->real_file_size) {
384 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
385 (unsigned long long) f->file_offset,
386 (unsigned long long) f->real_file_size);
387 return 1;
388 }
389
390 fio_file_set_size_known(f);
391 return 0;
392}
393
394static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
395 unsigned long long off,
396 unsigned long long len)
397{
398 int errval = 0, ret = 0;
399
400#ifdef CONFIG_ESX
401 return 0;
402#endif
403
404 if (len == -1ULL)
405 len = f->io_size;
406 if (off == -1ULL)
407 off = f->file_offset;
408
409 if (len == -1ULL || off == -1ULL)
410 return 0;
411
412 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
413 len);
414
415 if (td->io_ops->invalidate) {
416 ret = td->io_ops->invalidate(td, f);
417 if (ret < 0)
418 errval = ret;
419 } else if (f->filetype == FIO_TYPE_FILE) {
420 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
421 if (ret)
422 errval = ret;
423 } else if (f->filetype == FIO_TYPE_BD) {
424 int retry_count = 0;
425
426 ret = blockdev_invalidate_cache(f);
427 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
428 /*
429 * Linux multipath devices reject ioctl while
430 * the maps are being updated. That window can
431 * last tens of milliseconds; we'll try up to
432 * a quarter of a second.
433 */
434 usleep(10000);
435 ret = blockdev_invalidate_cache(f);
436 }
437 if (ret < 0 && errno == EACCES && geteuid()) {
438 if (!root_warn) {
439 log_err("fio: only root may flush block "
440 "devices. Cache flush bypassed!\n");
441 root_warn = 1;
442 }
443 ret = 0;
444 }
445 if (ret < 0)
446 errval = errno;
447 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
448 ret = 0;
449
450 /*
451 * Cache flushing isn't a fatal condition, and we know it will
452 * happen on some platforms where we don't have the proper
453 * function to flush eg block device caches. So just warn and
454 * continue on our way.
455 */
456 if (errval)
457 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errval));
458
459 return 0;
460
461}
462
463int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
464{
465 if (!fio_file_open(f))
466 return 0;
467
468 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
469}
470
471int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
472{
473 int ret = 0;
474
475 dprint(FD_FILE, "fd close %s\n", f->file_name);
476
477 remove_file_hash(f);
478
479 if (close(f->fd) < 0)
480 ret = errno;
481
482 f->fd = -1;
483
484 if (f->shadow_fd != -1) {
485 close(f->shadow_fd);
486 f->shadow_fd = -1;
487 }
488
489 f->engine_data = 0;
490 return ret;
491}
492
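/*
 * Look the name up in the global file hash first, so jobs sharing a file
 * also share its lock object, then open it with the caller's flags (plus
 * _O_BINARY on Windows). Returns whether the lock came from the hash;
 * the descriptor itself ends up in f->fd.
 */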
493int file_lookup_open(struct fio_file *f, int flags)
494{
495 struct fio_file *__f;
496 int from_hash;
497
498 __f = lookup_file_hash(f->file_name);
499 if (__f) {
500 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
501 /*
502 * racy, need the __f->lock locked
503 */
504 f->lock = __f->lock;
505 from_hash = 1;
506 } else {
507 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
508 from_hash = 0;
509 }
510
511#ifdef WIN32
512 flags |= _O_BINARY;
513#endif
514
515 f->fd = open(f->file_name, flags, 0600);
516 return from_hash;
517}
518
519static int file_close_shadow_fds(struct thread_data *td)
520{
521 struct fio_file *f;
522 int num_closed = 0;
523 unsigned int i;
524
525 for_each_file(td, f, i) {
526 if (f->shadow_fd == -1)
527 continue;
528
529 close(f->shadow_fd);
530 f->shadow_fd = -1;
531 num_closed++;
532 }
533
534 return num_closed;
535}
536
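/*
 * Default open helper: derives the open(2) flags from the job options
 * (direct, sync_io, oatomic, create_on_open), maps "-" to stdin/stdout,
 * and retries without FIO_O_NOATIME on EPERM or after closing shadow
 * descriptors on EMFILE. Newly hashed descriptors may be stashed as
 * shadow fds to avoid udev-triggered cache pollution on close.
 */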
537int generic_open_file(struct thread_data *td, struct fio_file *f)
538{
539 int is_std = 0;
540 int flags = 0;
541 int from_hash = 0;
542
543 dprint(FD_FILE, "fd open %s\n", f->file_name);
544
545 if (!strcmp(f->file_name, "-")) {
546 if (td_rw(td)) {
547 log_err("fio: can't read/write to stdin/out\n");
548 return 1;
549 }
550 is_std = 1;
551
552 /*
553 * move output logging to stderr, if we are writing to stdout
554 */
555 if (td_write(td))
556 f_out = stderr;
557 }
558
559 if (td_trim(td))
560 goto skip_flags;
561 if (td->o.odirect)
562 flags |= OS_O_DIRECT;
563 if (td->o.oatomic) {
564 if (!FIO_O_ATOMIC) {
565 td_verror(td, EINVAL, "OS does not support atomic IO");
566 return 1;
567 }
568 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
569 }
570 if (td->o.sync_io)
571 flags |= O_SYNC;
572 if (td->o.create_on_open && td->o.allow_create)
573 flags |= O_CREAT;
574skip_flags:
575 if (f->filetype != FIO_TYPE_FILE)
576 flags |= FIO_O_NOATIME;
577
578open_again:
579 if (td_write(td)) {
580 if (!read_only)
581 flags |= O_RDWR;
582
583 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
584 flags |= O_CREAT;
585
586 if (is_std)
587 f->fd = dup(STDOUT_FILENO);
588 else
589 from_hash = file_lookup_open(f, flags);
590 } else if (td_read(td)) {
591 if (f->filetype == FIO_TYPE_CHAR && !read_only)
592 flags |= O_RDWR;
593 else
594 flags |= O_RDONLY;
595
596 if (is_std)
597 f->fd = dup(STDIN_FILENO);
598 else
599 from_hash = file_lookup_open(f, flags);
600 } else { /* td trim */
601 flags |= O_RDWR;
602 from_hash = file_lookup_open(f, flags);
603 }
604
605 if (f->fd == -1) {
606 char buf[FIO_VERROR_SIZE];
607 int __e = errno;
608
609 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
610 flags &= ~FIO_O_NOATIME;
611 goto open_again;
612 }
613 if (__e == EMFILE && file_close_shadow_fds(td))
614 goto open_again;
615
616 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
617
618 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
619 log_err("fio: looks like your file system does not " \
620 "support direct=1/buffered=0\n");
621 }
622
623 td_verror(td, __e, buf);
624 return 1;
625 }
626
627 if (!from_hash && f->fd != -1) {
628 if (add_file_hash(f)) {
629 int fio_unused ret;
630
631 /*
632 * Stash away descriptor for later close. This is to
633 * work-around a "feature" on Linux, where a close of
634 * an fd that has been opened for write will trigger
635 * udev to call blkid to check partitions, fs id, etc.
636 * That pollutes the device cache, which can slow down
637 * unbuffered accesses.
638 */
639 if (f->shadow_fd == -1)
640 f->shadow_fd = f->fd;
641 else {
642 /*
643 * OK to ignore, we haven't done anything
644 * with it
645 */
646 ret = generic_close_file(td, f);
647 }
648 goto open_again;
649 }
650 }
651
652 return 0;
653}
654
655int generic_get_file_size(struct thread_data *td, struct fio_file *f)
656{
657 return get_file_size(td, f);
658}
659
660/*
661 * open/close all files, so that ->real_file_size gets set
662 */
663static int get_file_sizes(struct thread_data *td)
664{
665 struct fio_file *f;
666 unsigned int i;
667 int err = 0;
668
669 for_each_file(td, f, i) {
670 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
671 f->file_name);
672
673 if (td_io_get_file_size(td, f)) {
674 if (td->error != ENOENT) {
675 log_err("%s\n", td->verror);
676 err = 1;
677 break;
678 }
679 clear_error(td);
680 }
681
682 if (f->real_file_size == -1ULL && td->o.size)
683 f->real_file_size = td->o.size / td->o.nr_files;
684 }
685
686 return err;
687}
688
689struct fio_mount {
690 struct flist_head list;
691 const char *base;
692 char __base[256];
693 unsigned int key;
694};
695
696/*
697 * Get the number of free bytes on each unique mount that backs the files.
698 */
699static unsigned long long get_fs_free_counts(struct thread_data *td)
700{
701 struct flist_head *n, *tmp;
702 unsigned long long ret = 0;
703 struct fio_mount *fm;
704 FLIST_HEAD(list);
705 struct fio_file *f;
706 unsigned int i;
707
708 for_each_file(td, f, i) {
709 struct stat sb;
710 char buf[256];
711
712 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
713 if (f->real_file_size != -1ULL)
714 ret += f->real_file_size;
715 continue;
716 } else if (f->filetype != FIO_TYPE_FILE)
717 continue;
718
719 buf[255] = '\0';
720 strncpy(buf, f->file_name, 255);
721
722 if (stat(buf, &sb) < 0) {
723 if (errno != ENOENT)
724 break;
725 strcpy(buf, ".");
726 if (stat(buf, &sb) < 0)
727 break;
728 }
729
730 fm = NULL;
731 flist_for_each(n, &list) {
732 fm = flist_entry(n, struct fio_mount, list);
733 if (fm->key == sb.st_dev)
734 break;
735
736 fm = NULL;
737 }
738
739 if (fm)
740 continue;
741
742 fm = calloc(1, sizeof(*fm));
743 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
744 fm->base = basename(fm->__base);
745 fm->key = sb.st_dev;
746 flist_add(&fm->list, &list);
747 }
748
749 flist_for_each_safe(n, tmp, &list) {
750 unsigned long long sz;
751
752 fm = flist_entry(n, struct fio_mount, list);
753 flist_del(&fm->list);
754
755 sz = get_fs_free_size(fm->base);
756 if (sz && sz != -1ULL)
757 ret += sz;
758
759 free(fm);
760 }
761
762 return ret;
763}
764
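/*
 * Illustrative example (values assumed): with offset_increment=256m and
 * numjobs=4, the cloned subjobs start their I/O at 0, 256 MiB, 512 MiB
 * and 768 MiB into the file (plus any start_offset). With file_append on
 * a regular file, the offset is simply the current end of the file.
 */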
765uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
766{
767 struct thread_options *o = &td->o;
768
769 if (o->file_append && f->filetype == FIO_TYPE_FILE)
770 return f->real_file_size;
771
772 return td->o.start_offset +
773 td->subjob_number * td->o.offset_increment;
774}
775
776/*
777 * Open the files and setup files sizes, creating files if necessary.
778 */
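/*
 * Sizing example (illustrative, values assumed): with size=1g, nrfiles=4
 * and no file_size_low/high range, each file gets io_size = 1 GiB / 4 =
 * 256 MiB. If the total does not divide evenly, any remainder amounting
 * to at least one minimum block is spread over the first files, one
 * block each.
 */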
779int setup_files(struct thread_data *td)
780{
781 unsigned long long total_size, extend_size;
782 struct thread_options *o = &td->o;
783 struct fio_file *f;
784 unsigned int i, nr_fs_extra = 0;
785 int err = 0, need_extend;
786 int old_state;
787 const unsigned int bs = td_min_bs(td);
788 uint64_t fs = 0;
789
790 dprint(FD_FILE, "setup files\n");
791
792 old_state = td_bump_runstate(td, TD_SETTING_UP);
793
794 if (o->read_iolog_file)
795 goto done;
796
797 /*
798 * if ioengine defines a setup() method, it's responsible for
799 * opening the files and setting f->real_file_size to indicate
800 * the valid range for that file.
801 */
802 if (td->io_ops->setup)
803 err = td->io_ops->setup(td);
804 else
805 err = get_file_sizes(td);
806
807 if (err)
808 goto err_out;
809
810 /*
811 * check sizes. if the files/devices do not exist and the size
812 * isn't passed to fio, abort.
813 */
814 total_size = 0;
815 for_each_file(td, f, i) {
816 f->fileno = i;
817 if (f->real_file_size == -1ULL)
818 total_size = -1ULL;
819 else
820 total_size += f->real_file_size;
821 }
822
823 if (o->fill_device)
824 td->fill_device_size = get_fs_free_counts(td);
825
826 /*
827 * device/file sizes are zero and no size given, punt
828 */
829 if ((!total_size || total_size == -1ULL) && !o->size &&
830 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
831 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
832 log_err("%s: you need to specify size=\n", o->name);
833 td_verror(td, EINVAL, "total_file_size");
834 goto err_out;
835 }
836
837 /*
838 * Calculate per-file size and potential extra size for the
839 * first files, if needed.
840 */
841 if (!o->file_size_low && o->nr_files) {
842 uint64_t all_fs;
843
844 fs = o->size / o->nr_files;
845 all_fs = fs * o->nr_files;
846
847 if (all_fs < o->size)
848 nr_fs_extra = (o->size - all_fs) / bs;
849 }
850
851 /*
852 * now file sizes are known, so we can set ->io_size. if size= is
853 * not given, ->io_size is just equal to ->real_file_size. if size
854 * is given, ->io_size is size / nr_files.
855 */
856 extend_size = total_size = 0;
857 need_extend = 0;
858 for_each_file(td, f, i) {
859 f->file_offset = get_start_offset(td, f);
860
861 if (!o->file_size_low) {
862 /*
863 * no file size range given, file size is equal to
864 * total size divided by number of files. If that is
865 * zero, set it to the real file size. If the size
866 * doesn't divide nicely with the min blocksize,
867 * make the first files bigger.
868 */
869 f->io_size = fs;
870 if (nr_fs_extra) {
871 nr_fs_extra--;
872 f->io_size += bs;
873 }
874
875 if (!f->io_size)
876 f->io_size = f->real_file_size - f->file_offset;
877 } else if (f->real_file_size < o->file_size_low ||
878 f->real_file_size > o->file_size_high) {
879 if (f->file_offset > o->file_size_low)
880 goto err_offset;
881 /*
882 * file size given. if it's fixed, use that. if it's a
883 * range, generate a random size in-between.
884 */
885 if (o->file_size_low == o->file_size_high)
886 f->io_size = o->file_size_low - f->file_offset;
887 else {
888 f->io_size = get_rand_file_size(td)
889 - f->file_offset;
890 }
891 } else
892 f->io_size = f->real_file_size - f->file_offset;
893
894 if (f->io_size == -1ULL)
895 total_size = -1ULL;
896 else {
897 if (o->size_percent) {
898 f->io_size = (f->io_size * o->size_percent) / 100;
899 f->io_size -= (f->io_size % td_min_bs(td));
900 }
901 total_size += f->io_size;
902 }
903
904 if (f->filetype == FIO_TYPE_FILE &&
905 (f->io_size + f->file_offset) > f->real_file_size &&
906 !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
907 if (!o->create_on_open) {
908 need_extend++;
909 extend_size += (f->io_size + f->file_offset);
910 } else
911 f->real_file_size = f->io_size + f->file_offset;
912 fio_file_set_extend(f);
913 }
914 }
915
916 if (td->o.block_error_hist) {
917 int len;
918
919 assert(td->o.nr_files == 1); /* checked in fixup_options */
920 f = td->files[0];
921 len = f->io_size / td->o.bs[DDIR_TRIM];
922 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
923 log_err("fio: cannot calculate block histogram with "
924 "%d trim blocks, maximum %d\n",
925 len, MAX_NR_BLOCK_INFOS);
926 td_verror(td, EINVAL, "block_error_hist");
927 goto err_out;
928 }
929
930 td->ts.nr_block_infos = len;
931 for (i = 0; i < len; i++)
932 td->ts.block_infos[i] =
933 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
934 } else
935 td->ts.nr_block_infos = 0;
936
937 if (!o->size || (total_size && o->size > total_size))
938 o->size = total_size;
939
940 if (o->size < td_min_bs(td)) {
941 log_err("fio: blocksize too large for data set\n");
942 goto err_out;
943 }
944
945 /*
946 * See if we need to extend some files
947 */
948 if (need_extend) {
949 temp_stall_ts = 1;
950 if (output_format & FIO_OUTPUT_NORMAL)
951 log_info("%s: Laying out IO file(s) (%u file(s) / %lluMiB)\n",
952 o->name, need_extend, extend_size >> 20);
953
954 for_each_file(td, f, i) {
955 unsigned long long old_len = -1ULL, extend_len = -1ULL;
956
957 if (!fio_file_extend(f))
958 continue;
959
960 assert(f->filetype == FIO_TYPE_FILE);
961 fio_file_clear_extend(f);
962 if (!o->fill_device) {
963 old_len = f->real_file_size;
964 extend_len = f->io_size + f->file_offset -
965 old_len;
966 }
967 f->real_file_size = (f->io_size + f->file_offset);
968 err = extend_file(td, f);
969 if (err)
970 break;
971
972 err = __file_invalidate_cache(td, f, old_len,
973 extend_len);
974
975 /*
976 * Shut up static checker
977 */
978 if (f->fd != -1)
979 close(f->fd);
980
981 f->fd = -1;
982 if (err)
983 break;
984 }
985 temp_stall_ts = 0;
986 }
987
988 if (err)
989 goto err_out;
990
991 if (!o->zone_size)
992 o->zone_size = o->size;
993
994 /*
995 * iolog already set the total io size, if we read back
996 * stored entries.
997 */
998 if (!o->read_iolog_file) {
999 if (o->io_limit)
1000 td->total_io_size = o->io_limit * o->loops;
1001 else
1002 td->total_io_size = o->size * o->loops;
1003 }
1004
1005done:
1006 if (o->create_only)
1007 td->done = 1;
1008
1009 td_restore_runstate(td, old_state);
1010 return 0;
1011err_offset:
1012 log_err("%s: you need to specify valid offset=\n", o->name);
1013err_out:
1014 td_restore_runstate(td, old_state);
1015 return 1;
1016}
1017
1018int pre_read_files(struct thread_data *td)
1019{
1020 struct fio_file *f;
1021 unsigned int i;
1022
1023 dprint(FD_FILE, "pre_read files\n");
1024
1025 for_each_file(td, f, i) {
1026 pre_read_file(td, f);
1027 }
1028
1029 return 1;
1030}
1031
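/*
 * Set up the zipf/pareto/gauss state for one file. The range count is a
 * ceiling division of the usable file size by the smallest block size,
 * e.g. (assumed values) a 1 MiB file with 4 KiB min blocks gives
 * 1048576 / 4096 = 256 ranges to draw offsets from.
 */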
1032static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1033{
1034 unsigned int range_size, seed;
1035 unsigned long nranges;
1036 uint64_t fsize;
1037
1038 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1039 fsize = min(f->real_file_size, f->io_size);
1040
1041 nranges = (fsize + range_size - 1) / range_size;
1042
1043 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1044 if (!td->o.rand_repeatable)
1045 seed = td->rand_seeds[4];
1046
1047 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1048 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1049 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1050 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1051 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1052 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1053
1054 return 1;
1055}
1056
1057static int init_rand_distribution(struct thread_data *td)
1058{
1059 struct fio_file *f;
1060 unsigned int i;
1061 int state;
1062
1063 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1064 return 0;
1065
1066 state = td_bump_runstate(td, TD_SETTING_UP);
1067
1068 for_each_file(td, f, i)
1069 __init_rand_distribution(td, f);
1070
1071 td_restore_runstate(td, state);
1072
1073 return 1;
1074}
1075
1076/*
1077 * Check if the number of blocks exceeds the randomness capability of
1078 * the selected generator. Tausworthe is 32-bit, the others are fully
1079 * 64-bit capable.
1080 */
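/*
 * Illustrative example (values assumed): a 16 TiB file with a 4 KiB
 * minimum block size has 2^32 blocks, which exceeds the 32-bit
 * tausworthe range, so fio either switches to tausworthe64 or prints
 * the warning below if random_generator= was set explicitly.
 */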
1081static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1082 uint64_t blocks)
1083{
1084 if (blocks <= FRAND32_MAX)
1085 return 0;
1086 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1087 return 0;
1088
1089 /*
1090 * If the user hasn't specified a random generator, switch
1091 * to tausworthe64 with informational warning. If the user did
1092 * specify one, just warn.
1093 */
1094 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1095 f->file_name);
1096
1097 if (!fio_option_is_set(&td->o, random_generator)) {
1098 log_info("fio: Switching to tausworthe64. Use the "
1099 "random_generator= option to get rid of this "
1100 "warning.\n");
1101 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1102 return 0;
1103 }
1104
1105 /*
1106 * Just log this as information, to avoid breaking existing scripts.
1107 */
1108 log_info("fio: Use the random_generator= option to switch to lfsr or "
1109 "tausworthe64.\n");
1110 return 0;
1111}
1112
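/*
 * Choose how random offsets are tracked per file: non-uniform
 * distributions are handled by init_rand_distribution(),
 * random_generator=lfsr uses an LFSR sized to the block count, and the
 * default is an axmap unless norandommap is set. If the axmap allocation
 * fails, the job only continues without a map when softrandommap is set.
 */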
1113int init_random_map(struct thread_data *td)
1114{
1115 unsigned long long blocks;
1116 struct fio_file *f;
1117 unsigned int i;
1118
1119 if (init_rand_distribution(td))
1120 return 0;
1121 if (!td_random(td))
1122 return 0;
1123
1124 for_each_file(td, f, i) {
1125 uint64_t fsize = min(f->real_file_size, f->io_size);
1126
1127 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1128
1129 if (check_rand_gen_limits(td, f, blocks))
1130 return 1;
1131
1132 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1133 unsigned long seed;
1134
1135 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1136
1137 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1138 fio_file_set_lfsr(f);
1139 continue;
1140 }
1141 } else if (!td->o.norandommap) {
1142 f->io_axmap = axmap_new(blocks);
1143 if (f->io_axmap) {
1144 fio_file_set_axmap(f);
1145 continue;
1146 }
1147 } else if (td->o.norandommap)
1148 continue;
1149
1150 if (!td->o.softrandommap) {
1151 log_err("fio: failed allocating random map. If running"
1152 " a large number of jobs, try the 'norandommap'"
1153 " option or set 'softrandommap'. Or give"
1154 " a larger --alloc-size to fio.\n");
1155 return 1;
1156 }
1157
1158 log_info("fio: file %s failed allocating random map. Running "
1159 "job without.\n", f->file_name);
1160 }
1161
1162 return 0;
1163}
1164
1165void close_files(struct thread_data *td)
1166{
1167 struct fio_file *f;
1168 unsigned int i;
1169
1170 for_each_file(td, f, i) {
1171 if (fio_file_open(f))
1172 td_io_close_file(td, f);
1173 }
1174}
1175
1176void close_and_free_files(struct thread_data *td)
1177{
1178 struct fio_file *f;
1179 unsigned int i;
1180
1181 dprint(FD_FILE, "close files\n");
1182
1183 for_each_file(td, f, i) {
1184 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1185 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1186 td_io_unlink_file(td, f);
1187 }
1188
1189 if (fio_file_open(f))
1190 td_io_close_file(td, f);
1191
1192 remove_file_hash(f);
1193
1194 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1195 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1196 td_io_unlink_file(td, f);
1197 }
1198
1199 sfree(f->file_name);
1200 f->file_name = NULL;
1201 if (fio_file_axmap(f)) {
1202 axmap_free(f->io_axmap);
1203 f->io_axmap = NULL;
1204 }
1205 sfree(f);
1206 }
1207
1208 td->o.filename = NULL;
1209 free(td->files);
1210 free(td->file_locks);
1211 td->files_index = 0;
1212 td->files = NULL;
1213 td->file_locks = NULL;
1214 td->o.file_lock_mode = FILE_LOCK_NONE;
1215 td->o.nr_files = 0;
1216}
1217
1218static void get_file_type(struct fio_file *f)
1219{
1220 struct stat sb;
1221
1222 if (!strcmp(f->file_name, "-"))
1223 f->filetype = FIO_TYPE_PIPE;
1224 else
1225 f->filetype = FIO_TYPE_FILE;
1226
1227#ifdef WIN32
1228 /* \\.\ is the device namespace in Windows, where every file is
1229 * a block device */
1230 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1231 f->filetype = FIO_TYPE_BD;
1232#endif
1233
1234 if (!stat(f->file_name, &sb)) {
1235 if (S_ISBLK(sb.st_mode))
1236 f->filetype = FIO_TYPE_BD;
1237 else if (S_ISCHR(sb.st_mode))
1238 f->filetype = FIO_TYPE_CHAR;
1239 else if (S_ISFIFO(sb.st_mode))
1240 f->filetype = FIO_TYPE_PIPE;
1241 }
1242}
1243
1244static bool __is_already_allocated(const char *fname, bool set)
1245{
1246 struct flist_head *entry;
1247 bool ret;
1248
1249 ret = file_bloom_exists(fname, set);
1250 if (!ret)
1251 return ret;
1252
1253 flist_for_each(entry, &filename_list) {
1254 struct file_name *fn;
1255
1256 fn = flist_entry(entry, struct file_name, list);
1257
1258 if (!strcmp(fn->filename, fname))
1259 return true;
1260 }
1261
1262 return false;
1263}
1264
1265static bool is_already_allocated(const char *fname)
1266{
1267 bool ret;
1268
1269 fio_file_hash_lock();
1270 ret = __is_already_allocated(fname, false);
1271 fio_file_hash_unlock();
1272
1273 return ret;
1274}
1275
1276static void set_already_allocated(const char *fname)
1277{
1278 struct file_name *fn;
1279
1280 fn = malloc(sizeof(struct file_name));
1281 fn->filename = strdup(fname);
1282
1283 fio_file_hash_lock();
1284 if (!__is_already_allocated(fname, true)) {
1285 flist_add_tail(&fn->list, &filename_list);
1286 fn = NULL;
1287 }
1288 fio_file_hash_unlock();
1289
1290 if (fn) {
1291 free(fn->filename);
1292 free(fn);
1293 }
1294}
1295
1296static void free_already_allocated(void)
1297{
1298 struct flist_head *entry, *tmp;
1299 struct file_name *fn;
1300
1301 if (flist_empty(&filename_list))
1302 return;
1303
1304 fio_file_hash_lock();
1305 flist_for_each_safe(entry, tmp, &filename_list) {
1306 fn = flist_entry(entry, struct file_name, list);
1307 free(fn->filename);
1308 flist_del(&fn->list);
1309 free(fn);
1310 }
1311
1312 fio_file_hash_unlock();
1313}
1314
1315static struct fio_file *alloc_new_file(struct thread_data *td)
1316{
1317 struct fio_file *f;
1318
1319 f = smalloc(sizeof(*f));
1320 if (!f) {
1321 assert(0);
1322 return NULL;
1323 }
1324
1325 f->fd = -1;
1326 f->shadow_fd = -1;
1327 fio_file_reset(td, f);
1328 return f;
1329}
1330
1331bool exists_and_not_regfile(const char *filename)
1332{
1333 struct stat sb;
1334
1335 if (lstat(filename, &sb) == -1)
1336 return false;
1337
1338#ifndef WIN32 /* NOT Windows */
1339 if (S_ISREG(sb.st_mode))
1340 return false;
1341#else
1342 /* \\.\ is the device namespace in Windows, where every file
1343 * is a device node */
1344 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1345 return false;
1346#endif
1347
1348 return true;
1349}
1350
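/*
 * Add one file to the job. The name is prefixed with the job's directory
 * (via set_name_idx) when one is set, duplicate names from cloned jobs
 * are skipped, and the td->files array (plus file_locks) is grown as
 * needed. Returns the index of the new file in td->files.
 */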
1351int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1352{
1353 int cur_files = td->files_index;
1354 char file_name[PATH_MAX];
1355 struct fio_file *f;
1356 int len = 0;
1357
1358 dprint(FD_FILE, "add file %s\n", fname);
1359
1360 if (td->o.directory)
1361 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1362 td->o.unique_filename);
1363
1364 sprintf(file_name + len, "%s", fname);
1365
1366 /* cloned jobs reuse already-allocated names; skip them unless they refer to existing non-regular files */
1367 if (numjob && is_already_allocated(file_name) &&
1368 !exists_and_not_regfile(fname))
1369 return 0;
1370
1371 f = alloc_new_file(td);
1372
1373 if (td->files_size <= td->files_index) {
1374 unsigned int new_size = td->o.nr_files + 1;
1375
1376 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1377
1378 td->files = realloc(td->files, new_size * sizeof(f));
1379 if (td->files == NULL) {
1380 log_err("fio: realloc OOM\n");
1381 assert(0);
1382 }
1383 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1384 td->file_locks = realloc(td->file_locks, new_size);
1385 if (!td->file_locks) {
1386 log_err("fio: realloc OOM\n");
1387 assert(0);
1388 }
1389 td->file_locks[cur_files] = FILE_LOCK_NONE;
1390 }
1391 td->files_size = new_size;
1392 }
1393 td->files[cur_files] = f;
1394 f->fileno = cur_files;
1395
1396 /*
1397 * init function, io engine may not be loaded yet
1398 */
1399 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1400 f->real_file_size = -1ULL;
1401
1402 f->file_name = smalloc_strdup(file_name);
1403 if (!f->file_name)
1404 assert(0);
1405
1406 get_file_type(f);
1407
1408 switch (td->o.file_lock_mode) {
1409 case FILE_LOCK_NONE:
1410 break;
1411 case FILE_LOCK_READWRITE:
1412 f->rwlock = fio_rwlock_init();
1413 break;
1414 case FILE_LOCK_EXCLUSIVE:
1415 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1416 break;
1417 default:
1418 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1419 assert(0);
1420 }
1421
1422 td->files_index++;
1423 if (f->filetype == FIO_TYPE_FILE)
1424 td->nr_normal_files++;
1425
1426 set_already_allocated(file_name);
1427
1428 if (inc)
1429 td->o.nr_files++;
1430
1431 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1432 cur_files);
1433
1434 return cur_files;
1435}
1436
1437int add_file_exclusive(struct thread_data *td, const char *fname)
1438{
1439 struct fio_file *f;
1440 unsigned int i;
1441
1442 for_each_file(td, f, i) {
1443 if (!strcmp(f->file_name, fname))
1444 return i;
1445 }
1446
1447 return add_file(td, fname, 0, 1);
1448}
1449
1450void get_file(struct fio_file *f)
1451{
1452 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1453 assert(fio_file_open(f));
1454 f->references++;
1455}
1456
1457int put_file(struct thread_data *td, struct fio_file *f)
1458{
1459 int f_ret = 0, ret = 0;
1460
1461 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1462
1463 if (!fio_file_open(f)) {
1464 assert(f->fd == -1);
1465 return 0;
1466 }
1467
1468 assert(f->references);
1469 if (--f->references)
1470 return 0;
1471
1472 if (should_fsync(td) && td->o.fsync_on_close) {
1473 f_ret = fsync(f->fd);
1474 if (f_ret < 0)
1475 f_ret = errno;
1476 }
1477
1478 if (td->io_ops->close_file)
1479 ret = td->io_ops->close_file(td, f);
1480
1481 if (!ret)
1482 ret = f_ret;
1483
1484 td->nr_open_files--;
1485 fio_file_clear_open(f);
1486 assert(f->fd == -1);
1487 return ret;
1488}
1489
1490void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1491{
1492 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1493 return;
1494
1495 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1496 if (ddir == DDIR_READ)
1497 fio_rwlock_read(f->rwlock);
1498 else
1499 fio_rwlock_write(f->rwlock);
1500 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1501 fio_mutex_down(f->lock);
1502
1503 td->file_locks[f->fileno] = td->o.file_lock_mode;
1504}
1505
1506void unlock_file(struct thread_data *td, struct fio_file *f)
1507{
1508 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1509 return;
1510
1511 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1512 fio_rwlock_unlock(f->rwlock);
1513 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1514 fio_mutex_up(f->lock);
1515
1516 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1517}
1518
1519void unlock_file_all(struct thread_data *td, struct fio_file *f)
1520{
1521 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1522 return;
1523 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1524 unlock_file(td, f);
1525}
1526
1527static int recurse_dir(struct thread_data *td, const char *dirname)
1528{
1529 struct dirent *dir;
1530 int ret = 0;
1531 DIR *D;
1532
1533 D = opendir(dirname);
1534 if (!D) {
1535 char buf[FIO_VERROR_SIZE];
1536
1537 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1538 td_verror(td, errno, buf);
1539 return 1;
1540 }
1541
1542 while ((dir = readdir(D)) != NULL) {
1543 char full_path[PATH_MAX];
1544 struct stat sb;
1545
1546 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1547 continue;
1548
1549 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1550
1551 if (lstat(full_path, &sb) == -1) {
1552 if (errno == ENOENT)
1553 continue;
1554 td_verror(td, errno, "lstat");
1555 ret = 1;
1556 break;
1557 }
1558
1559 if (S_ISREG(sb.st_mode)) {
1560 add_file(td, full_path, 0, 1);
1561 continue;
1562 }
1563 if (!S_ISDIR(sb.st_mode))
1564 continue;
1565
1566 ret = recurse_dir(td, full_path);
1567 if (ret)
1568 break;
1569 }
1570
1571 closedir(D);
1572 return ret;
1573}
1574
1575int add_dir_files(struct thread_data *td, const char *path)
1576{
1577 int ret = recurse_dir(td, path);
1578
1579 if (!ret)
1580 log_info("fio: opendir added %d files\n", td->o.nr_files);
1581
1582 return ret;
1583}
1584
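/*
 * Copy the file list from the originating job into a cloned one: names
 * are duplicated, lock objects are shared according to file_lock_mode,
 * and per-file runtime state starts fresh via alloc_new_file().
 */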
1585void dup_files(struct thread_data *td, struct thread_data *org)
1586{
1587 struct fio_file *f;
1588 unsigned int i;
1589
1590 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1591
1592 if (!org->files)
1593 return;
1594
1595 td->files = malloc(org->files_index * sizeof(f));
1596
1597 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1598 td->file_locks = malloc(org->files_index);
1599
1600 for_each_file(org, f, i) {
1601 struct fio_file *__f;
1602
1603 __f = alloc_new_file(td);
1604
1605 if (f->file_name) {
1606 __f->file_name = smalloc_strdup(f->file_name);
1607 if (!__f->file_name)
1608 assert(0);
1609
1610 __f->filetype = f->filetype;
1611 }
1612
1613 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1614 __f->lock = f->lock;
1615 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1616 __f->rwlock = f->rwlock;
1617
1618 td->files[i] = __f;
1619 }
1620}
1621
1622/*
1623 * Returns the index that matches the filename, or -1 if not there
1624 */
1625int get_fileno(struct thread_data *td, const char *fname)
1626{
1627 struct fio_file *f;
1628 unsigned int i;
1629
1630 for_each_file(td, f, i)
1631 if (!strcmp(f->file_name, fname))
1632 return i;
1633
1634 return -1;
1635}
1636
1637/*
1638 * For log usage, where we add/open/close files automatically
1639 */
1640void free_release_files(struct thread_data *td)
1641{
1642 close_files(td);
1643 td->o.nr_files = 0;
1644 td->o.open_files = 0;
1645 td->files_index = 0;
1646 td->nr_normal_files = 0;
1647}
1648
1649void fio_file_reset(struct thread_data *td, struct fio_file *f)
1650{
1651 int i;
1652
1653 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1654 f->last_pos[i] = f->file_offset;
1655 f->last_start[i] = -1ULL;
1656 }
1657
1658 if (fio_file_axmap(f))
1659 axmap_reset(f->io_axmap);
1660 else if (fio_file_lfsr(f))
1661 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1662}
1663
1664bool fio_files_done(struct thread_data *td)
1665{
1666 struct fio_file *f;
1667 unsigned int i;
1668
1669 for_each_file(td, f, i)
1670 if (!fio_file_done(f))
1671 return false;
1672
1673 return true;
1674}
1675
1676/* free memory used in initialization phase only */
1677void filesetup_mem_free(void)
1678{
1679 free_already_allocated();
1680}