Ignore pre-read for FIO_NOIO td
[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27/*
28 * List entry for filename_list
29 */
30struct file_name {
31 struct flist_head list;
32 char *filename;
33};
34
35static inline void clear_error(struct thread_data *td)
36{
37 td->error = 0;
38 td->verror[0] = '\0';
39}
40
41/*
42 * Leaves f->fd open on success, caller must close
43 */
44static int extend_file(struct thread_data *td, struct fio_file *f)
45{
46 int r, new_layout = 0, unlink_file = 0, flags;
47 unsigned long long left;
48 unsigned int bs;
49 char *b = NULL;
50
51 if (read_only) {
52 log_err("fio: refusing extend of file due to read-only\n");
53 return 0;
54 }
55
56 /*
57 * check if we need to lay the file out completely again. fio
58 * does that for operations involving reads, or for writes
59 * where overwrite is set
60 */
61 if (td_read(td) ||
62 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
63 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
64 new_layout = 1;
65 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
66 unlink_file = 1;
67
68 if (unlink_file || new_layout) {
69 int ret;
70
71 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
72
73 ret = td_io_unlink_file(td, f);
74 if (ret != 0 && ret != ENOENT) {
75 td_verror(td, errno, "unlink");
76 return 1;
77 }
78 }
79
80 flags = O_WRONLY;
81 if (td->o.allow_create)
82 flags |= O_CREAT;
83 if (new_layout)
84 flags |= O_TRUNC;
85
86#ifdef WIN32
87 flags |= _O_BINARY;
88#endif
89
90 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
91 f->fd = open(f->file_name, flags, 0644);
92 if (f->fd < 0) {
93 int err = errno;
94
95 if (err == ENOENT && !td->o.allow_create)
96 log_err("fio: file creation disallowed by "
97 "allow_file_create=0\n");
98 else
99 td_verror(td, err, "open");
100 return 1;
101 }
102
103#ifdef CONFIG_POSIX_FALLOCATE
104 if (!td->o.fill_device) {
105 switch (td->o.fallocate_mode) {
106 case FIO_FALLOCATE_NONE:
107 break;
108 case FIO_FALLOCATE_POSIX:
109 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
110 f->file_name,
111 (unsigned long long) f->real_file_size);
112
113 r = posix_fallocate(f->fd, 0, f->real_file_size);
114 if (r > 0) {
115 log_err("fio: posix_fallocate fails: %s\n",
116 strerror(r));
117 }
118 break;
119#ifdef CONFIG_LINUX_FALLOCATE
120 case FIO_FALLOCATE_KEEP_SIZE:
121 dprint(FD_FILE,
122 "fallocate(FALLOC_FL_KEEP_SIZE) "
123 "file %s size %llu\n", f->file_name,
124 (unsigned long long) f->real_file_size);
125
126 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
127 f->real_file_size);
128 if (r != 0)
129 td_verror(td, errno, "fallocate");
130
131 break;
132#endif /* CONFIG_LINUX_FALLOCATE */
133 default:
134 log_err("fio: unknown fallocate mode: %d\n",
135 td->o.fallocate_mode);
136 assert(0);
137 }
138 }
139#endif /* CONFIG_POSIX_FALLOCATE */
140
141 /*
142 * If our jobs don't require regular files initially, we're done.
143 */
144 if (!new_layout)
145 goto done;
146
147 /*
148 * The size will be -1ULL when fill_device is used, so don't truncate
149 * or fallocate this file, just write it
150 */
151 if (!td->o.fill_device) {
152 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
153 (unsigned long long) f->real_file_size);
154 if (ftruncate(f->fd, f->real_file_size) == -1) {
155 if (errno != EFBIG) {
156 td_verror(td, errno, "ftruncate");
157 goto err;
158 }
159 }
160 }
161
162 left = f->real_file_size;
163 bs = td->o.max_bs[DDIR_WRITE];
164 if (bs > left)
165 bs = left;
166
167 b = malloc(bs);
168 if (!b) {
169 td_verror(td, errno, "malloc");
170 goto err;
171 }
172
173 while (left && !td->terminate) {
174 if (bs > left)
175 bs = left;
176
177 fill_io_buffer(td, b, bs, bs);
178
179 r = write(f->fd, b, bs);
180
181 if (r > 0) {
182 left -= r;
183 continue;
184 } else {
185 if (r < 0) {
186 int __e = errno;
187
188 if (__e == ENOSPC) {
189 if (td->o.fill_device)
190 break;
191 log_info("fio: ENOSPC on laying out "
192 "file, stopping\n");
193 break;
194 }
195 td_verror(td, errno, "write");
196 } else
197 td_verror(td, EIO, "write");
198
199 break;
200 }
201 }
202
203 if (td->terminate) {
204 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
205 td_io_unlink_file(td, f);
206 } else if (td->o.create_fsync) {
207 if (fsync(f->fd) < 0) {
208 td_verror(td, errno, "fsync");
209 goto err;
210 }
211 }
212 if (td->o.fill_device && !td_write(td)) {
213 fio_file_clear_size_known(f);
214 if (td_io_get_file_size(td, f))
215 goto err;
216 if (f->io_size > f->real_file_size)
217 f->io_size = f->real_file_size;
218 }
219
220 free(b);
221done:
222 return 0;
223err:
224 close(f->fd);
225 f->fd = -1;
226 if (b)
227 free(b);
228 return 1;
229}
230
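/*
 * Sequentially read the file's I/O region into a throwaway buffer so
 * the data ends up in the page cache before the job proper starts.
 * Skipped for ioengines flagged FIO_PIPEIO or FIO_NOIO, where
 * pre-reading makes no sense.
 */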
231static int pre_read_file(struct thread_data *td, struct fio_file *f)
232{
233 int ret = 0, r, did_open = 0, old_runstate;
234 unsigned long long left;
235 unsigned int bs;
236 char *b;
237
238 if (td_ioengine_flagged(td, FIO_PIPEIO) ||
239 td_ioengine_flagged(td, FIO_NOIO))
240 return 0;
241
242 if (!fio_file_open(f)) {
243 if (td->io_ops->open_file(td, f)) {
244 log_err("fio: cannot pre-read, failed to open file\n");
245 return 1;
246 }
247 did_open = 1;
248 }
249
250 old_runstate = td_bump_runstate(td, TD_PRE_READING);
251
252 left = f->io_size;
253 bs = td->o.max_bs[DDIR_READ];
254 if (bs > left)
255 bs = left;
256
257 b = malloc(bs);
258 if (!b) {
259 td_verror(td, errno, "malloc");
260 ret = 1;
261 goto error;
262 }
263 memset(b, 0, bs);
264
265 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
266 td_verror(td, errno, "lseek");
267 log_err("fio: failed to lseek pre-read file\n");
268 ret = 1;
269 goto error;
270 }
271
272 while (left && !td->terminate) {
273 if (bs > left)
274 bs = left;
275
276 r = read(f->fd, b, bs);
277
278 if (r == (int) bs) {
279 left -= bs;
280 continue;
281 } else {
282 td_verror(td, EIO, "pre_read");
283 break;
284 }
285 }
286
287error:
288 td_restore_runstate(td, old_runstate);
289
290 if (did_open)
291 td->io_ops->close_file(td, f);
292
293 free(b);
294 return ret;
295}
296
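/*
 * Pick a per-file size between file_size_low and file_size_high,
 * rounded down to a multiple of the minimum block size.
 */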
297unsigned long long get_rand_file_size(struct thread_data *td)
298{
299 unsigned long long ret, sized;
300 uint64_t frand_max;
301 unsigned long r;
302
303 frand_max = rand_max(&td->file_size_state);
304 r = __rand(&td->file_size_state);
305 sized = td->o.file_size_high - td->o.file_size_low;
306 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
307 ret += td->o.file_size_low;
308 ret -= (ret % td->o.rw_min_bs);
309 return ret;
310}
311
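/*
 * The next three helpers fill in f->real_file_size for regular files,
 * block devices and character devices, respectively.
 */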
312static int file_size(struct thread_data *td, struct fio_file *f)
313{
314 struct stat st;
315
316 if (stat(f->file_name, &st) == -1) {
317 td_verror(td, errno, "fstat");
318 return 1;
319 }
320
321 f->real_file_size = st.st_size;
322 return 0;
323}
324
325static int bdev_size(struct thread_data *td, struct fio_file *f)
326{
327 unsigned long long bytes = 0;
328 int r;
329
330 if (td->io_ops->open_file(td, f)) {
331 log_err("fio: failed opening blockdev %s for size check\n",
332 f->file_name);
333 return 1;
334 }
335
336 r = blockdev_size(f, &bytes);
337 if (r) {
338 td_verror(td, r, "blockdev_size");
339 goto err;
340 }
341
342 if (!bytes) {
343 log_err("%s: zero sized block device?\n", f->file_name);
344 goto err;
345 }
346
347 f->real_file_size = bytes;
348 td->io_ops->close_file(td, f);
349 return 0;
350err:
351 td->io_ops->close_file(td, f);
352 return 1;
353}
354
355static int char_size(struct thread_data *td, struct fio_file *f)
356{
357#ifdef FIO_HAVE_CHARDEV_SIZE
358 unsigned long long bytes = 0;
359 int r;
360
361 if (td->io_ops->open_file(td, f)) {
362 log_err("fio: failed opening chardev %s for size check\n",
363 f->file_name);
364 return 1;
365 }
366
367 r = chardev_size(f, &bytes);
368 if (r) {
369 td_verror(td, r, "chardev_size");
370 goto err;
371 }
372
373 if (!bytes) {
374 log_err("%s: zero sized char device?\n", f->file_name);
375 goto err;
376 }
377
378 f->real_file_size = bytes;
379 td->io_ops->close_file(td, f);
380 return 0;
381err:
382 td->io_ops->close_file(td, f);
383 return 1;
384#else
385 f->real_file_size = -1ULL;
386 return 0;
387#endif
388}
389
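/*
 * Determine ->real_file_size based on the file type, check that the
 * configured offset fits inside it, and mark the size as known.
 */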
390static int get_file_size(struct thread_data *td, struct fio_file *f)
391{
392 int ret = 0;
393
394 if (fio_file_size_known(f))
395 return 0;
396
397 if (f->filetype == FIO_TYPE_FILE)
398 ret = file_size(td, f);
399 else if (f->filetype == FIO_TYPE_BLOCK)
400 ret = bdev_size(td, f);
401 else if (f->filetype == FIO_TYPE_CHAR)
402 ret = char_size(td, f);
403 else
404 f->real_file_size = -1ULL;
405
406 /*
407 * Leave ->real_file_size at 0 on failure, since that may be the
408 * expected state from the initial setup of regular files.
409 */
410 if (ret)
411 return ret;
412
413 /*
414 * If ->real_file_size is -1, the "offset extends end" check below
415 * would always trigger, but that message would make no sense. So
416 * just report the failure and return the same value (1) here.
417 */
418 if (f->real_file_size == -1ULL) {
419 log_info("%s: failed to get file size of %s\n", td->o.name,
420 f->file_name);
421 return 1;
422 }
423
424 if (td->o.start_offset && f->file_offset == 0)
425 dprint(FD_FILE, "offset of file %s not initialized yet\n",
426 f->file_name);
427 /*
428 * ->file_offset normally hasn't been initialized yet, so this
429 * is basically always false.
430 */
431 if (f->file_offset > f->real_file_size) {
432 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
433 (unsigned long long) f->file_offset,
434 (unsigned long long) f->real_file_size);
435 return 1;
436 }
437
438 fio_file_set_size_known(f);
439 return 0;
440}
441
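/*
 * Drop cached data for the given range: via the ioengine ->invalidate
 * hook if one exists, posix_fadvise(POSIX_FADV_DONTNEED) for regular
 * files, or the OS block device flush helper for block devices.
 * Failures are logged but never treated as fatal.
 */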
442static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
443 unsigned long long off,
444 unsigned long long len)
445{
446 int errval = 0, ret = 0;
447
448#ifdef CONFIG_ESX
449 return 0;
450#endif
451
452 if (len == -1ULL)
453 len = f->io_size;
454 if (off == -1ULL)
455 off = f->file_offset;
456
457 if (len == -1ULL || off == -1ULL)
458 return 0;
459
460 if (td->io_ops->invalidate) {
461 dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
462 f->file_name);
463 ret = td->io_ops->invalidate(td, f);
464 if (ret < 0)
465 errval = -ret;
466 } else if (f->filetype == FIO_TYPE_FILE) {
467 dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
468 f->file_name, off, len);
469 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
470 if (ret)
471 errval = ret;
472 } else if (f->filetype == FIO_TYPE_BLOCK) {
473 int retry_count = 0;
474
475 dprint(FD_IO, "drop page cache %s\n", f->file_name);
476 ret = blockdev_invalidate_cache(f);
477 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
478 /*
479 * Linux multipath devices reject ioctl while
480 * the maps are being updated. That window can
481 * last tens of milliseconds; we'll try up to
482 * a quarter of a second.
483 */
484 usleep(10000);
485 ret = blockdev_invalidate_cache(f);
486 }
487 if (ret < 0 && errno == EACCES && geteuid()) {
488 if (!root_warn) {
489 log_err("fio: only root may flush block "
490 "devices. Cache flush bypassed!\n");
491 root_warn = 1;
492 }
493 ret = 0;
494 }
495 if (ret < 0)
496 errval = errno;
497 else if (ret) /* probably not supported */
498 errval = ret;
499 } else if (f->filetype == FIO_TYPE_CHAR ||
500 f->filetype == FIO_TYPE_PIPE) {
501 dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
502 ret = 0;
503 }
504
505 /*
506 * A failed cache flush isn't a fatal condition, and we know it will
507 * happen on some platforms where we don't have the proper function
508 * to flush e.g. block device caches. So just warn and
509 * continue on our way.
510 */
511 if (errval)
512 log_info("fio: cache invalidation of %s failed: %s\n",
513 f->file_name, strerror(errval));
514
515 return 0;
516
517}
518
519int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
520{
521 if (!fio_file_open(f))
522 return 0;
523
524 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
525}
526
527int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
528{
529 int ret = 0;
530
531 dprint(FD_FILE, "fd close %s\n", f->file_name);
532
533 remove_file_hash(f);
534
535 if (close(f->fd) < 0)
536 ret = errno;
537
538 f->fd = -1;
539
540 if (f->shadow_fd != -1) {
541 close(f->shadow_fd);
542 f->shadow_fd = -1;
543 }
544
545 f->engine_pos = 0;
546 return ret;
547}
548
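/*
 * Open a file, first consulting the global file hash so that an
 * already opened instance can share its lock. Returns whether the
 * file was found in the hash; f->fd is set either way.
 */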
549int file_lookup_open(struct fio_file *f, int flags)
550{
551 struct fio_file *__f;
552 int from_hash;
553
554 __f = lookup_file_hash(f->file_name);
555 if (__f) {
556 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
557 f->lock = __f->lock;
558 from_hash = 1;
559 } else {
560 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
561 from_hash = 0;
562 }
563
564#ifdef WIN32
565 flags |= _O_BINARY;
566#endif
567
568 f->fd = open(f->file_name, flags, 0600);
569 return from_hash;
570}
571
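/*
 * Close any stashed shadow descriptors (see generic_open_file()) to
 * free up file descriptors, returning how many were closed.
 */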
572static int file_close_shadow_fds(struct thread_data *td)
573{
574 struct fio_file *f;
575 int num_closed = 0;
576 unsigned int i;
577
578 for_each_file(td, f, i) {
579 if (f->shadow_fd == -1)
580 continue;
581
582 close(f->shadow_fd);
583 f->shadow_fd = -1;
584 num_closed++;
585 }
586
587 return num_closed;
588}
589
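/*
 * Default .open_file implementation: build the open(2) flags from the
 * job options (direct, atomic, sync, create), handle "-" as
 * stdin/stdout, and retry without FIO_O_NOATIME on EPERM or after
 * closing shadow fds on EMFILE.
 */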
590int generic_open_file(struct thread_data *td, struct fio_file *f)
591{
592 int is_std = 0;
593 int flags = 0;
594 int from_hash = 0;
595
596 dprint(FD_FILE, "fd open %s\n", f->file_name);
597
598 if (!strcmp(f->file_name, "-")) {
599 if (td_rw(td)) {
600 log_err("fio: can't read/write to stdin/out\n");
601 return 1;
602 }
603 is_std = 1;
604
605 /*
606 * move output logging to stderr, if we are writing to stdout
607 */
608 if (td_write(td))
609 f_out = stderr;
610 }
611
612 if (td_trim(td))
613 goto skip_flags;
614 if (td->o.odirect)
615 flags |= OS_O_DIRECT;
616 if (td->o.oatomic) {
617 if (!FIO_O_ATOMIC) {
618 td_verror(td, EINVAL, "OS does not support atomic IO");
619 return 1;
620 }
621 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
622 }
623 if (td->o.sync_io)
624 flags |= O_SYNC;
625 if (td->o.create_on_open && td->o.allow_create)
626 flags |= O_CREAT;
627skip_flags:
628 if (f->filetype != FIO_TYPE_FILE)
629 flags |= FIO_O_NOATIME;
630
631open_again:
632 if (td_write(td)) {
633 if (!read_only)
634 flags |= O_RDWR;
635
636 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
637 flags |= O_CREAT;
638
639 if (is_std)
640 f->fd = dup(STDOUT_FILENO);
641 else
642 from_hash = file_lookup_open(f, flags);
643 } else if (td_read(td)) {
644 if (f->filetype == FIO_TYPE_CHAR && !read_only)
645 flags |= O_RDWR;
646 else
647 flags |= O_RDONLY;
648
649 if (is_std)
650 f->fd = dup(STDIN_FILENO);
651 else
652 from_hash = file_lookup_open(f, flags);
653 } else if (td_trim(td)) {
654 assert(!td_rw(td)); /* should have matched above */
655 flags |= O_RDWR;
656 from_hash = file_lookup_open(f, flags);
657 }
658
659 if (f->fd == -1) {
660 char buf[FIO_VERROR_SIZE];
661 int __e = errno;
662
663 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
664 flags &= ~FIO_O_NOATIME;
665 goto open_again;
666 }
667 if (__e == EMFILE && file_close_shadow_fds(td))
668 goto open_again;
669
670 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
671
672 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
673 log_err("fio: looks like your file system does not " \
674 "support direct=1/buffered=0\n");
675 }
676
677 td_verror(td, __e, buf);
678 return 1;
679 }
680
681 if (!from_hash && f->fd != -1) {
682 if (add_file_hash(f)) {
683 int fio_unused ret;
684
685 /*
686 * Stash away descriptor for later close. This is to
687 * work-around a "feature" on Linux, where a close of
688 * an fd that has been opened for write will trigger
689 * udev to call blkid to check partitions, fs id, etc.
690 * That pollutes the device cache, which can slow down
691 * unbuffered accesses.
692 */
693 if (f->shadow_fd == -1)
694 f->shadow_fd = f->fd;
695 else {
696 /*
697 * OK to ignore, we haven't done anything
698 * with it
699 */
700 ret = generic_close_file(td, f);
701 }
702 goto open_again;
703 }
704 }
705
706 return 0;
707}
708
709/*
710 * This function, which simply wraps get_file_size(), is the default
711 * .get_file_size implementation for the majority of I/O engines.
712 */
713int generic_get_file_size(struct thread_data *td, struct fio_file *f)
714{
715 return get_file_size(td, f);
716}
717
718/*
719 * open/close all files, so that ->real_file_size gets set
720 */
721static int get_file_sizes(struct thread_data *td)
722{
723 struct fio_file *f;
724 unsigned int i;
725 int err = 0;
726
727 for_each_file(td, f, i) {
728 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
729 f->file_name);
730
731 if (td_io_get_file_size(td, f)) {
732 if (td->error != ENOENT) {
733 log_err("%s\n", td->verror);
734 err = 1;
735 break;
736 }
737 clear_error(td);
738 }
739
740 /*
741 * There are corner cases where we end up with -1 for
742 * ->real_file_size due to unsupported file type, etc.
743 * We then just set it to the size= option value divided by the
744 * number of files, similar to the way file ->io_size is set.
745 * stat(2) failure doesn't set ->real_file_size to -1.
746 */
747 if (f->real_file_size == -1ULL && td->o.size)
748 f->real_file_size = td->o.size / td->o.nr_files;
749 }
750
751 return err;
752}
753
754struct fio_mount {
755 struct flist_head list;
756 const char *base;
757 char __base[256];
758 unsigned int key;
759};
760
761/*
762 * Get the number of free bytes for each file on each unique mount.
763 */
764static unsigned long long get_fs_free_counts(struct thread_data *td)
765{
766 struct flist_head *n, *tmp;
767 unsigned long long ret = 0;
768 struct fio_mount *fm;
769 FLIST_HEAD(list);
770 struct fio_file *f;
771 unsigned int i;
772
773 for_each_file(td, f, i) {
774 struct stat sb;
775 char buf[256];
776
777 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
778 if (f->real_file_size != -1ULL)
779 ret += f->real_file_size;
780 continue;
781 } else if (f->filetype != FIO_TYPE_FILE)
782 continue;
783
784 buf[255] = '\0';
785 strncpy(buf, f->file_name, 255);
786
787 if (stat(buf, &sb) < 0) {
788 if (errno != ENOENT)
789 break;
790 strcpy(buf, ".");
791 if (stat(buf, &sb) < 0)
792 break;
793 }
794
795 fm = NULL;
796 flist_for_each(n, &list) {
797 fm = flist_entry(n, struct fio_mount, list);
798 if (fm->key == sb.st_dev)
799 break;
800
801 fm = NULL;
802 }
803
804 if (fm)
805 continue;
806
807 fm = calloc(1, sizeof(*fm));
808 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
809 fm->base = basename(fm->__base);
810 fm->key = sb.st_dev;
811 flist_add(&fm->list, &list);
812 }
813
814 flist_for_each_safe(n, tmp, &list) {
815 unsigned long long sz;
816
817 fm = flist_entry(n, struct fio_mount, list);
818 flist_del(&fm->list);
819
820 sz = get_fs_free_size(fm->base);
821 if (sz && sz != -1ULL)
822 ret += sz;
823
824 free(fm);
825 }
826
827 return ret;
828}
829
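/*
 * Initial offset for a file: the current end of a regular file when
 * appending, otherwise start_offset plus the per-subjob
 * offset_increment.
 */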
830uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
831{
832 struct thread_options *o = &td->o;
833
834 if (o->file_append && f->filetype == FIO_TYPE_FILE)
835 return f->real_file_size;
836
837 return td->o.start_offset +
838 td->subjob_number * td->o.offset_increment;
839}
840
841/*
842 * Open the files and set up file sizes, creating files if necessary.
843 */
844int setup_files(struct thread_data *td)
845{
846 unsigned long long total_size, extend_size;
847 struct thread_options *o = &td->o;
848 struct fio_file *f;
849 unsigned int i, nr_fs_extra = 0;
850 int err = 0, need_extend;
851 int old_state;
852 const unsigned int bs = td_min_bs(td);
853 uint64_t fs = 0;
854
855 dprint(FD_FILE, "setup files\n");
856
857 old_state = td_bump_runstate(td, TD_SETTING_UP);
858
859 if (o->read_iolog_file)
860 goto done;
861
862 /*
863 * Find out physical size of files or devices for this thread,
864 * before we determine I/O size and range of our targets.
865 * If ioengine defines a setup() method, it's responsible for
866 * opening the files and setting f->real_file_size to indicate
867 * the valid range for that file.
868 */
869 if (td->io_ops->setup)
870 err = td->io_ops->setup(td);
871 else
872 err = get_file_sizes(td);
873
874 if (err)
875 goto err_out;
876
877 /*
878 * check sizes. if the files/devices do not exist and the size
879 * isn't passed to fio, abort.
880 */
881 total_size = 0;
882 for_each_file(td, f, i) {
883 f->fileno = i;
884 if (f->real_file_size == -1ULL)
885 total_size = -1ULL;
886 else
887 total_size += f->real_file_size;
888 }
889
890 if (o->fill_device)
891 td->fill_device_size = get_fs_free_counts(td);
892
893 /*
894 * device/file sizes are zero and no size given, punt
895 */
896 if ((!total_size || total_size == -1ULL) && !o->size &&
897 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
898 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
899 log_err("%s: you need to specify size=\n", o->name);
900 td_verror(td, EINVAL, "total_file_size");
901 goto err_out;
902 }
903
904 /*
905 * Calculate per-file size and potential extra size for the
906 * first files, if needed (i.e. if we don't have a fixed size).
907 */
908 if (!o->file_size_low && o->nr_files) {
909 uint64_t all_fs;
910
911 fs = o->size / o->nr_files;
912 all_fs = fs * o->nr_files;
913
914 if (all_fs < o->size)
915 nr_fs_extra = (o->size - all_fs) / bs;
916 }
917
918 /*
919 * now file sizes are known, so we can set ->io_size. if size= is
920 * not given, ->io_size is just equal to ->real_file_size. if size
921 * is given, ->io_size is size / nr_files.
922 */
923 extend_size = total_size = 0;
924 need_extend = 0;
925 for_each_file(td, f, i) {
926 f->file_offset = get_start_offset(td, f);
927
928 /*
929 * Update ->io_size depending on options specified.
930 * ->file_size_low being 0 means filesize option isn't set.
931 * A non-zero ->file_size_low equal to ->file_size_high means the
932 * filesize option is set to a fixed size.
933 * A non-zero ->file_size_low different from ->file_size_high means
934 * the filesize option is set as a range.
935 */
936 if (!o->file_size_low) {
937 /*
938 * no file size or range given, file size is equal to
939 * total size divided by number of files. If the size
940 * doesn't divide nicely with the min blocksize,
941 * make the first files bigger.
942 */
943 f->io_size = fs;
944 if (nr_fs_extra) {
945 nr_fs_extra--;
946 f->io_size += bs;
947 }
948
949 /*
950 * We normally don't come here for regular files, but
951 * if the result is 0 for a regular file, set it to the
952 * real file size. This could be the size of an existing
953 * file, but will otherwise be 0. A new file won't be
954 * created because
955 * ->io_size + ->file_offset equals ->real_file_size.
956 */
957 if (!f->io_size) {
958 if (f->file_offset > f->real_file_size)
959 goto err_offset;
960 f->io_size = f->real_file_size - f->file_offset;
961 if (!f->io_size)
962 log_info("fio: file %s may be ignored\n",
963 f->file_name);
964 }
965 } else if (f->real_file_size < o->file_size_low ||
966 f->real_file_size > o->file_size_high) {
967 if (f->file_offset > o->file_size_low)
968 goto err_offset;
969 /*
970 * file size given. if it's fixed, use that. if it's a
971 * range, generate a random size in-between.
972 */
973 if (o->file_size_low == o->file_size_high)
974 f->io_size = o->file_size_low - f->file_offset;
975 else {
976 f->io_size = get_rand_file_size(td)
977 - f->file_offset;
978 }
979 } else
980 f->io_size = f->real_file_size - f->file_offset;
981
982 if (f->io_size == -1ULL)
983 total_size = -1ULL;
984 else {
985 if (o->size_percent) {
986 f->io_size = (f->io_size * o->size_percent) / 100;
987 f->io_size -= (f->io_size % td_min_bs(td));
988 }
989 total_size += f->io_size;
990 }
991
992 if (f->filetype == FIO_TYPE_FILE &&
993 (f->io_size + f->file_offset) > f->real_file_size &&
994 !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
995 if (!o->create_on_open) {
996 need_extend++;
997 extend_size += (f->io_size + f->file_offset);
998 fio_file_set_extend(f);
999 } else
1000 f->real_file_size = f->io_size + f->file_offset;
1001 }
1002 }
1003
1004 if (td->o.block_error_hist) {
1005 int len;
1006
1007 assert(td->o.nr_files == 1); /* checked in fixup_options */
1008 f = td->files[0];
1009 len = f->io_size / td->o.bs[DDIR_TRIM];
1010 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1011 log_err("fio: cannot calculate block histogram with "
1012 "%d trim blocks, maximum %d\n",
1013 len, MAX_NR_BLOCK_INFOS);
1014 td_verror(td, EINVAL, "block_error_hist");
1015 goto err_out;
1016 }
1017
1018 td->ts.nr_block_infos = len;
1019 for (i = 0; i < len; i++)
1020 td->ts.block_infos[i] =
1021 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
1022 } else
1023 td->ts.nr_block_infos = 0;
1024
1025 if (!o->size || (total_size && o->size > total_size))
1026 o->size = total_size;
1027
1028 if (o->size < td_min_bs(td)) {
1029 log_err("fio: blocksize too large for data set\n");
1030 goto err_out;
1031 }
1032
1033 /*
1034 * See if we need to extend some files, typically needed when our
1035 * target regular files don't exist yet, but our jobs require them
1036 * initially due to read I/Os.
1037 */
1038 if (need_extend) {
1039 temp_stall_ts = 1;
1040 if (output_format & FIO_OUTPUT_NORMAL) {
1041 log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1042 o->name,
1043 need_extend > 1 ? "s" : "",
1044 need_extend,
1045 need_extend > 1 ? "s" : "",
1046 need_extend > 1 ? "total " : "",
1047 extend_size >> 20);
1048 }
1049
1050 for_each_file(td, f, i) {
1051 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1052
1053 if (!fio_file_extend(f))
1054 continue;
1055
1056 assert(f->filetype == FIO_TYPE_FILE);
1057 fio_file_clear_extend(f);
1058 if (!o->fill_device) {
1059 old_len = f->real_file_size;
1060 extend_len = f->io_size + f->file_offset -
1061 old_len;
1062 }
1063 f->real_file_size = (f->io_size + f->file_offset);
1064 err = extend_file(td, f);
1065 if (err)
1066 break;
1067
1068 err = __file_invalidate_cache(td, f, old_len,
1069 extend_len);
1070
1071 /*
1072 * Shut up static checker
1073 */
1074 if (f->fd != -1)
1075 close(f->fd);
1076
1077 f->fd = -1;
1078 if (err)
1079 break;
1080 }
1081 temp_stall_ts = 0;
1082 }
1083
1084 if (err)
1085 goto err_out;
1086
1087 if (!o->zone_size)
1088 o->zone_size = o->size;
1089
1090 /*
1091 * iolog already set the total io size, if we read back
1092 * stored entries.
1093 */
1094 if (!o->read_iolog_file) {
1095 if (o->io_size)
1096 td->total_io_size = o->io_size * o->loops;
1097 else
1098 td->total_io_size = o->size * o->loops;
1099 }
1100
1101done:
1102 if (o->create_only)
1103 td->done = 1;
1104
1105 td_restore_runstate(td, old_state);
1106 return 0;
1107err_offset:
1108 log_err("%s: you need to specify valid offset=\n", o->name);
1109err_out:
1110 td_restore_runstate(td, old_state);
1111 return 1;
1112}
1113
1114int pre_read_files(struct thread_data *td)
1115{
1116 struct fio_file *f;
1117 unsigned int i;
1118
1119 dprint(FD_FILE, "pre_read files\n");
1120
1121 for_each_file(td, f, i) {
1122 pre_read_file(td, f);
1123 }
1124
1125 return 1;
1126}
1127
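/*
 * Seed the zipf/pareto/gauss state for one file. The range count is
 * the file size divided by the smaller of the read and write minimum
 * block sizes; the seed comes from the file name and thread number
 * when rand_repeatable is set, else from a per-thread seed.
 */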
1128static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1129{
1130 unsigned int range_size, seed;
1131 unsigned long nranges;
1132 uint64_t fsize;
1133
1134 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1135 fsize = min(f->real_file_size, f->io_size);
1136
1137 nranges = (fsize + range_size - 1) / range_size;
1138
1139 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1140 if (!td->o.rand_repeatable)
1141 seed = td->rand_seeds[4];
1142
1143 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1144 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1145 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1146 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1147 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1148 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1149
1150 return 1;
1151}
1152
1153static int init_rand_distribution(struct thread_data *td)
1154{
1155 struct fio_file *f;
1156 unsigned int i;
1157 int state;
1158
1159 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1160 return 0;
1161
1162 state = td_bump_runstate(td, TD_SETTING_UP);
1163
1164 for_each_file(td, f, i)
1165 __init_rand_distribution(td, f);
1166
1167 td_restore_runstate(td, state);
1168
1169 return 1;
1170}
1171
1172/*
1173 * Check if the number of blocks exceeds the randomness capability of
1174 * the selected generator. Tausworthe is 32-bit, the others are fully
1175 * 64-bit capable.
1176 */
1177static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1178 uint64_t blocks)
1179{
1180 if (blocks <= FRAND32_MAX)
1181 return 0;
1182 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1183 return 0;
1184
1185 /*
1186 * If the user hasn't specified a random generator, switch
1187 * to tausworthe64 with an informational warning. If the user did
1188 * specify one, just warn.
1189 */
1190 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1191 f->file_name);
1192
1193 if (!fio_option_is_set(&td->o, random_generator)) {
1194 log_info("fio: Switching to tausworthe64. Use the "
1195 "random_generator= option to get rid of this "
1196 "warning.\n");
1197 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1198 return 0;
1199 }
1200
1201 /*
1202 * Just make this informational, to avoid breaking scripts.
1203 */
1204 log_info("fio: Use the random_generator= option to switch to lfsr or "
1205 "tausworthe64.\n");
1206 return 0;
1207}
1208
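/*
 * Set up per-file tracking of which blocks have been issued: an LFSR
 * when that generator is selected, otherwise an axmap unless
 * norandommap is set. If the map can't be allocated, only run without
 * one when softrandommap allows it.
 */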
1209int init_random_map(struct thread_data *td)
1210{
1211 unsigned long long blocks;
1212 struct fio_file *f;
1213 unsigned int i;
1214
1215 if (init_rand_distribution(td))
1216 return 0;
1217 if (!td_random(td))
1218 return 0;
1219
1220 for_each_file(td, f, i) {
1221 uint64_t fsize = min(f->real_file_size, f->io_size);
1222
1223 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1224
1225 if (check_rand_gen_limits(td, f, blocks))
1226 return 1;
1227
1228 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1229 unsigned long seed;
1230
1231 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1232
1233 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1234 fio_file_set_lfsr(f);
1235 continue;
1236 }
1237 } else if (!td->o.norandommap) {
1238 f->io_axmap = axmap_new(blocks);
1239 if (f->io_axmap) {
1240 fio_file_set_axmap(f);
1241 continue;
1242 }
1243 } else if (td->o.norandommap)
1244 continue;
1245
1246 if (!td->o.softrandommap) {
1247 log_err("fio: failed allocating random map. If running"
1248 " a large number of jobs, try the 'norandommap'"
1249 " option or set 'softrandommap'. Or give"
1250 " a larger --alloc-size to fio.\n");
1251 return 1;
1252 }
1253
1254 log_info("fio: file %s failed allocating random map. Running "
1255 "job without.\n", f->file_name);
1256 }
1257
1258 return 0;
1259}
1260
1261void close_files(struct thread_data *td)
1262{
1263 struct fio_file *f;
1264 unsigned int i;
1265
1266 for_each_file(td, f, i) {
1267 if (fio_file_open(f))
1268 td_io_close_file(td, f);
1269 }
1270}
1271
1272void close_and_free_files(struct thread_data *td)
1273{
1274 struct fio_file *f;
1275 unsigned int i;
1276
1277 dprint(FD_FILE, "close files\n");
1278
1279 for_each_file(td, f, i) {
1280 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1281 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1282 td_io_unlink_file(td, f);
1283 }
1284
1285 if (fio_file_open(f))
1286 td_io_close_file(td, f);
1287
1288 remove_file_hash(f);
1289
1290 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1291 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1292 td_io_unlink_file(td, f);
1293 }
1294
1295 sfree(f->file_name);
1296 f->file_name = NULL;
1297 if (fio_file_axmap(f)) {
1298 axmap_free(f->io_axmap);
1299 f->io_axmap = NULL;
1300 }
1301 sfree(f);
1302 }
1303
1304 td->o.filename = NULL;
1305 free(td->files);
1306 free(td->file_locks);
1307 td->files_index = 0;
1308 td->files = NULL;
1309 td->file_locks = NULL;
1310 td->o.file_lock_mode = FILE_LOCK_NONE;
1311 td->o.nr_files = 0;
1312}
1313
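/*
 * Classify the file as a pipe, regular file, block device or character
 * device, based on the "-" name, the Windows \\.\ device namespace and
 * stat(2).
 */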
1314static void get_file_type(struct fio_file *f)
1315{
1316 struct stat sb;
1317
1318 if (!strcmp(f->file_name, "-"))
1319 f->filetype = FIO_TYPE_PIPE;
1320 else
1321 f->filetype = FIO_TYPE_FILE;
1322
1323#ifdef WIN32
1324 /* \\.\ is the device namespace in Windows, where every file is
1325 * a block device */
1326 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1327 f->filetype = FIO_TYPE_BLOCK;
1328#endif
1329
1330 if (!stat(f->file_name, &sb)) {
1331 if (S_ISBLK(sb.st_mode))
1332 f->filetype = FIO_TYPE_BLOCK;
1333 else if (S_ISCHR(sb.st_mode))
1334 f->filetype = FIO_TYPE_CHAR;
1335 else if (S_ISFIFO(sb.st_mode))
1336 f->filetype = FIO_TYPE_PIPE;
1337 }
1338}
1339
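/*
 * Bookkeeping of filenames that have already been added, used to avoid
 * re-adding files for cloned jobs. A bloom filter provides the fast
 * negative check before the list is scanned.
 */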
1340static bool __is_already_allocated(const char *fname, bool set)
1341{
1342 struct flist_head *entry;
1343 bool ret;
1344
1345 ret = file_bloom_exists(fname, set);
1346 if (!ret)
1347 return ret;
1348
1349 flist_for_each(entry, &filename_list) {
1350 struct file_name *fn;
1351
1352 fn = flist_entry(entry, struct file_name, list);
1353
1354 if (!strcmp(fn->filename, fname))
1355 return true;
1356 }
1357
1358 return false;
1359}
1360
1361static bool is_already_allocated(const char *fname)
1362{
1363 bool ret;
1364
1365 fio_file_hash_lock();
1366 ret = __is_already_allocated(fname, false);
1367 fio_file_hash_unlock();
1368
1369 return ret;
1370}
1371
1372static void set_already_allocated(const char *fname)
1373{
1374 struct file_name *fn;
1375
1376 fn = malloc(sizeof(struct file_name));
1377 fn->filename = strdup(fname);
1378
1379 fio_file_hash_lock();
1380 if (!__is_already_allocated(fname, true)) {
1381 flist_add_tail(&fn->list, &filename_list);
1382 fn = NULL;
1383 }
1384 fio_file_hash_unlock();
1385
1386 if (fn) {
1387 free(fn->filename);
1388 free(fn);
1389 }
1390}
1391
1392static void free_already_allocated(void)
1393{
1394 struct flist_head *entry, *tmp;
1395 struct file_name *fn;
1396
1397 if (flist_empty(&filename_list))
1398 return;
1399
1400 fio_file_hash_lock();
1401 flist_for_each_safe(entry, tmp, &filename_list) {
1402 fn = flist_entry(entry, struct file_name, list);
1403 free(fn->filename);
1404 flist_del(&fn->list);
1405 free(fn);
1406 }
1407
1408 fio_file_hash_unlock();
1409}
1410
1411static struct fio_file *alloc_new_file(struct thread_data *td)
1412{
1413 struct fio_file *f;
1414
1415 f = smalloc(sizeof(*f));
1416 if (!f) {
1417 assert(0);
1418 return NULL;
1419 }
1420
1421 f->fd = -1;
1422 f->shadow_fd = -1;
1423 fio_file_reset(td, f);
1424 return f;
1425}
1426
1427bool exists_and_not_regfile(const char *filename)
1428{
1429 struct stat sb;
1430
1431 if (lstat(filename, &sb) == -1)
1432 return false;
1433
1434#ifndef WIN32 /* NOT Windows */
1435 if (S_ISREG(sb.st_mode))
1436 return false;
1437#else
1438 /* \\.\ is the device namespace in Windows, where every file
1439 * is a device node */
1440 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1441 return false;
1442#endif
1443
1444 return true;
1445}
1446
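/*
 * Add one file to the job: grow the file array if needed, resolve the
 * directory prefix, detect the file type and set up the configured
 * file lock.
 */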
1447int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1448{
1449 int cur_files = td->files_index;
1450 char file_name[PATH_MAX];
1451 struct fio_file *f;
1452 int len = 0;
1453
1454 dprint(FD_FILE, "add file %s\n", fname);
1455
1456 if (td->o.directory)
1457 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1458 td->o.unique_filename);
1459
1460 sprintf(file_name + len, "%s", fname);
1461
1462 /* clean cloned siblings using existing files */
1463 if (numjob && is_already_allocated(file_name) &&
1464 !exists_and_not_regfile(fname))
1465 return 0;
1466
1467 f = alloc_new_file(td);
1468
1469 if (td->files_size <= td->files_index) {
1470 unsigned int new_size = td->o.nr_files + 1;
1471
1472 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1473
1474 td->files = realloc(td->files, new_size * sizeof(f));
1475 if (td->files == NULL) {
1476 log_err("fio: realloc OOM\n");
1477 assert(0);
1478 }
1479 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1480 td->file_locks = realloc(td->file_locks, new_size);
1481 if (!td->file_locks) {
1482 log_err("fio: realloc OOM\n");
1483 assert(0);
1484 }
1485 td->file_locks[cur_files] = FILE_LOCK_NONE;
1486 }
1487 td->files_size = new_size;
1488 }
1489 td->files[cur_files] = f;
1490 f->fileno = cur_files;
1491
1492 /*
1493 * We may be called from the init path, before the io engine is loaded
1494 */
1495 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1496 f->real_file_size = -1ULL;
1497
1498 f->file_name = smalloc_strdup(file_name);
1499 if (!f->file_name)
1500 assert(0);
1501
1502 get_file_type(f);
1503
1504 switch (td->o.file_lock_mode) {
1505 case FILE_LOCK_NONE:
1506 break;
1507 case FILE_LOCK_READWRITE:
1508 f->rwlock = fio_rwlock_init();
1509 break;
1510 case FILE_LOCK_EXCLUSIVE:
1511 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1512 break;
1513 default:
1514 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1515 assert(0);
1516 }
1517
1518 td->files_index++;
1519 if (f->filetype == FIO_TYPE_FILE)
1520 td->nr_normal_files++;
1521
1522 set_already_allocated(file_name);
1523
1524 if (inc)
1525 td->o.nr_files++;
1526
1527 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1528 cur_files);
1529
1530 return cur_files;
1531}
1532
1533int add_file_exclusive(struct thread_data *td, const char *fname)
1534{
1535 struct fio_file *f;
1536 unsigned int i;
1537
1538 for_each_file(td, f, i) {
1539 if (!strcmp(f->file_name, fname))
1540 return i;
1541 }
1542
1543 return add_file(td, fname, 0, 1);
1544}
1545
1546void get_file(struct fio_file *f)
1547{
1548 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1549 assert(fio_file_open(f));
1550 f->references++;
1551}
1552
1553int put_file(struct thread_data *td, struct fio_file *f)
1554{
1555 int f_ret = 0, ret = 0;
1556
1557 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1558
1559 if (!fio_file_open(f)) {
1560 assert(f->fd == -1);
1561 return 0;
1562 }
1563
1564 assert(f->references);
1565 if (--f->references)
1566 return 0;
1567
1568 if (should_fsync(td) && td->o.fsync_on_close) {
1569 f_ret = fsync(f->fd);
1570 if (f_ret < 0)
1571 f_ret = errno;
1572 }
1573
1574 if (td->io_ops->close_file)
1575 ret = td->io_ops->close_file(td, f);
1576
1577 if (!ret)
1578 ret = f_ret;
1579
1580 td->nr_open_files--;
1581 fio_file_clear_open(f);
1582 assert(f->fd == -1);
1583 return ret;
1584}
1585
1586void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1587{
1588 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1589 return;
1590
1591 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1592 if (ddir == DDIR_READ)
1593 fio_rwlock_read(f->rwlock);
1594 else
1595 fio_rwlock_write(f->rwlock);
1596 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1597 fio_mutex_down(f->lock);
1598
1599 td->file_locks[f->fileno] = td->o.file_lock_mode;
1600}
1601
1602void unlock_file(struct thread_data *td, struct fio_file *f)
1603{
1604 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1605 return;
1606
1607 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1608 fio_rwlock_unlock(f->rwlock);
1609 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1610 fio_mutex_up(f->lock);
1611
1612 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1613}
1614
1615void unlock_file_all(struct thread_data *td, struct fio_file *f)
1616{
1617 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1618 return;
1619 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1620 unlock_file(td, f);
1621}
1622
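/*
 * Recursively walk a directory, adding every regular file found via
 * add_file() and descending into subdirectories.
 */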
1623static int recurse_dir(struct thread_data *td, const char *dirname)
1624{
1625 struct dirent *dir;
1626 int ret = 0;
1627 DIR *D;
1628
1629 D = opendir(dirname);
1630 if (!D) {
1631 char buf[FIO_VERROR_SIZE];
1632
1633 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1634 td_verror(td, errno, buf);
1635 return 1;
1636 }
1637
1638 while ((dir = readdir(D)) != NULL) {
1639 char full_path[PATH_MAX];
1640 struct stat sb;
1641
1642 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1643 continue;
1644
1645 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1646
1647 if (lstat(full_path, &sb) == -1) {
1648 if (errno != ENOENT) {
1649 td_verror(td, errno, "stat");
1650 ret = 1;
1651 break;
1652 }
1653 }
1654
1655 if (S_ISREG(sb.st_mode)) {
1656 add_file(td, full_path, 0, 1);
1657 continue;
1658 }
1659 if (!S_ISDIR(sb.st_mode))
1660 continue;
1661
1662 ret = recurse_dir(td, full_path);
1663 if (ret)
1664 break;
1665 }
1666
1667 closedir(D);
1668 return ret;
1669}
1670
1671int add_dir_files(struct thread_data *td, const char *path)
1672{
1673 int ret = recurse_dir(td, path);
1674
1675 if (!ret)
1676 log_info("fio: opendir added %d files\n", td->o.nr_files);
1677
1678 return ret;
1679}
1680
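/*
 * Duplicate the file list of the originating thread for a cloned job,
 * copying names and file types and sharing locks where configured.
 */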
1681void dup_files(struct thread_data *td, struct thread_data *org)
1682{
1683 struct fio_file *f;
1684 unsigned int i;
1685
1686 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1687
1688 if (!org->files)
1689 return;
1690
1691 td->files = malloc(org->files_index * sizeof(f));
1692
1693 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1694 td->file_locks = malloc(org->files_index);
1695
1696 for_each_file(org, f, i) {
1697 struct fio_file *__f;
1698
1699 __f = alloc_new_file(td);
1700
1701 if (f->file_name) {
1702 __f->file_name = smalloc_strdup(f->file_name);
1703 if (!__f->file_name)
1704 assert(0);
1705
1706 __f->filetype = f->filetype;
1707 }
1708
1709 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1710 __f->lock = f->lock;
1711 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1712 __f->rwlock = f->rwlock;
1713
1714 td->files[i] = __f;
1715 }
1716}
1717
1718/*
1719 * Returns the index that matches the filename, or -1 if not there
1720 */
1721int get_fileno(struct thread_data *td, const char *fname)
1722{
1723 struct fio_file *f;
1724 unsigned int i;
1725
1726 for_each_file(td, f, i)
1727 if (!strcmp(f->file_name, fname))
1728 return i;
1729
1730 return -1;
1731}
1732
1733/*
1734 * For log usage, where we add/open/close files automatically
1735 */
1736void free_release_files(struct thread_data *td)
1737{
1738 close_files(td);
1739 td->o.nr_files = 0;
1740 td->o.open_files = 0;
1741 td->files_index = 0;
1742 td->nr_normal_files = 0;
1743}
1744
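/*
 * Reset per-file I/O state: last positions and the random offset map
 * (axmap or LFSR), giving the file a clean starting point.
 */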
1745void fio_file_reset(struct thread_data *td, struct fio_file *f)
1746{
1747 int i;
1748
1749 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1750 f->last_pos[i] = f->file_offset;
1751 f->last_start[i] = -1ULL;
1752 }
1753
1754 if (fio_file_axmap(f))
1755 axmap_reset(f->io_axmap);
1756 else if (fio_file_lfsr(f))
1757 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1758}
1759
1760bool fio_files_done(struct thread_data *td)
1761{
1762 struct fio_file *f;
1763 unsigned int i;
1764
1765 for_each_file(td, f, i)
1766 if (!fio_file_done(f))
1767 return false;
1768
1769 return true;
1770}
1771
1772/* free memory used in initialization phase only */
1773void filesetup_mem_free(void)
1774{
1775 free_already_allocated();
1776}