Add details of file number/size related options to HOWTO
[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * Check if we need to lay out the file completely again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set.
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 int ret;
62
63 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
64
65 ret = td_io_unlink_file(td, f);
66 if (ret != 0 && ret != ENOENT) {
67 td_verror(td, errno, "unlink");
68 return 1;
69 }
70 }
71
72 flags = O_WRONLY;
73 if (td->o.allow_create)
74 flags |= O_CREAT;
75 if (new_layout)
76 flags |= O_TRUNC;
77
78#ifdef WIN32
79 flags |= _O_BINARY;
80#endif
81
82 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
83 f->fd = open(f->file_name, flags, 0644);
84 if (f->fd < 0) {
85 int err = errno;
86
87 if (err == ENOENT && !td->o.allow_create)
88 log_err("fio: file creation disallowed by "
89 "allow_file_create=0\n");
90 else
91 td_verror(td, err, "open");
92 return 1;
93 }
94
95#ifdef CONFIG_POSIX_FALLOCATE
96 if (!td->o.fill_device) {
97 switch (td->o.fallocate_mode) {
98 case FIO_FALLOCATE_NONE:
99 break;
100 case FIO_FALLOCATE_POSIX:
101 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
102 f->file_name,
103 (unsigned long long) f->real_file_size);
104
105 r = posix_fallocate(f->fd, 0, f->real_file_size);
106 if (r > 0) {
107 log_err("fio: posix_fallocate fails: %s\n",
108 strerror(r));
109 }
110 break;
111#ifdef CONFIG_LINUX_FALLOCATE
112 case FIO_FALLOCATE_KEEP_SIZE:
113 dprint(FD_FILE,
114 "fallocate(FALLOC_FL_KEEP_SIZE) "
115 "file %s size %llu\n", f->file_name,
116 (unsigned long long) f->real_file_size);
117
118 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
119 f->real_file_size);
120 if (r != 0)
121 td_verror(td, errno, "fallocate");
122
123 break;
124#endif /* CONFIG_LINUX_FALLOCATE */
125 default:
126 log_err("fio: unknown fallocate mode: %d\n",
127 td->o.fallocate_mode);
128 assert(0);
129 }
130 }
131#endif /* CONFIG_POSIX_FALLOCATE */
132
133 /*
134 * If our jobs don't require regular files initially, we're done.
135 */
136 if (!new_layout)
137 goto done;
138
139 /*
140 * The size will be -1ULL when fill_device is used, so don't truncate
141 * or fallocate this file, just write it
142 */
143 if (!td->o.fill_device) {
144 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
145 (unsigned long long) f->real_file_size);
146 if (ftruncate(f->fd, f->real_file_size) == -1) {
147 if (errno != EFBIG) {
148 td_verror(td, errno, "ftruncate");
149 goto err;
150 }
151 }
152 }
153
154 b = malloc(td->o.max_bs[DDIR_WRITE]);
155
156 left = f->real_file_size;
157 while (left && !td->terminate) {
158 bs = td->o.max_bs[DDIR_WRITE];
159 if (bs > left)
160 bs = left;
161
162 fill_io_buffer(td, b, bs, bs);
163
164 r = write(f->fd, b, bs);
165
166 if (r > 0) {
167 left -= r;
168 continue;
169 } else {
170 if (r < 0) {
171 int __e = errno;
172
173 if (__e == ENOSPC) {
174 if (td->o.fill_device)
175 break;
176 log_info("fio: ENOSPC on laying out "
177 "file, stopping\n");
178 break;
179 }
180 td_verror(td, errno, "write");
181 } else
182 td_verror(td, EIO, "write");
183
184 break;
185 }
186 }
187
188 if (td->terminate) {
189 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
190 td_io_unlink_file(td, f);
191 } else if (td->o.create_fsync) {
192 if (fsync(f->fd) < 0) {
193 td_verror(td, errno, "fsync");
194 goto err;
195 }
196 }
197 if (td->o.fill_device && !td_write(td)) {
198 fio_file_clear_size_known(f);
199 if (td_io_get_file_size(td, f))
200 goto err;
201 if (f->io_size > f->real_file_size)
202 f->io_size = f->real_file_size;
203 }
204
205 free(b);
206done:
207 return 0;
208err:
209 close(f->fd);
210 f->fd = -1;
211 if (b)
212 free(b);
213 return 1;
214}
215
216static int pre_read_file(struct thread_data *td, struct fio_file *f)
217{
218 int ret = 0, r, did_open = 0, old_runstate;
219 unsigned long long left;
220 unsigned int bs;
221 char *b;
222
223 if (td_ioengine_flagged(td, FIO_PIPEIO))
224 return 0;
225
226 if (!fio_file_open(f)) {
227 if (td->io_ops->open_file(td, f)) {
228 log_err("fio: cannot pre-read, failed to open file\n");
229 return 1;
230 }
231 did_open = 1;
232 }
233
234 old_runstate = td_bump_runstate(td, TD_PRE_READING);
235
236 bs = td->o.max_bs[DDIR_READ];
237 b = malloc(bs);
238 memset(b, 0, bs);
239
240 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
241 td_verror(td, errno, "lseek");
242 log_err("fio: failed to lseek pre-read file\n");
243 ret = 1;
244 goto error;
245 }
246
247 left = f->io_size;
248
249 while (left && !td->terminate) {
250 if (bs > left)
251 bs = left;
252
253 r = read(f->fd, b, bs);
254
255 if (r == (int) bs) {
256 left -= bs;
257 continue;
258 } else {
259 td_verror(td, EIO, "pre_read");
260 break;
261 }
262 }
263
264error:
265 td_restore_runstate(td, old_runstate);
266
267 if (did_open)
268 td->io_ops->close_file(td, f);
269
270 free(b);
271 return ret;
272}
273
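/*
 * Pick a file size uniformly at random in [file_size_low, file_size_high],
 * rounded down to a multiple of the minimum block size.
 */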
274unsigned long long get_rand_file_size(struct thread_data *td)
275{
276 unsigned long long ret, sized;
277 uint64_t frand_max;
278 unsigned long r;
279
280 frand_max = rand_max(&td->file_size_state);
281 r = __rand(&td->file_size_state);
282 sized = td->o.file_size_high - td->o.file_size_low;
283 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
284 ret += td->o.file_size_low;
285 ret -= (ret % td->o.rw_min_bs);
286 return ret;
287}
288
289static int file_size(struct thread_data *td, struct fio_file *f)
290{
291 struct stat st;
292
293 if (stat(f->file_name, &st) == -1) {
294 td_verror(td, errno, "stat");
295 return 1;
296 }
297
298 f->real_file_size = st.st_size;
299 return 0;
300}
301
302static int bdev_size(struct thread_data *td, struct fio_file *f)
303{
304 unsigned long long bytes = 0;
305 int r;
306
307 if (td->io_ops->open_file(td, f)) {
308 log_err("fio: failed opening blockdev %s for size check\n",
309 f->file_name);
310 return 1;
311 }
312
313 r = blockdev_size(f, &bytes);
314 if (r) {
315 td_verror(td, r, "blockdev_size");
316 goto err;
317 }
318
319 if (!bytes) {
320 log_err("%s: zero sized block device?\n", f->file_name);
321 goto err;
322 }
323
324 f->real_file_size = bytes;
325 td->io_ops->close_file(td, f);
326 return 0;
327err:
328 td->io_ops->close_file(td, f);
329 return 1;
330}
331
332static int char_size(struct thread_data *td, struct fio_file *f)
333{
334#ifdef FIO_HAVE_CHARDEV_SIZE
335 unsigned long long bytes = 0;
336 int r;
337
338 if (td->io_ops->open_file(td, f)) {
339 log_err("fio: failed opening chardev %s for size check\n",
340 f->file_name);
341 return 1;
342 }
343
344 r = chardev_size(f, &bytes);
345 if (r) {
346 td_verror(td, r, "chardev_size");
347 goto err;
348 }
349
350 if (!bytes) {
351 log_err("%s: zero sized char device?\n", f->file_name);
352 goto err;
353 }
354
355 f->real_file_size = bytes;
356 td->io_ops->close_file(td, f);
357 return 0;
358err:
359 td->io_ops->close_file(td, f);
360 return 1;
361#else
362 f->real_file_size = -1ULL;
363 return 0;
364#endif
365}
366
367static int get_file_size(struct thread_data *td, struct fio_file *f)
368{
369 int ret = 0;
370
371 if (fio_file_size_known(f))
372 return 0;
373
374 if (f->filetype == FIO_TYPE_FILE)
375 ret = file_size(td, f);
376 else if (f->filetype == FIO_TYPE_BLOCK)
377 ret = bdev_size(td, f);
378 else if (f->filetype == FIO_TYPE_CHAR)
379 ret = char_size(td, f);
380 else {
381 f->real_file_size = -1;
382 log_info("%s: failed to get file size of %s\n", td->o.name,
383 f->file_name);
384 return 1; /* avoid offset extends end error message */
385 }
386
387 /*
388 * Leave ->real_file_size at 0, since that may be the expected
389 * initial state for regular files that haven't been laid out yet.
390 */
391 if (ret)
392 return ret;
393
394 /*
395 * ->file_offset normally hasn't been initialized yet, so this
396 * check is almost always false unless ->real_file_size is -1; but
397 * when ->real_file_size is -1, the message doesn't make sense.
398 * As a result, this message is basically useless.
399 */
400 if (f->file_offset > f->real_file_size) {
401 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
402 (unsigned long long) f->file_offset,
403 (unsigned long long) f->real_file_size);
404 return 1;
405 }
406
407 fio_file_set_size_known(f);
408 return 0;
409}
410
411static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
412 unsigned long long off,
413 unsigned long long len)
414{
415 int errval = 0, ret = 0;
416
417#ifdef CONFIG_ESX
418 return 0;
419#endif
420
421 if (len == -1ULL)
422 len = f->io_size;
423 if (off == -1ULL)
424 off = f->file_offset;
425
426 if (len == -1ULL || off == -1ULL)
427 return 0;
428
429 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
430 len);
431
432 if (td->io_ops->invalidate) {
433 ret = td->io_ops->invalidate(td, f);
434 if (ret < 0)
435 errval = ret;
436 } else if (f->filetype == FIO_TYPE_FILE) {
437 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
438 if (ret)
439 errval = ret;
440 } else if (f->filetype == FIO_TYPE_BLOCK) {
441 int retry_count = 0;
442
443 ret = blockdev_invalidate_cache(f);
444 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
445 /*
446 * Linux multipath devices reject ioctl while
447 * the maps are being updated. That window can
448 * last tens of milliseconds; we'll try up to
449 * a quarter of a second.
450 */
451 usleep(10000);
452 ret = blockdev_invalidate_cache(f);
453 }
454 if (ret < 0 && errno == EACCES && geteuid()) {
455 if (!root_warn) {
456 log_err("fio: only root may flush block "
457 "devices. Cache flush bypassed!\n");
458 root_warn = 1;
459 }
460 ret = 0;
461 }
462 if (ret < 0)
463 errval = errno;
464 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
465 ret = 0;
466
467 /*
468 * A failed cache flush isn't a fatal condition, and we know it
469 * will happen on some platforms where we don't have the proper
470 * function to flush e.g. block device caches. So just warn and
471 * continue on our way.
472 */
473 if (errval)
474 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errval));
475
476 return 0;
477
478}
479
480int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
481{
482 if (!fio_file_open(f))
483 return 0;
484
485 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
486}
487
488int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
489{
490 int ret = 0;
491
492 dprint(FD_FILE, "fd close %s\n", f->file_name);
493
494 remove_file_hash(f);
495
496 if (close(f->fd) < 0)
497 ret = errno;
498
499 f->fd = -1;
500
501 if (f->shadow_fd != -1) {
502 close(f->shadow_fd);
503 f->shadow_fd = -1;
504 }
505
506 f->engine_data = 0;
507 return ret;
508}
509
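/*
 * Open the file, sharing the lock of an identically named file from the
 * global file hash if one exists. Returns 1 if the name was found in the
 * hash, 0 otherwise; callers must check f->fd themselves for open errors.
 */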
510int file_lookup_open(struct fio_file *f, int flags)
511{
512 struct fio_file *__f;
513 int from_hash;
514
515 __f = lookup_file_hash(f->file_name);
516 if (__f) {
517 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
518 f->lock = __f->lock;
519 from_hash = 1;
520 } else {
521 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
522 from_hash = 0;
523 }
524
525#ifdef WIN32
526 flags |= _O_BINARY;
527#endif
528
529 f->fd = open(f->file_name, flags, 0600);
530 return from_hash;
531}
532
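/*
 * Close any stashed shadow descriptors (see generic_open_file()) and
 * return how many were closed, so callers can retry an open on EMFILE.
 */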
533static int file_close_shadow_fds(struct thread_data *td)
534{
535 struct fio_file *f;
536 int num_closed = 0;
537 unsigned int i;
538
539 for_each_file(td, f, i) {
540 if (f->shadow_fd == -1)
541 continue;
542
543 close(f->shadow_fd);
544 f->shadow_fd = -1;
545 num_closed++;
546 }
547
548 return num_closed;
549}
550
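/*
 * Default .open_file implementation: build the open(2) flags from the job
 * options (direct, atomic, sync, create), handle stdin/stdout jobs, and
 * retry without the noatime flag or after closing shadow fds when the
 * first attempt fails.
 */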
551int generic_open_file(struct thread_data *td, struct fio_file *f)
552{
553 int is_std = 0;
554 int flags = 0;
555 int from_hash = 0;
556
557 dprint(FD_FILE, "fd open %s\n", f->file_name);
558
559 if (!strcmp(f->file_name, "-")) {
560 if (td_rw(td)) {
561 log_err("fio: can't read/write to stdin/out\n");
562 return 1;
563 }
564 is_std = 1;
565
566 /*
567 * move output logging to stderr, if we are writing to stdout
568 */
569 if (td_write(td))
570 f_out = stderr;
571 }
572
573 if (td_trim(td))
574 goto skip_flags;
575 if (td->o.odirect)
576 flags |= OS_O_DIRECT;
577 if (td->o.oatomic) {
578 if (!FIO_O_ATOMIC) {
579 td_verror(td, EINVAL, "OS does not support atomic IO");
580 return 1;
581 }
582 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
583 }
584 if (td->o.sync_io)
585 flags |= O_SYNC;
586 if (td->o.create_on_open && td->o.allow_create)
587 flags |= O_CREAT;
588skip_flags:
589 if (f->filetype != FIO_TYPE_FILE)
590 flags |= FIO_O_NOATIME;
591
592open_again:
593 if (td_write(td)) {
594 if (!read_only)
595 flags |= O_RDWR;
596
597 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
598 flags |= O_CREAT;
599
600 if (is_std)
601 f->fd = dup(STDOUT_FILENO);
602 else
603 from_hash = file_lookup_open(f, flags);
604 } else if (td_read(td)) {
605 if (f->filetype == FIO_TYPE_CHAR && !read_only)
606 flags |= O_RDWR;
607 else
608 flags |= O_RDONLY;
609
610 if (is_std)
611 f->fd = dup(STDIN_FILENO);
612 else
613 from_hash = file_lookup_open(f, flags);
614 } else { /* td_trim */
615 flags |= O_RDWR;
616 from_hash = file_lookup_open(f, flags);
617 }
618
619 if (f->fd == -1) {
620 char buf[FIO_VERROR_SIZE];
621 int __e = errno;
622
623 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
624 flags &= ~FIO_O_NOATIME;
625 goto open_again;
626 }
627 if (__e == EMFILE && file_close_shadow_fds(td))
628 goto open_again;
629
630 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
631
632 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
633 log_err("fio: looks like your file system does not " \
634 "support direct=1/buffered=0\n");
635 }
636
637 td_verror(td, __e, buf);
638 return 1;
639 }
640
641 if (!from_hash && f->fd != -1) {
642 if (add_file_hash(f)) {
643 int fio_unused ret;
644
645 /*
646 * Stash away descriptor for later close. This is to
647 * work-around a "feature" on Linux, where a close of
648 * an fd that has been opened for write will trigger
649 * udev to call blkid to check partitions, fs id, etc.
650 * That pollutes the device cache, which can slow down
651 * unbuffered accesses.
652 */
653 if (f->shadow_fd == -1)
654 f->shadow_fd = f->fd;
655 else {
656 /*
657 * OK to ignore, we haven't done anything
658 * with it
659 */
660 ret = generic_close_file(td, f);
661 }
662 goto open_again;
663 }
664 }
665
666 return 0;
667}
668
669/*
670 * generic_get_file_size() is the default .get_file_size
671 * implementation for the majority of I/O engines.
672 */
673int generic_get_file_size(struct thread_data *td, struct fio_file *f)
674{
675 return get_file_size(td, f);
676}
677
678/*
679 * open/close all files, so that ->real_file_size gets set
680 */
681static int get_file_sizes(struct thread_data *td)
682{
683 struct fio_file *f;
684 unsigned int i;
685 int err = 0;
686
687 for_each_file(td, f, i) {
688 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
689 f->file_name);
690
691 if (td_io_get_file_size(td, f)) {
692 if (td->error != ENOENT) {
693 log_err("%s\n", td->verror);
694 err = 1;
695 break;
696 }
697 clear_error(td);
698 }
699
700 /*
701 * There are corner cases where we end up with -1 for
702 * ->real_file_size due to an unsupported file type, etc.
703 * We then just set it to the size option value divided by the
704 * number of files, similar to the way file ->io_size is set.
705 * stat(2) failure doesn't set ->real_file_size to -1.
706 */
707 if (f->real_file_size == -1ULL && td->o.size)
708 f->real_file_size = td->o.size / td->o.nr_files;
709 }
710
711 return err;
712}
713
714struct fio_mount {
715 struct flist_head list;
716 const char *base;
717 char __base[256];
718 unsigned int key;
719};
720
721/*
722 * Get the number of free bytes on each unique mount used by the files.
723 */
724static unsigned long long get_fs_free_counts(struct thread_data *td)
725{
726 struct flist_head *n, *tmp;
727 unsigned long long ret = 0;
728 struct fio_mount *fm;
729 FLIST_HEAD(list);
730 struct fio_file *f;
731 unsigned int i;
732
733 for_each_file(td, f, i) {
734 struct stat sb;
735 char buf[256];
736
737 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
738 if (f->real_file_size != -1ULL)
739 ret += f->real_file_size;
740 continue;
741 } else if (f->filetype != FIO_TYPE_FILE)
742 continue;
743
744 buf[255] = '\0';
745 strncpy(buf, f->file_name, 255);
746
747 if (stat(buf, &sb) < 0) {
748 if (errno != ENOENT)
749 break;
750 strcpy(buf, ".");
751 if (stat(buf, &sb) < 0)
752 break;
753 }
754
755 fm = NULL;
756 flist_for_each(n, &list) {
757 fm = flist_entry(n, struct fio_mount, list);
758 if (fm->key == sb.st_dev)
759 break;
760
761 fm = NULL;
762 }
763
764 if (fm)
765 continue;
766
767 fm = calloc(1, sizeof(*fm));
768 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
769 fm->base = basename(fm->__base);
770 fm->key = sb.st_dev;
771 flist_add(&fm->list, &list);
772 }
773
774 flist_for_each_safe(n, tmp, &list) {
775 unsigned long long sz;
776
777 fm = flist_entry(n, struct fio_mount, list);
778 flist_del(&fm->list);
779
780 sz = get_fs_free_size(fm->base);
781 if (sz && sz != -1ULL)
782 ret += sz;
783
784 free(fm);
785 }
786
787 return ret;
788}
789
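/*
 * Starting offset for a file: append jobs start at the current end of a
 * regular file; otherwise it is start_offset plus a per-subjob
 * offset_increment.
 */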
790uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
791{
792 struct thread_options *o = &td->o;
793
794 if (o->file_append && f->filetype == FIO_TYPE_FILE)
795 return f->real_file_size;
796
797 return td->o.start_offset +
798 td->subjob_number * td->o.offset_increment;
799}
800
801/*
802 * Open the files and setup files sizes, creating files if necessary.
803 */
804int setup_files(struct thread_data *td)
805{
806 unsigned long long total_size, extend_size;
807 struct thread_options *o = &td->o;
808 struct fio_file *f;
809 unsigned int i, nr_fs_extra = 0;
810 int err = 0, need_extend;
811 int old_state;
812 const unsigned int bs = td_min_bs(td);
813 uint64_t fs = 0;
814
815 dprint(FD_FILE, "setup files\n");
816
817 old_state = td_bump_runstate(td, TD_SETTING_UP);
818
819 if (o->read_iolog_file)
820 goto done;
821
822 /*
823 * Find out physical size of files or devices for this thread,
824 * before we determine I/O size and range of our targets.
825 * If ioengine defines a setup() method, it's responsible for
826 * opening the files and setting f->real_file_size to indicate
827 * the valid range for that file.
828 */
829 if (td->io_ops->setup)
830 err = td->io_ops->setup(td);
831 else
832 err = get_file_sizes(td);
833
834 if (err)
835 goto err_out;
836
837 /*
838 * check sizes. if the files/devices do not exist and the size
839 * isn't passed to fio, abort.
840 */
841 total_size = 0;
842 for_each_file(td, f, i) {
843 f->fileno = i;
844 if (f->real_file_size == -1ULL)
845 total_size = -1ULL;
846 else
847 total_size += f->real_file_size;
848 }
849
850 if (o->fill_device)
851 td->fill_device_size = get_fs_free_counts(td);
852
853 /*
854 * device/file sizes are zero and no size given, punt
855 */
856 if ((!total_size || total_size == -1ULL) && !o->size &&
857 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
858 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
859 log_err("%s: you need to specify size=\n", o->name);
860 td_verror(td, EINVAL, "total_file_size");
861 goto err_out;
862 }
863
864 /*
865 * Calculate per-file size and potential extra size for the
866 * first files, if needed (i.e. if we don't have a fixed size).
867 */
868 if (!o->file_size_low && o->nr_files) {
869 uint64_t all_fs;
870
871 fs = o->size / o->nr_files;
872 all_fs = fs * o->nr_files;
873
874 if (all_fs < o->size)
875 nr_fs_extra = (o->size - all_fs) / bs;
876 }
877
878 /*
879 * now file sizes are known, so we can set ->io_size. if size= is
880 * not given, ->io_size is just equal to ->real_file_size. if size
881 * is given, ->io_size is size / nr_files.
882 */
883 extend_size = total_size = 0;
884 need_extend = 0;
885 for_each_file(td, f, i) {
886 f->file_offset = get_start_offset(td, f);
887
888 /*
889 * Update ->io_size depending on the options specified.
890 * ->file_size_low being 0 means the filesize option isn't set.
891 * A non-zero ->file_size_low equal to ->file_size_high means
892 * the filesize option was given as a fixed size.
893 * A non-zero ->file_size_low different from ->file_size_high
894 * means the filesize option was given as a range.
895 */
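 /*
 * Added illustration: a job option like "filesize=1m" sets
 * ->file_size_low == ->file_size_high == 1m (a fixed size), while
 * "filesize=4k-1m" sets ->file_size_low = 4k and ->file_size_high = 1m,
 * giving each file a random size in that range (see get_rand_file_size()).
 */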
896 if (!o->file_size_low) {
897 /*
898 * no file size or range given, file size is equal to
899 * total size divided by number of files. If that is
900 * zero, set it to the real file size. If the size
901 * doesn't divide nicely with the min blocksize,
902 * make the first files bigger.
903 */
904 f->io_size = fs;
905 if (nr_fs_extra) {
906 nr_fs_extra--;
907 f->io_size += bs;
908 }
909
910 if (!f->io_size)
911 f->io_size = f->real_file_size - f->file_offset;
912 } else if (f->real_file_size < o->file_size_low ||
913 f->real_file_size > o->file_size_high) {
914 if (f->file_offset > o->file_size_low)
915 goto err_offset;
916 /*
917 * file size given. if it's fixed, use that. if it's a
918 * range, generate a random size in-between.
919 */
920 if (o->file_size_low == o->file_size_high)
921 f->io_size = o->file_size_low - f->file_offset;
922 else {
923 f->io_size = get_rand_file_size(td)
924 - f->file_offset;
925 }
926 } else
927 f->io_size = f->real_file_size - f->file_offset;
928
929 if (f->io_size == -1ULL)
930 total_size = -1ULL;
931 else {
932 if (o->size_percent) {
933 f->io_size = (f->io_size * o->size_percent) / 100;
934 f->io_size -= (f->io_size % td_min_bs(td));
935 }
936 total_size += f->io_size;
937 }
938
939 if (f->filetype == FIO_TYPE_FILE &&
940 (f->io_size + f->file_offset) > f->real_file_size &&
941 !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
942 if (!o->create_on_open) {
943 need_extend++;
944 extend_size += (f->io_size + f->file_offset);
945 } else
946 f->real_file_size = f->io_size + f->file_offset;
947 fio_file_set_extend(f);
948 }
949 }
950
951 if (td->o.block_error_hist) {
952 int len;
953
954 assert(td->o.nr_files == 1); /* checked in fixup_options */
955 f = td->files[0];
956 len = f->io_size / td->o.bs[DDIR_TRIM];
957 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
958 log_err("fio: cannot calculate block histogram with "
959 "%d trim blocks, maximum %d\n",
960 len, MAX_NR_BLOCK_INFOS);
961 td_verror(td, EINVAL, "block_error_hist");
962 goto err_out;
963 }
964
965 td->ts.nr_block_infos = len;
966 for (i = 0; i < len; i++)
967 td->ts.block_infos[i] =
968 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
969 } else
970 td->ts.nr_block_infos = 0;
971
972 if (!o->size || (total_size && o->size > total_size))
973 o->size = total_size;
974
975 if (o->size < td_min_bs(td)) {
976 log_err("fio: blocksize too large for data set\n");
977 goto err_out;
978 }
979
980 /*
981 * See if we need to extend some files, typically needed when our
982 * target regular files don't exist yet, but our jobs require them
983 * initially due to read I/Os.
984 */
985 if (need_extend) {
986 temp_stall_ts = 1;
987 if (output_format & FIO_OUTPUT_NORMAL)
988 log_info("%s: Laying out IO file(s) (%u file(s) / %lluMiB)\n",
989 o->name, need_extend, extend_size >> 20);
990
991 for_each_file(td, f, i) {
992 unsigned long long old_len = -1ULL, extend_len = -1ULL;
993
994 if (!fio_file_extend(f))
995 continue;
996
997 assert(f->filetype == FIO_TYPE_FILE);
998 fio_file_clear_extend(f);
999 if (!o->fill_device) {
1000 old_len = f->real_file_size;
1001 extend_len = f->io_size + f->file_offset -
1002 old_len;
1003 }
1004 f->real_file_size = (f->io_size + f->file_offset);
1005 err = extend_file(td, f);
1006 if (err)
1007 break;
1008
1009 err = __file_invalidate_cache(td, f, old_len,
1010 extend_len);
1011
1012 /*
1013 * Shut up static checker
1014 */
1015 if (f->fd != -1)
1016 close(f->fd);
1017
1018 f->fd = -1;
1019 if (err)
1020 break;
1021 }
1022 temp_stall_ts = 0;
1023 }
1024
1025 if (err)
1026 goto err_out;
1027
1028 if (!o->zone_size)
1029 o->zone_size = o->size;
1030
1031 /*
1032 * iolog already set the total io size, if we read back
1033 * stored entries.
1034 */
1035 if (!o->read_iolog_file) {
1036 if (o->io_size)
1037 td->total_io_size = o->io_size * o->loops;
1038 else
1039 td->total_io_size = o->size * o->loops;
1040 }
1041
1042done:
1043 if (o->create_only)
1044 td->done = 1;
1045
1046 td_restore_runstate(td, old_state);
1047 return 0;
1048err_offset:
1049 log_err("%s: you need to specify valid offset=\n", o->name);
1050err_out:
1051 td_restore_runstate(td, old_state);
1052 return 1;
1053}
1054
1055int pre_read_files(struct thread_data *td)
1056{
1057 struct fio_file *f;
1058 unsigned int i;
1059
1060 dprint(FD_FILE, "pre_read files\n");
1061
1062 for_each_file(td, f, i) {
1063 pre_read_file(td, f);
1064 }
1065
1066 return 1;
1067}
1068
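/*
 * Initialize the zipf/pareto/gauss state for one file, using one range per
 * minimum block. The seed is derived from the file name and thread number
 * when rand_repeatable is set, otherwise a per-run seed is used.
 */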
1069static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1070{
1071 unsigned int range_size, seed;
1072 unsigned long nranges;
1073 uint64_t fsize;
1074
1075 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1076 fsize = min(f->real_file_size, f->io_size);
1077
1078 nranges = (fsize + range_size - 1) / range_size;
1079
1080 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1081 if (!td->o.rand_repeatable)
1082 seed = td->rand_seeds[4];
1083
1084 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1085 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1086 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1087 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1088 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1089 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1090
1091 return 1;
1092}
1093
1094static int init_rand_distribution(struct thread_data *td)
1095{
1096 struct fio_file *f;
1097 unsigned int i;
1098 int state;
1099
1100 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1101 return 0;
1102
1103 state = td_bump_runstate(td, TD_SETTING_UP);
1104
1105 for_each_file(td, f, i)
1106 __init_rand_distribution(td, f);
1107
1108 td_restore_runstate(td, state);
1109
1110 return 1;
1111}
1112
1113/*
1114 * Check if the number of blocks exceeds the randomness capability of
1115 * the selected generator. Tausworthe is 32-bit, the others are fully
1116 * 64-bit capable.
1117 */
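/*
 * Added note: with a 4 KiB minimum block size, 2^32 blocks corresponds to
 * roughly 16 TiB, beyond which the 32-bit tausworthe generator can no
 * longer address every block.
 */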
1118static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1119 uint64_t blocks)
1120{
1121 if (blocks <= FRAND32_MAX)
1122 return 0;
1123 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1124 return 0;
1125
1126 /*
1127 * If the user hasn't specified a random generator, switch
1128 * to tausworthe64 with an informational warning. If the user did
1129 * specify one, just warn.
1130 */
1131 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1132 f->file_name);
1133
1134 if (!fio_option_is_set(&td->o, random_generator)) {
1135 log_info("fio: Switching to tausworthe64. Use the "
1136 "random_generator= option to get rid of this "
1137 "warning.\n");
1138 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1139 return 0;
1140 }
1141
1142 /*
1143 * Keep this as an informational message to avoid breaking scripts.
1144 */
1145 log_info("fio: Use the random_generator= option to switch to lfsr or "
1146 "tausworthe64.\n");
1147 return 0;
1148}
1149
1150int init_random_map(struct thread_data *td)
1151{
1152 unsigned long long blocks;
1153 struct fio_file *f;
1154 unsigned int i;
1155
1156 if (init_rand_distribution(td))
1157 return 0;
1158 if (!td_random(td))
1159 return 0;
1160
1161 for_each_file(td, f, i) {
1162 uint64_t fsize = min(f->real_file_size, f->io_size);
1163
1164 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1165
1166 if (check_rand_gen_limits(td, f, blocks))
1167 return 1;
1168
1169 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1170 unsigned long seed;
1171
1172 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1173
1174 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1175 fio_file_set_lfsr(f);
1176 continue;
1177 }
1178 } else if (!td->o.norandommap) {
1179 f->io_axmap = axmap_new(blocks);
1180 if (f->io_axmap) {
1181 fio_file_set_axmap(f);
1182 continue;
1183 }
1184 } else if (td->o.norandommap)
1185 continue;
1186
1187 if (!td->o.softrandommap) {
1188 log_err("fio: failed allocating random map. If running"
1189 " a large number of jobs, try the 'norandommap'"
1190 " option or set 'softrandommap'. Or give"
1191 " a larger --alloc-size to fio.\n");
1192 return 1;
1193 }
1194
1195 log_info("fio: file %s failed allocating random map. Running "
1196 "job without.\n", f->file_name);
1197 }
1198
1199 return 0;
1200}
1201
1202void close_files(struct thread_data *td)
1203{
1204 struct fio_file *f;
1205 unsigned int i;
1206
1207 for_each_file(td, f, i) {
1208 if (fio_file_open(f))
1209 td_io_close_file(td, f);
1210 }
1211}
1212
1213void close_and_free_files(struct thread_data *td)
1214{
1215 struct fio_file *f;
1216 unsigned int i;
1217
1218 dprint(FD_FILE, "close files\n");
1219
1220 for_each_file(td, f, i) {
1221 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1222 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1223 td_io_unlink_file(td, f);
1224 }
1225
1226 if (fio_file_open(f))
1227 td_io_close_file(td, f);
1228
1229 remove_file_hash(f);
1230
1231 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1232 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1233 td_io_unlink_file(td, f);
1234 }
1235
1236 sfree(f->file_name);
1237 f->file_name = NULL;
1238 if (fio_file_axmap(f)) {
1239 axmap_free(f->io_axmap);
1240 f->io_axmap = NULL;
1241 }
1242 sfree(f);
1243 }
1244
1245 td->o.filename = NULL;
1246 free(td->files);
1247 free(td->file_locks);
1248 td->files_index = 0;
1249 td->files = NULL;
1250 td->file_locks = NULL;
1251 td->o.file_lock_mode = FILE_LOCK_NONE;
1252 td->o.nr_files = 0;
1253}
1254
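/*
 * Classify a file as a regular file, block device, character device, or
 * pipe, based on its name and on stat() when the path exists.
 */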
1255static void get_file_type(struct fio_file *f)
1256{
1257 struct stat sb;
1258
1259 if (!strcmp(f->file_name, "-"))
1260 f->filetype = FIO_TYPE_PIPE;
1261 else
1262 f->filetype = FIO_TYPE_FILE;
1263
1264#ifdef WIN32
1265 /* \\.\ is the device namespace in Windows, where every file is
1266 * a block device */
1267 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1268 f->filetype = FIO_TYPE_BLOCK;
1269#endif
1270
1271 if (!stat(f->file_name, &sb)) {
1272 if (S_ISBLK(sb.st_mode))
1273 f->filetype = FIO_TYPE_BLOCK;
1274 else if (S_ISCHR(sb.st_mode))
1275 f->filetype = FIO_TYPE_CHAR;
1276 else if (S_ISFIFO(sb.st_mode))
1277 f->filetype = FIO_TYPE_PIPE;
1278 }
1279}
1280
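/*
 * Check whether a filename has already been claimed by another job: the
 * bloom filter gives a fast negative, the exact list confirms a positive.
 */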
1281static bool __is_already_allocated(const char *fname, bool set)
1282{
1283 struct flist_head *entry;
1284 bool ret;
1285
1286 ret = file_bloom_exists(fname, set);
1287 if (!ret)
1288 return ret;
1289
1290 flist_for_each(entry, &filename_list) {
1291 struct file_name *fn;
1292
1293 fn = flist_entry(entry, struct file_name, list);
1294
1295 if (!strcmp(fn->filename, fname))
1296 return true;
1297 }
1298
1299 return false;
1300}
1301
1302static bool is_already_allocated(const char *fname)
1303{
1304 bool ret;
1305
1306 fio_file_hash_lock();
1307 ret = __is_already_allocated(fname, false);
1308 fio_file_hash_unlock();
1309
1310 return ret;
1311}
1312
1313static void set_already_allocated(const char *fname)
1314{
1315 struct file_name *fn;
1316
1317 fn = malloc(sizeof(struct file_name));
1318 fn->filename = strdup(fname);
1319
1320 fio_file_hash_lock();
1321 if (!__is_already_allocated(fname, true)) {
1322 flist_add_tail(&fn->list, &filename_list);
1323 fn = NULL;
1324 }
1325 fio_file_hash_unlock();
1326
1327 if (fn) {
1328 free(fn->filename);
1329 free(fn);
1330 }
1331}
1332
1333static void free_already_allocated(void)
1334{
1335 struct flist_head *entry, *tmp;
1336 struct file_name *fn;
1337
1338 if (flist_empty(&filename_list))
1339 return;
1340
1341 fio_file_hash_lock();
1342 flist_for_each_safe(entry, tmp, &filename_list) {
1343 fn = flist_entry(entry, struct file_name, list);
1344 free(fn->filename);
1345 flist_del(&fn->list);
1346 free(fn);
1347 }
1348
1349 fio_file_hash_unlock();
1350}
1351
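/*
 * Allocate a fio_file from shared storage and reset it; allocation failure
 * is treated as fatal.
 */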
1352static struct fio_file *alloc_new_file(struct thread_data *td)
1353{
1354 struct fio_file *f;
1355
1356 f = smalloc(sizeof(*f));
1357 if (!f) {
1358 assert(0);
1359 return NULL;
1360 }
1361
1362 f->fd = -1;
1363 f->shadow_fd = -1;
1364 fio_file_reset(td, f);
1365 return f;
1366}
1367
1368bool exists_and_not_regfile(const char *filename)
1369{
1370 struct stat sb;
1371
1372 if (lstat(filename, &sb) == -1)
1373 return false;
1374
1375#ifndef WIN32 /* NOT Windows */
1376 if (S_ISREG(sb.st_mode))
1377 return false;
1378#else
1379 /* \\.\ is the device namespace in Windows, where every file
1380 * is a device node */
1381 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1382 return false;
1383#endif
1384
1385 return true;
1386}
1387
1388int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1389{
1390 int cur_files = td->files_index;
1391 char file_name[PATH_MAX];
1392 struct fio_file *f;
1393 int len = 0;
1394
1395 dprint(FD_FILE, "add file %s\n", fname);
1396
1397 if (td->o.directory)
1398 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1399 td->o.unique_filename);
1400
1401 sprintf(file_name + len, "%s", fname);
1402
1403 /* cloned siblings: skip regular files already allocated by an earlier job */
1404 if (numjob && is_already_allocated(file_name) &&
1405 !exists_and_not_regfile(fname))
1406 return 0;
1407
1408 f = alloc_new_file(td);
1409
1410 if (td->files_size <= td->files_index) {
1411 unsigned int new_size = td->o.nr_files + 1;
1412
1413 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1414
1415 td->files = realloc(td->files, new_size * sizeof(f));
1416 if (td->files == NULL) {
1417 log_err("fio: realloc OOM\n");
1418 assert(0);
1419 }
1420 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1421 td->file_locks = realloc(td->file_locks, new_size);
1422 if (!td->file_locks) {
1423 log_err("fio: realloc OOM\n");
1424 assert(0);
1425 }
1426 td->file_locks[cur_files] = FILE_LOCK_NONE;
1427 }
1428 td->files_size = new_size;
1429 }
1430 td->files[cur_files] = f;
1431 f->fileno = cur_files;
1432
1433 /*
1434 * This may run at init time, before the io engine is loaded.
1435 */
1436 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1437 f->real_file_size = -1ULL;
1438
1439 f->file_name = smalloc_strdup(file_name);
1440 if (!f->file_name)
1441 assert(0);
1442
1443 get_file_type(f);
1444
1445 switch (td->o.file_lock_mode) {
1446 case FILE_LOCK_NONE:
1447 break;
1448 case FILE_LOCK_READWRITE:
1449 f->rwlock = fio_rwlock_init();
1450 break;
1451 case FILE_LOCK_EXCLUSIVE:
1452 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1453 break;
1454 default:
1455 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1456 assert(0);
1457 }
1458
1459 td->files_index++;
1460 if (f->filetype == FIO_TYPE_FILE)
1461 td->nr_normal_files++;
1462
1463 set_already_allocated(file_name);
1464
1465 if (inc)
1466 td->o.nr_files++;
1467
1468 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1469 cur_files);
1470
1471 return cur_files;
1472}
1473
1474int add_file_exclusive(struct thread_data *td, const char *fname)
1475{
1476 struct fio_file *f;
1477 unsigned int i;
1478
1479 for_each_file(td, f, i) {
1480 if (!strcmp(f->file_name, fname))
1481 return i;
1482 }
1483
1484 return add_file(td, fname, 0, 1);
1485}
1486
1487void get_file(struct fio_file *f)
1488{
1489 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1490 assert(fio_file_open(f));
1491 f->references++;
1492}
1493
1494int put_file(struct thread_data *td, struct fio_file *f)
1495{
1496 int f_ret = 0, ret = 0;
1497
1498 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1499
1500 if (!fio_file_open(f)) {
1501 assert(f->fd == -1);
1502 return 0;
1503 }
1504
1505 assert(f->references);
1506 if (--f->references)
1507 return 0;
1508
1509 if (should_fsync(td) && td->o.fsync_on_close) {
1510 f_ret = fsync(f->fd);
1511 if (f_ret < 0)
1512 f_ret = errno;
1513 }
1514
1515 if (td->io_ops->close_file)
1516 ret = td->io_ops->close_file(td, f);
1517
1518 if (!ret)
1519 ret = f_ret;
1520
1521 td->nr_open_files--;
1522 fio_file_clear_open(f);
1523 assert(f->fd == -1);
1524 return ret;
1525}
1526
1527void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1528{
1529 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1530 return;
1531
1532 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1533 if (ddir == DDIR_READ)
1534 fio_rwlock_read(f->rwlock);
1535 else
1536 fio_rwlock_write(f->rwlock);
1537 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1538 fio_mutex_down(f->lock);
1539
1540 td->file_locks[f->fileno] = td->o.file_lock_mode;
1541}
1542
1543void unlock_file(struct thread_data *td, struct fio_file *f)
1544{
1545 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1546 return;
1547
1548 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1549 fio_rwlock_unlock(f->rwlock);
1550 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1551 fio_mutex_up(f->lock);
1552
1553 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1554}
1555
1556void unlock_file_all(struct thread_data *td, struct fio_file *f)
1557{
1558 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1559 return;
1560 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1561 unlock_file(td, f);
1562}
1563
1564static int recurse_dir(struct thread_data *td, const char *dirname)
1565{
1566 struct dirent *dir;
1567 int ret = 0;
1568 DIR *D;
1569
1570 D = opendir(dirname);
1571 if (!D) {
1572 char buf[FIO_VERROR_SIZE];
1573
1574 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1575 td_verror(td, errno, buf);
1576 return 1;
1577 }
1578
1579 while ((dir = readdir(D)) != NULL) {
1580 char full_path[PATH_MAX];
1581 struct stat sb;
1582
1583 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1584 continue;
1585
1586 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1587
1588 if (lstat(full_path, &sb) == -1) {
1589 if (errno != ENOENT) {
1590 td_verror(td, errno, "stat");
1591 ret = 1;
1592 break;
1593 }
1594 }
1595
1596 if (S_ISREG(sb.st_mode)) {
1597 add_file(td, full_path, 0, 1);
1598 continue;
1599 }
1600 if (!S_ISDIR(sb.st_mode))
1601 continue;
1602
1603 ret = recurse_dir(td, full_path);
1604 if (ret)
1605 break;
1606 }
1607
1608 closedir(D);
1609 return ret;
1610}
1611
1612int add_dir_files(struct thread_data *td, const char *path)
1613{
1614 int ret = recurse_dir(td, path);
1615
1616 if (!ret)
1617 log_info("fio: opendir added %d files\n", td->o.nr_files);
1618
1619 return ret;
1620}
1621
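/*
 * Duplicate the file list of the original thread for a cloned job, sharing
 * file locks where the lock mode calls for it.
 */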
1622void dup_files(struct thread_data *td, struct thread_data *org)
1623{
1624 struct fio_file *f;
1625 unsigned int i;
1626
1627 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1628
1629 if (!org->files)
1630 return;
1631
1632 td->files = malloc(org->files_index * sizeof(f));
1633
1634 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1635 td->file_locks = malloc(org->files_index);
1636
1637 for_each_file(org, f, i) {
1638 struct fio_file *__f;
1639
1640 __f = alloc_new_file(td);
1641
1642 if (f->file_name) {
1643 __f->file_name = smalloc_strdup(f->file_name);
1644 if (!__f->file_name)
1645 assert(0);
1646
1647 __f->filetype = f->filetype;
1648 }
1649
1650 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1651 __f->lock = f->lock;
1652 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1653 __f->rwlock = f->rwlock;
1654
1655 td->files[i] = __f;
1656 }
1657}
1658
1659/*
1660 * Returns the index that matches the filename, or -1 if not there
1661 */
1662int get_fileno(struct thread_data *td, const char *fname)
1663{
1664 struct fio_file *f;
1665 unsigned int i;
1666
1667 for_each_file(td, f, i)
1668 if (!strcmp(f->file_name, fname))
1669 return i;
1670
1671 return -1;
1672}
1673
1674/*
1675 * For log usage, where we add/open/close files automatically
1676 */
1677void free_release_files(struct thread_data *td)
1678{
1679 close_files(td);
1680 td->o.nr_files = 0;
1681 td->o.open_files = 0;
1682 td->files_index = 0;
1683 td->nr_normal_files = 0;
1684}
1685
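/*
 * Reset per-file position state and any random map (axmap or LFSR) before
 * the file is reused.
 */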
1686void fio_file_reset(struct thread_data *td, struct fio_file *f)
1687{
1688 int i;
1689
1690 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1691 f->last_pos[i] = f->file_offset;
1692 f->last_start[i] = -1ULL;
1693 }
1694
1695 if (fio_file_axmap(f))
1696 axmap_reset(f->io_axmap);
1697 else if (fio_file_lfsr(f))
1698 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1699}
1700
1701bool fio_files_done(struct thread_data *td)
1702{
1703 struct fio_file *f;
1704 unsigned int i;
1705
1706 for_each_file(td, f, i)
1707 if (!fio_file_done(f))
1708 return false;
1709
1710 return true;
1711}
1712
1713/* free memory used in initialization phase only */
1714void filesetup_mem_free(void)
1715{
1716 free_already_allocated();
1717}