Fixup for a minor 0 byte file size case
[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * check if we need to lay the file out complete again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 int ret;
62
63 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
64
65 ret = td_io_unlink_file(td, f);
66 if (ret != 0 && ret != ENOENT) {
67 td_verror(td, errno, "unlink");
68 return 1;
69 }
70 }
71
72 flags = O_WRONLY;
73 if (td->o.allow_create)
74 flags |= O_CREAT;
75 if (new_layout)
76 flags |= O_TRUNC;
77
78#ifdef WIN32
79 flags |= _O_BINARY;
80#endif
81
82 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
83 f->fd = open(f->file_name, flags, 0644);
84 if (f->fd < 0) {
85 int err = errno;
86
87 if (err == ENOENT && !td->o.allow_create)
88 log_err("fio: file creation disallowed by "
89 "allow_file_create=0\n");
90 else
91 td_verror(td, err, "open");
92 return 1;
93 }
94
95#ifdef CONFIG_POSIX_FALLOCATE
96 if (!td->o.fill_device) {
97 switch (td->o.fallocate_mode) {
98 case FIO_FALLOCATE_NONE:
99 break;
100 case FIO_FALLOCATE_POSIX:
101 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
102 f->file_name,
103 (unsigned long long) f->real_file_size);
104
105 r = posix_fallocate(f->fd, 0, f->real_file_size);
106 if (r > 0) {
107 log_err("fio: posix_fallocate fails: %s\n",
108 strerror(r));
109 }
110 break;
111#ifdef CONFIG_LINUX_FALLOCATE
112 case FIO_FALLOCATE_KEEP_SIZE:
113 dprint(FD_FILE,
114 "fallocate(FALLOC_FL_KEEP_SIZE) "
115 "file %s size %llu\n", f->file_name,
116 (unsigned long long) f->real_file_size);
117
118 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
119 f->real_file_size);
120 if (r != 0)
121 td_verror(td, errno, "fallocate");
122
123 break;
124#endif /* CONFIG_LINUX_FALLOCATE */
125 default:
126 log_err("fio: unknown fallocate mode: %d\n",
127 td->o.fallocate_mode);
128 assert(0);
129 }
130 }
131#endif /* CONFIG_POSIX_FALLOCATE */
132
133 /*
134 * If our jobs don't require regular files initially, we're done.
135 */
136 if (!new_layout)
137 goto done;
138
139 /*
140 * The size will be -1ULL when fill_device is used, so don't truncate
141 * or fallocate this file, just write it
142 */
143 if (!td->o.fill_device) {
144 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
145 (unsigned long long) f->real_file_size);
146 if (ftruncate(f->fd, f->real_file_size) == -1) {
147 if (errno != EFBIG) {
148 td_verror(td, errno, "ftruncate");
149 goto err;
150 }
151 }
152 }
153
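	/*
	 * Lay the file out by writing fill data in max_bs[WRITE]-sized
	 * chunks until the full size is written or we run out of space.
	 */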
154 b = malloc(td->o.max_bs[DDIR_WRITE]);
155
156 left = f->real_file_size;
157 while (left && !td->terminate) {
158 bs = td->o.max_bs[DDIR_WRITE];
159 if (bs > left)
160 bs = left;
161
162 fill_io_buffer(td, b, bs, bs);
163
164 r = write(f->fd, b, bs);
165
166 if (r > 0) {
167 left -= r;
168 continue;
169 } else {
170 if (r < 0) {
171 int __e = errno;
172
173 if (__e == ENOSPC) {
174 if (td->o.fill_device)
175 break;
176 log_info("fio: ENOSPC on laying out "
177 "file, stopping\n");
178 break;
179 }
180 td_verror(td, errno, "write");
181 } else
182 td_verror(td, EIO, "write");
183
184 break;
185 }
186 }
187
188 if (td->terminate) {
189 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
190 td_io_unlink_file(td, f);
191 } else if (td->o.create_fsync) {
192 if (fsync(f->fd) < 0) {
193 td_verror(td, errno, "fsync");
194 goto err;
195 }
196 }
197 if (td->o.fill_device && !td_write(td)) {
198 fio_file_clear_size_known(f);
199 if (td_io_get_file_size(td, f))
200 goto err;
201 if (f->io_size > f->real_file_size)
202 f->io_size = f->real_file_size;
203 }
204
205 free(b);
206done:
207 return 0;
208err:
209 close(f->fd);
210 f->fd = -1;
211 if (b)
212 free(b);
213 return 1;
214}
215
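/*
 * Sequentially read back the region this job will use, so the data is
 * resident in the page cache before the job proper starts.
 */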
216static int pre_read_file(struct thread_data *td, struct fio_file *f)
217{
218 int ret = 0, r, did_open = 0, old_runstate;
219 unsigned long long left;
220 unsigned int bs;
221 char *b;
222
223 if (td_ioengine_flagged(td, FIO_PIPEIO))
224 return 0;
225
226 if (!fio_file_open(f)) {
227 if (td->io_ops->open_file(td, f)) {
228 log_err("fio: cannot pre-read, failed to open file\n");
229 return 1;
230 }
231 did_open = 1;
232 }
233
234 old_runstate = td_bump_runstate(td, TD_PRE_READING);
235
236 bs = td->o.max_bs[DDIR_READ];
237 b = malloc(bs);
238 memset(b, 0, bs);
239
240 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
241 td_verror(td, errno, "lseek");
242 log_err("fio: failed to lseek pre-read file\n");
243 ret = 1;
244 goto error;
245 }
246
247 left = f->io_size;
248
249 while (left && !td->terminate) {
250 if (bs > left)
251 bs = left;
252
253 r = read(f->fd, b, bs);
254
255 if (r == (int) bs) {
256 left -= bs;
257 continue;
258 } else {
259 td_verror(td, EIO, "pre_read");
260 break;
261 }
262 }
263
264error:
265 td_restore_runstate(td, old_runstate);
266
267 if (did_open)
268 td->io_ops->close_file(td, f);
269
270 free(b);
271 return ret;
272}
273
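/*
 * Pick a random size between file_size_low and file_size_high, rounded
 * down to a multiple of the minimum block size.
 */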
274unsigned long long get_rand_file_size(struct thread_data *td)
275{
276 unsigned long long ret, sized;
277 uint64_t frand_max;
278 unsigned long r;
279
280 frand_max = rand_max(&td->file_size_state);
281 r = __rand(&td->file_size_state);
282 sized = td->o.file_size_high - td->o.file_size_low;
283 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
284 ret += td->o.file_size_low;
285 ret -= (ret % td->o.rw_min_bs);
286 return ret;
287}
288
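/* stat() a regular file to find its current size */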
289static int file_size(struct thread_data *td, struct fio_file *f)
290{
291 struct stat st;
292
293 if (stat(f->file_name, &st) == -1) {
294 td_verror(td, errno, "stat");
295 return 1;
296 }
297
298 f->real_file_size = st.st_size;
299 return 0;
300}
301
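/* Open the block device and ask the OS how big it is */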
302static int bdev_size(struct thread_data *td, struct fio_file *f)
303{
304 unsigned long long bytes = 0;
305 int r;
306
307 if (td->io_ops->open_file(td, f)) {
308 log_err("fio: failed opening blockdev %s for size check\n",
309 f->file_name);
310 return 1;
311 }
312
313 r = blockdev_size(f, &bytes);
314 if (r) {
315 td_verror(td, r, "blockdev_size");
316 goto err;
317 }
318
319 if (!bytes) {
320 log_err("%s: zero sized block device?\n", f->file_name);
321 goto err;
322 }
323
324 f->real_file_size = bytes;
325 td->io_ops->close_file(td, f);
326 return 0;
327err:
328 td->io_ops->close_file(td, f);
329 return 1;
330}
331
332static int char_size(struct thread_data *td, struct fio_file *f)
333{
334#ifdef FIO_HAVE_CHARDEV_SIZE
335 unsigned long long bytes = 0;
336 int r;
337
338 if (td->io_ops->open_file(td, f)) {
339 log_err("fio: failed opening chardev %s for size check\n",
340 f->file_name);
341 return 1;
342 }
343
344 r = chardev_size(f, &bytes);
345 if (r) {
346 td_verror(td, r, "chardev_size");
347 goto err;
348 }
349
350 if (!bytes) {
351 log_err("%s: zero sized char device?\n", f->file_name);
352 goto err;
353 }
354
355 f->real_file_size = bytes;
356 td->io_ops->close_file(td, f);
357 return 0;
358err:
359 td->io_ops->close_file(td, f);
360 return 1;
361#else
362 f->real_file_size = -1ULL;
363 return 0;
364#endif
365}
366
367static int get_file_size(struct thread_data *td, struct fio_file *f)
368{
369 int ret = 0;
370
371 if (fio_file_size_known(f))
372 return 0;
373
374 if (f->filetype == FIO_TYPE_FILE)
375 ret = file_size(td, f);
376 else if (f->filetype == FIO_TYPE_BLOCK)
377 ret = bdev_size(td, f);
378 else if (f->filetype == FIO_TYPE_CHAR)
379 ret = char_size(td, f);
380 else
381 f->real_file_size = -1ULL;
382
383 /*
384 * Leave ->real_file_size at 0 on error, since a zero size may
385 * simply be the expected initial state for regular files.
386 */
387 if (ret)
388 return ret;
389
390 /*
391 * If ->real_file_size is -1ULL, the "offset extends end" check
392 * below would always trigger but be meaningless, so report the
393 * failure and return the same value here.
394 */
395 if (f->real_file_size == -1ULL) {
396 log_info("%s: failed to get file size of %s\n", td->o.name,
397 f->file_name);
398 return 1;
399 }
400
401 if (td->o.start_offset && f->file_offset == 0)
402 dprint(FD_FILE, "offset of file %s not initialized yet\n",
403 f->file_name);
404 /*
405 * ->file_offset normally hasn't been initialized yet, so this
406 * is basically always false.
407 */
408 if (f->file_offset > f->real_file_size) {
409 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
410 (unsigned long long) f->file_offset,
411 (unsigned long long) f->real_file_size);
412 return 1;
413 }
414
415 fio_file_set_size_known(f);
416 return 0;
417}
418
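/*
 * Drop cached data for the given range: through the ioengine hook if one
 * exists, via posix_fadvise() for regular files, or with the block device
 * cache-invalidation ioctl for block devices.
 */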
419static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
420 unsigned long long off,
421 unsigned long long len)
422{
423 int errval = 0, ret = 0;
424
425#ifdef CONFIG_ESX
426 return 0;
427#endif
428
429 if (len == -1ULL)
430 len = f->io_size;
431 if (off == -1ULL)
432 off = f->file_offset;
433
434 if (len == -1ULL || off == -1ULL)
435 return 0;
436
437 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
438 len);
439
440 if (td->io_ops->invalidate) {
441 ret = td->io_ops->invalidate(td, f);
442 if (ret < 0)
443 errval = ret;
444 } else if (f->filetype == FIO_TYPE_FILE) {
445 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
446 if (ret)
447 errval = ret;
448 } else if (f->filetype == FIO_TYPE_BLOCK) {
449 int retry_count = 0;
450
451 ret = blockdev_invalidate_cache(f);
452 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
453 /*
454 * Linux multipath devices reject ioctl while
455 * the maps are being updated. That window can
456 * last tens of milliseconds; we'll try up to
457 * a quarter of a second.
458 */
459 usleep(10000);
460 ret = blockdev_invalidate_cache(f);
461 }
462 if (ret < 0 && errno == EACCES && geteuid()) {
463 if (!root_warn) {
464 log_err("fio: only root may flush block "
465 "devices. Cache flush bypassed!\n");
466 root_warn = 1;
467 }
468 ret = 0;
469 }
470 if (ret < 0)
471 errval = errno;
472 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
473 ret = 0;
474
475 /*
476 * Cache flushing isn't a fatal condition, and we know it will
477 * happen on some platforms where we don't have the proper
478 * function to flush e.g. block device caches. So just warn and
479 * continue on our way.
480 */
481 if (errval)
482 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errval));
483
484 return 0;
485
486}
487
488int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
489{
490 if (!fio_file_open(f))
491 return 0;
492
493 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
494}
495
496int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
497{
498 int ret = 0;
499
500 dprint(FD_FILE, "fd close %s\n", f->file_name);
501
502 remove_file_hash(f);
503
504 if (close(f->fd) < 0)
505 ret = errno;
506
507 f->fd = -1;
508
509 if (f->shadow_fd != -1) {
510 close(f->shadow_fd);
511 f->shadow_fd = -1;
512 }
513
514 f->engine_data = 0;
515 return ret;
516}
517
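/*
 * Open a file, reusing the lock of an already hashed entry if another job
 * has the same name open. Returns whether the file was found in the hash.
 */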
518int file_lookup_open(struct fio_file *f, int flags)
519{
520 struct fio_file *__f;
521 int from_hash;
522
523 __f = lookup_file_hash(f->file_name);
524 if (__f) {
525 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
526 f->lock = __f->lock;
527 from_hash = 1;
528 } else {
529 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
530 from_hash = 0;
531 }
532
533#ifdef WIN32
534 flags |= _O_BINARY;
535#endif
536
537 f->fd = open(f->file_name, flags, 0600);
538 return from_hash;
539}
540
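/*
 * Close any stashed shadow descriptors to free up file descriptors,
 * returning how many were closed.
 */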
541static int file_close_shadow_fds(struct thread_data *td)
542{
543 struct fio_file *f;
544 int num_closed = 0;
545 unsigned int i;
546
547 for_each_file(td, f, i) {
548 if (f->shadow_fd == -1)
549 continue;
550
551 close(f->shadow_fd);
552 f->shadow_fd = -1;
553 num_closed++;
554 }
555
556 return num_closed;
557}
558
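/*
 * Default .open_file implementation: build the open(2) flags from the job
 * options and open the file, retrying without noatime on EPERM and after
 * closing shadow fds on EMFILE.
 */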
559int generic_open_file(struct thread_data *td, struct fio_file *f)
560{
561 int is_std = 0;
562 int flags = 0;
563 int from_hash = 0;
564
565 dprint(FD_FILE, "fd open %s\n", f->file_name);
566
567 if (!strcmp(f->file_name, "-")) {
568 if (td_rw(td)) {
569 log_err("fio: can't read/write to stdin/out\n");
570 return 1;
571 }
572 is_std = 1;
573
574 /*
575 * move output logging to stderr, if we are writing to stdout
576 */
577 if (td_write(td))
578 f_out = stderr;
579 }
580
581 if (td_trim(td))
582 goto skip_flags;
583 if (td->o.odirect)
584 flags |= OS_O_DIRECT;
585 if (td->o.oatomic) {
586 if (!FIO_O_ATOMIC) {
587 td_verror(td, EINVAL, "OS does not support atomic IO");
588 return 1;
589 }
590 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
591 }
592 if (td->o.sync_io)
593 flags |= O_SYNC;
594 if (td->o.create_on_open && td->o.allow_create)
595 flags |= O_CREAT;
596skip_flags:
597 if (f->filetype != FIO_TYPE_FILE)
598 flags |= FIO_O_NOATIME;
599
600open_again:
601 if (td_write(td)) {
602 if (!read_only)
603 flags |= O_RDWR;
604
605 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
606 flags |= O_CREAT;
607
608 if (is_std)
609 f->fd = dup(STDOUT_FILENO);
610 else
611 from_hash = file_lookup_open(f, flags);
612 } else if (td_read(td)) {
613 if (f->filetype == FIO_TYPE_CHAR && !read_only)
614 flags |= O_RDWR;
615 else
616 flags |= O_RDONLY;
617
618 if (is_std)
619 f->fd = dup(STDIN_FILENO);
620 else
621 from_hash = file_lookup_open(f, flags);
622 } else { /* td_trim() */
623 flags |= O_RDWR;
624 from_hash = file_lookup_open(f, flags);
625 }
626
627 if (f->fd == -1) {
628 char buf[FIO_VERROR_SIZE];
629 int __e = errno;
630
631 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
632 flags &= ~FIO_O_NOATIME;
633 goto open_again;
634 }
635 if (__e == EMFILE && file_close_shadow_fds(td))
636 goto open_again;
637
638 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
639
640 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
641 log_err("fio: looks like your file system does not " \
642 "support direct=1/buffered=0\n");
643 }
644
645 td_verror(td, __e, buf);
646 return 1;
647 }
648
649 if (!from_hash && f->fd != -1) {
650 if (add_file_hash(f)) {
651 int fio_unused ret;
652
653 /*
654 * Stash away descriptor for later close. This is to
655 * work-around a "feature" on Linux, where a close of
656 * an fd that has been opened for write will trigger
657 * udev to call blkid to check partitions, fs id, etc.
658 * That pollutes the device cache, which can slow down
659 * unbuffered accesses.
660 */
661 if (f->shadow_fd == -1)
662 f->shadow_fd = f->fd;
663 else {
664 /*
665 * OK to ignore, we haven't done anything
666 * with it
667 */
668 ret = generic_close_file(td, f);
669 }
670 goto open_again;
671 }
672 }
673
674 return 0;
675}
676
677/*
678 * This is the default .get_file_size implementation, used by the
679 * majority of I/O engines; it just calls the local get_file_size().
680 */
681int generic_get_file_size(struct thread_data *td, struct fio_file *f)
682{
683 return get_file_size(td, f);
684}
685
686/*
687 * open/close all files, so that ->real_file_size gets set
688 */
689static int get_file_sizes(struct thread_data *td)
690{
691 struct fio_file *f;
692 unsigned int i;
693 int err = 0;
694
695 for_each_file(td, f, i) {
696 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
697 f->file_name);
698
699 if (td_io_get_file_size(td, f)) {
700 if (td->error != ENOENT) {
701 log_err("%s\n", td->verror);
702 err = 1;
703 break;
704 }
705 clear_error(td);
706 }
707
708 /*
709 * There are corner cases where we end up with -1 for
710 * ->real_file_size due to unsupported file type, etc.
711 * We then just set it to the size= option value divided by the
712 * number of files, similar to the way ->io_size is set.
713 * stat(2) failure doesn't set ->real_file_size to -1.
714 */
715 if (f->real_file_size == -1ULL && td->o.size)
716 f->real_file_size = td->o.size / td->o.nr_files;
717 }
718
719 return err;
720}
721
722struct fio_mount {
723 struct flist_head list;
724 const char *base;
725 char __base[256];
726 unsigned int key;
727};
728
729/*
730 * Get the number of free bytes on each unique mount backing our files.
731 */
732static unsigned long long get_fs_free_counts(struct thread_data *td)
733{
734 struct flist_head *n, *tmp;
735 unsigned long long ret = 0;
736 struct fio_mount *fm;
737 FLIST_HEAD(list);
738 struct fio_file *f;
739 unsigned int i;
740
741 for_each_file(td, f, i) {
742 struct stat sb;
743 char buf[256];
744
745 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
746 if (f->real_file_size != -1ULL)
747 ret += f->real_file_size;
748 continue;
749 } else if (f->filetype != FIO_TYPE_FILE)
750 continue;
751
752 buf[255] = '\0';
753 strncpy(buf, f->file_name, 255);
754
755 if (stat(buf, &sb) < 0) {
756 if (errno != ENOENT)
757 break;
758 strcpy(buf, ".");
759 if (stat(buf, &sb) < 0)
760 break;
761 }
762
763 fm = NULL;
764 flist_for_each(n, &list) {
765 fm = flist_entry(n, struct fio_mount, list);
766 if (fm->key == sb.st_dev)
767 break;
768
769 fm = NULL;
770 }
771
772 if (fm)
773 continue;
774
775 fm = calloc(1, sizeof(*fm));
776 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
777 fm->base = basename(fm->__base);
778 fm->key = sb.st_dev;
779 flist_add(&fm->list, &list);
780 }
781
782 flist_for_each_safe(n, tmp, &list) {
783 unsigned long long sz;
784
785 fm = flist_entry(n, struct fio_mount, list);
786 flist_del(&fm->list);
787
788 sz = get_fs_free_size(fm->base);
789 if (sz && sz != -1ULL)
790 ret += sz;
791
792 free(fm);
793 }
794
795 return ret;
796}
797
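/*
 * Starting offset for this file: the current size when appending, otherwise
 * start_offset plus the per-subjob offset_increment.
 */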
798uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
799{
800 struct thread_options *o = &td->o;
801
802 if (o->file_append && f->filetype == FIO_TYPE_FILE)
803 return f->real_file_size;
804
805 return td->o.start_offset +
806 td->subjob_number * td->o.offset_increment;
807}
808
809/*
810 * Open the files and set up file sizes, creating files if necessary.
811 */
812int setup_files(struct thread_data *td)
813{
814 unsigned long long total_size, extend_size;
815 struct thread_options *o = &td->o;
816 struct fio_file *f;
817 unsigned int i, nr_fs_extra = 0;
818 int err = 0, need_extend;
819 int old_state;
820 const unsigned int bs = td_min_bs(td);
821 uint64_t fs = 0;
822
823 dprint(FD_FILE, "setup files\n");
824
825 old_state = td_bump_runstate(td, TD_SETTING_UP);
826
827 if (o->read_iolog_file)
828 goto done;
829
830 /*
831 * Find out physical size of files or devices for this thread,
832 * before we determine I/O size and range of our targets.
833 * If ioengine defines a setup() method, it's responsible for
834 * opening the files and setting f->real_file_size to indicate
835 * the valid range for that file.
836 */
837 if (td->io_ops->setup)
838 err = td->io_ops->setup(td);
839 else
840 err = get_file_sizes(td);
841
842 if (err)
843 goto err_out;
844
845 /*
846 * check sizes. if the files/devices do not exist and the size
847 * isn't passed to fio, abort.
848 */
849 total_size = 0;
850 for_each_file(td, f, i) {
851 f->fileno = i;
852 if (f->real_file_size == -1ULL)
853 total_size = -1ULL;
854 else
855 total_size += f->real_file_size;
856 }
857
858 if (o->fill_device)
859 td->fill_device_size = get_fs_free_counts(td);
860
861 /*
862 * device/file sizes are zero and no size given, punt
863 */
864 if ((!total_size || total_size == -1ULL) && !o->size &&
865 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
866 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
867 log_err("%s: you need to specify size=\n", o->name);
868 td_verror(td, EINVAL, "total_file_size");
869 goto err_out;
870 }
871
872 /*
873 * Calculate per-file size and potential extra size for the
874 * first files, if needed (i.e. if we don't have a fixed size).
875 */
876 if (!o->file_size_low && o->nr_files) {
877 uint64_t all_fs;
878
879 fs = o->size / o->nr_files;
880 all_fs = fs * o->nr_files;
881
882 if (all_fs < o->size)
883 nr_fs_extra = (o->size - all_fs) / bs;
884 }
885
886 /*
887 * now file sizes are known, so we can set ->io_size. if size= is
888 * not given, ->io_size is just equal to ->real_file_size. if size
889 * is given, ->io_size is size / nr_files.
890 */
891 extend_size = total_size = 0;
892 need_extend = 0;
893 for_each_file(td, f, i) {
894 f->file_offset = get_start_offset(td, f);
895
896 /*
897 * Update ->io_size depending on options specified.
898 * A zero ->file_size_low means the filesize option isn't set.
899 * A non-zero ->file_size_low that equals ->file_size_high means
900 * the filesize option was given as a fixed size.
901 * A non-zero ->file_size_low that differs from ->file_size_high
902 * means the filesize option was given as a range.
903 */
904 if (!o->file_size_low) {
905 /*
906 * no file size or range given, file size is equal to
907 * total size divided by number of files. If the size
908 * doesn't divide nicely with the min blocksize,
909 * make the first files bigger.
910 */
911 f->io_size = fs;
912 if (nr_fs_extra) {
913 nr_fs_extra--;
914 f->io_size += bs;
915 }
916
917 /*
918 * We normally don't come here, but if the result is 0,
919 * set it to the real file size. That is the size of an
920 * already existing file, or 0 otherwise. No new file will
921 * be created in that case, because ->io_size + ->file_offset
922 * equals ->real_file_size.
923 */
924 if (!f->io_size) {
925 if (f->file_offset > f->real_file_size)
926 goto err_offset;
927 f->io_size = f->real_file_size - f->file_offset;
928 log_info("fio: forcing file %s size to %llu\n",
929 f->file_name,
930 (unsigned long long)f->io_size);
931 if (!f->io_size)
932 log_info("fio: file %s may be ignored\n",
933 f->file_name);
934 }
935 } else if (f->real_file_size < o->file_size_low ||
936 f->real_file_size > o->file_size_high) {
937 if (f->file_offset > o->file_size_low)
938 goto err_offset;
939 /*
940 * file size given. if it's fixed, use that. if it's a
941 * range, generate a random size in-between.
942 */
943 if (o->file_size_low == o->file_size_high)
944 f->io_size = o->file_size_low - f->file_offset;
945 else {
946 f->io_size = get_rand_file_size(td)
947 - f->file_offset;
948 }
949 } else
950 f->io_size = f->real_file_size - f->file_offset;
951
952 if (f->io_size == -1ULL)
953 total_size = -1ULL;
954 else {
955 if (o->size_percent) {
956 f->io_size = (f->io_size * o->size_percent) / 100;
957 f->io_size -= (f->io_size % td_min_bs(td));
958 }
959 total_size += f->io_size;
960 }
961
962 if (f->filetype == FIO_TYPE_FILE &&
963 (f->io_size + f->file_offset) > f->real_file_size &&
964 !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
965 if (!o->create_on_open) {
966 need_extend++;
967 extend_size += (f->io_size + f->file_offset);
968 } else
969 f->real_file_size = f->io_size + f->file_offset;
970 fio_file_set_extend(f);
971 }
972 }
973
974 if (td->o.block_error_hist) {
975 int len;
976
977 assert(td->o.nr_files == 1); /* checked in fixup_options */
978 f = td->files[0];
979 len = f->io_size / td->o.bs[DDIR_TRIM];
980 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
981 log_err("fio: cannot calculate block histogram with "
982 "%d trim blocks, maximum %d\n",
983 len, MAX_NR_BLOCK_INFOS);
984 td_verror(td, EINVAL, "block_error_hist");
985 goto err_out;
986 }
987
988 td->ts.nr_block_infos = len;
989 for (i = 0; i < len; i++)
990 td->ts.block_infos[i] =
991 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
992 } else
993 td->ts.nr_block_infos = 0;
994
995 if (!o->size || (total_size && o->size > total_size))
996 o->size = total_size;
997
998 if (o->size < td_min_bs(td)) {
999 log_err("fio: blocksize too large for data set\n");
1000 goto err_out;
1001 }
1002
1003 /*
1004 * See if we need to extend some files, typically needed when our
1005 * target regular files don't exist yet, but our jobs require them
1006 * initially due to read I/Os.
1007 */
1008 if (need_extend) {
1009 temp_stall_ts = 1;
1010 if (output_format & FIO_OUTPUT_NORMAL)
1011 log_info("%s: Laying out IO file(s) (%u file(s) / %lluMiB)\n",
1012 o->name, need_extend, extend_size >> 20);
1013
1014 for_each_file(td, f, i) {
1015 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1016
1017 if (!fio_file_extend(f))
1018 continue;
1019
1020 assert(f->filetype == FIO_TYPE_FILE);
1021 fio_file_clear_extend(f);
1022 if (!o->fill_device) {
1023 old_len = f->real_file_size;
1024 extend_len = f->io_size + f->file_offset -
1025 old_len;
1026 }
1027 f->real_file_size = (f->io_size + f->file_offset);
1028 err = extend_file(td, f);
1029 if (err)
1030 break;
1031
1032 err = __file_invalidate_cache(td, f, old_len,
1033 extend_len);
1034
1035 /*
1036 * Shut up static checker
1037 */
1038 if (f->fd != -1)
1039 close(f->fd);
1040
1041 f->fd = -1;
1042 if (err)
1043 break;
1044 }
1045 temp_stall_ts = 0;
1046 }
1047
1048 if (err)
1049 goto err_out;
1050
1051 if (!o->zone_size)
1052 o->zone_size = o->size;
1053
1054 /*
1055 * iolog already set the total io size, if we read back
1056 * stored entries.
1057 */
1058 if (!o->read_iolog_file) {
1059 if (o->io_size)
1060 td->total_io_size = o->io_size * o->loops;
1061 else
1062 td->total_io_size = o->size * o->loops;
1063 }
1064
1065done:
1066 if (o->create_only)
1067 td->done = 1;
1068
1069 td_restore_runstate(td, old_state);
1070 return 0;
1071err_offset:
1072 log_err("%s: you need to specify valid offset=\n", o->name);
1073err_out:
1074 td_restore_runstate(td, old_state);
1075 return 1;
1076}
1077
1078int pre_read_files(struct thread_data *td)
1079{
1080 struct fio_file *f;
1081 unsigned int i;
1082
1083 dprint(FD_FILE, "pre_read files\n");
1084
1085 for_each_file(td, f, i) {
1086 pre_read_file(td, f);
1087 }
1088
1089 return 1;
1090}
1091
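/*
 * Initialize the zipf/pareto/gauss state for one file, using one range per
 * minimum block size worth of the file.
 */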
1092static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1093{
1094 unsigned int range_size, seed;
1095 unsigned long nranges;
1096 uint64_t fsize;
1097
1098 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1099 fsize = min(f->real_file_size, f->io_size);
1100
1101 nranges = (fsize + range_size - 1) / range_size;
1102
1103 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1104 if (!td->o.rand_repeatable)
1105 seed = td->rand_seeds[4];
1106
1107 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1108 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1109 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1110 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1111 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1112 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1113
1114 return 1;
1115}
1116
1117static int init_rand_distribution(struct thread_data *td)
1118{
1119 struct fio_file *f;
1120 unsigned int i;
1121 int state;
1122
1123 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1124 return 0;
1125
1126 state = td_bump_runstate(td, TD_SETTING_UP);
1127
1128 for_each_file(td, f, i)
1129 __init_rand_distribution(td, f);
1130
1131 td_restore_runstate(td, state);
1132
1133 return 1;
1134}
1135
1136/*
1137 * Check if the number of blocks exceeds the randomness capability of
1138 * the selected generator. Tausworthe is 32-bit, the others are fully
1139 * 64-bit capable.
1140 */
1141static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1142 uint64_t blocks)
1143{
1144 if (blocks <= FRAND32_MAX)
1145 return 0;
1146 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1147 return 0;
1148
1149 /*
1150 * If the user hasn't specified a random generator, switch
1151 * to tausworthe64 with an informational warning. If the user did
1152 * specify one, just warn.
1153 */
1154 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1155 f->file_name);
1156
1157 if (!fio_option_is_set(&td->o, random_generator)) {
1158 log_info("fio: Switching to tausworthe64. Use the "
1159 "random_generator= option to get rid of this "
1160 "warning.\n");
1161 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1162 return 0;
1163 }
1164
1165 /*
1166 * Keep this message informational only, to avoid breaking scripts.
1167 */
1168 log_info("fio: Use the random_generator= option to switch to lfsr or "
1169 "tausworthe64.\n");
1170 return 0;
1171}
1172
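/*
 * Set up whatever state the block offset randomizer needs: an LFSR or an
 * axmap per file, unless norandommap was requested.
 */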
1173int init_random_map(struct thread_data *td)
1174{
1175 unsigned long long blocks;
1176 struct fio_file *f;
1177 unsigned int i;
1178
1179 if (init_rand_distribution(td))
1180 return 0;
1181 if (!td_random(td))
1182 return 0;
1183
1184 for_each_file(td, f, i) {
1185 uint64_t fsize = min(f->real_file_size, f->io_size);
1186
1187 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1188
1189 if (check_rand_gen_limits(td, f, blocks))
1190 return 1;
1191
1192 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1193 unsigned long seed;
1194
1195 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1196
1197 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1198 fio_file_set_lfsr(f);
1199 continue;
1200 }
1201 } else if (!td->o.norandommap) {
1202 f->io_axmap = axmap_new(blocks);
1203 if (f->io_axmap) {
1204 fio_file_set_axmap(f);
1205 continue;
1206 }
1207 } else if (td->o.norandommap)
1208 continue;
1209
1210 if (!td->o.softrandommap) {
1211 log_err("fio: failed allocating random map. If running"
1212 " a large number of jobs, try the 'norandommap'"
1213 " option or set 'softrandommap'. Or give"
1214 " a larger --alloc-size to fio.\n");
1215 return 1;
1216 }
1217
1218 log_info("fio: file %s failed allocating random map. Running "
1219 "job without.\n", f->file_name);
1220 }
1221
1222 return 0;
1223}
1224
1225void close_files(struct thread_data *td)
1226{
1227 struct fio_file *f;
1228 unsigned int i;
1229
1230 for_each_file(td, f, i) {
1231 if (fio_file_open(f))
1232 td_io_close_file(td, f);
1233 }
1234}
1235
1236void close_and_free_files(struct thread_data *td)
1237{
1238 struct fio_file *f;
1239 unsigned int i;
1240
1241 dprint(FD_FILE, "close files\n");
1242
1243 for_each_file(td, f, i) {
1244 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1245 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1246 td_io_unlink_file(td, f);
1247 }
1248
1249 if (fio_file_open(f))
1250 td_io_close_file(td, f);
1251
1252 remove_file_hash(f);
1253
1254 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1255 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1256 td_io_unlink_file(td, f);
1257 }
1258
1259 sfree(f->file_name);
1260 f->file_name = NULL;
1261 if (fio_file_axmap(f)) {
1262 axmap_free(f->io_axmap);
1263 f->io_axmap = NULL;
1264 }
1265 sfree(f);
1266 }
1267
1268 td->o.filename = NULL;
1269 free(td->files);
1270 free(td->file_locks);
1271 td->files_index = 0;
1272 td->files = NULL;
1273 td->file_locks = NULL;
1274 td->o.file_lock_mode = FILE_LOCK_NONE;
1275 td->o.nr_files = 0;
1276}
1277
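/*
 * Classify the file as a regular file, block device, character device or
 * pipe, based on its name and stat() information.
 */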
1278static void get_file_type(struct fio_file *f)
1279{
1280 struct stat sb;
1281
1282 if (!strcmp(f->file_name, "-"))
1283 f->filetype = FIO_TYPE_PIPE;
1284 else
1285 f->filetype = FIO_TYPE_FILE;
1286
1287#ifdef WIN32
1288 /* \\.\ is the device namespace in Windows, where every file is
1289 * a block device */
1290 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1291 f->filetype = FIO_TYPE_BLOCK;
1292#endif
1293
1294 if (!stat(f->file_name, &sb)) {
1295 if (S_ISBLK(sb.st_mode))
1296 f->filetype = FIO_TYPE_BLOCK;
1297 else if (S_ISCHR(sb.st_mode))
1298 f->filetype = FIO_TYPE_CHAR;
1299 else if (S_ISFIFO(sb.st_mode))
1300 f->filetype = FIO_TYPE_PIPE;
1301 }
1302}
1303
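/*
 * Check the bloom filter, then the filename list, to see if this file name
 * has already been added. Called with the file hash lock held.
 */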
1304static bool __is_already_allocated(const char *fname, bool set)
1305{
1306 struct flist_head *entry;
1307 bool ret;
1308
1309 ret = file_bloom_exists(fname, set);
1310 if (!ret)
1311 return ret;
1312
1313 flist_for_each(entry, &filename_list) {
1314 struct file_name *fn;
1315
1316 fn = flist_entry(entry, struct file_name, list);
1317
1318 if (!strcmp(fn->filename, fname))
1319 return true;
1320 }
1321
1322 return false;
1323}
1324
1325static bool is_already_allocated(const char *fname)
1326{
1327 bool ret;
1328
1329 fio_file_hash_lock();
1330 ret = __is_already_allocated(fname, false);
1331 fio_file_hash_unlock();
1332
1333 return ret;
1334}
1335
1336static void set_already_allocated(const char *fname)
1337{
1338 struct file_name *fn;
1339
1340 fn = malloc(sizeof(struct file_name));
1341 fn->filename = strdup(fname);
1342
1343 fio_file_hash_lock();
1344 if (!__is_already_allocated(fname, true)) {
1345 flist_add_tail(&fn->list, &filename_list);
1346 fn = NULL;
1347 }
1348 fio_file_hash_unlock();
1349
1350 if (fn) {
1351 free(fn->filename);
1352 free(fn);
1353 }
1354}
1355
1356static void free_already_allocated(void)
1357{
1358 struct flist_head *entry, *tmp;
1359 struct file_name *fn;
1360
1361 if (flist_empty(&filename_list))
1362 return;
1363
1364 fio_file_hash_lock();
1365 flist_for_each_safe(entry, tmp, &filename_list) {
1366 fn = flist_entry(entry, struct file_name, list);
1367 free(fn->filename);
1368 flist_del(&fn->list);
1369 free(fn);
1370 }
1371
1372 fio_file_hash_unlock();
1373}
1374
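/* Allocate a new fio_file from shared memory and reset it to default state */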
1375static struct fio_file *alloc_new_file(struct thread_data *td)
1376{
1377 struct fio_file *f;
1378
1379 f = smalloc(sizeof(*f));
1380 if (!f) {
1381 assert(0);
1382 return NULL;
1383 }
1384
1385 f->fd = -1;
1386 f->shadow_fd = -1;
1387 fio_file_reset(td, f);
1388 return f;
1389}
1390
1391bool exists_and_not_regfile(const char *filename)
1392{
1393 struct stat sb;
1394
1395 if (lstat(filename, &sb) == -1)
1396 return false;
1397
1398#ifndef WIN32 /* NOT Windows */
1399 if (S_ISREG(sb.st_mode))
1400 return false;
1401#else
1402 /* \\.\ is the device namespace in Windows, where every file
1403 * is a device node */
1404 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1405 return false;
1406#endif
1407
1408 return true;
1409}
1410
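/*
 * Add a file to the job, growing the file array as needed. Returns the
 * index it was added at.
 */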
1411int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1412{
1413 int cur_files = td->files_index;
1414 char file_name[PATH_MAX];
1415 struct fio_file *f;
1416 int len = 0;
1417
1418 dprint(FD_FILE, "add file %s\n", fname);
1419
1420 if (td->o.directory)
1421 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1422 td->o.unique_filename);
1423
1424 sprintf(file_name + len, "%s", fname);
1425
1426 /* clean cloned siblings using existing files */
1427 if (numjob && is_already_allocated(file_name) &&
1428 !exists_and_not_regfile(fname))
1429 return 0;
1430
1431 f = alloc_new_file(td);
1432
1433 if (td->files_size <= td->files_index) {
1434 unsigned int new_size = td->o.nr_files + 1;
1435
1436 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1437
1438 td->files = realloc(td->files, new_size * sizeof(f));
1439 if (td->files == NULL) {
1440 log_err("fio: realloc OOM\n");
1441 assert(0);
1442 }
1443 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1444 td->file_locks = realloc(td->file_locks, new_size);
1445 if (!td->file_locks) {
1446 log_err("fio: realloc OOM\n");
1447 assert(0);
1448 }
1449 td->file_locks[cur_files] = FILE_LOCK_NONE;
1450 }
1451 td->files_size = new_size;
1452 }
1453 td->files[cur_files] = f;
1454 f->fileno = cur_files;
1455
1456 /*
1457 * init function, io engine may not be loaded yet
1458 */
1459 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1460 f->real_file_size = -1ULL;
1461
1462 f->file_name = smalloc_strdup(file_name);
1463 if (!f->file_name)
1464 assert(0);
1465
1466 get_file_type(f);
1467
1468 switch (td->o.file_lock_mode) {
1469 case FILE_LOCK_NONE:
1470 break;
1471 case FILE_LOCK_READWRITE:
1472 f->rwlock = fio_rwlock_init();
1473 break;
1474 case FILE_LOCK_EXCLUSIVE:
1475 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1476 break;
1477 default:
1478 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1479 assert(0);
1480 }
1481
1482 td->files_index++;
1483 if (f->filetype == FIO_TYPE_FILE)
1484 td->nr_normal_files++;
1485
1486 set_already_allocated(file_name);
1487
1488 if (inc)
1489 td->o.nr_files++;
1490
1491 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1492 cur_files);
1493
1494 return cur_files;
1495}
1496
1497int add_file_exclusive(struct thread_data *td, const char *fname)
1498{
1499 struct fio_file *f;
1500 unsigned int i;
1501
1502 for_each_file(td, f, i) {
1503 if (!strcmp(f->file_name, fname))
1504 return i;
1505 }
1506
1507 return add_file(td, fname, 0, 1);
1508}
1509
1510void get_file(struct fio_file *f)
1511{
1512 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1513 assert(fio_file_open(f));
1514 f->references++;
1515}
1516
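/*
 * Drop a reference to the file; on the final put, optionally fsync it and
 * close it through the ioengine.
 */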
1517int put_file(struct thread_data *td, struct fio_file *f)
1518{
1519 int f_ret = 0, ret = 0;
1520
1521 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1522
1523 if (!fio_file_open(f)) {
1524 assert(f->fd == -1);
1525 return 0;
1526 }
1527
1528 assert(f->references);
1529 if (--f->references)
1530 return 0;
1531
1532 if (should_fsync(td) && td->o.fsync_on_close) {
1533 f_ret = fsync(f->fd);
1534 if (f_ret < 0)
1535 f_ret = errno;
1536 }
1537
1538 if (td->io_ops->close_file)
1539 ret = td->io_ops->close_file(td, f);
1540
1541 if (!ret)
1542 ret = f_ret;
1543
1544 td->nr_open_files--;
1545 fio_file_clear_open(f);
1546 assert(f->fd == -1);
1547 return ret;
1548}
1549
1550void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1551{
1552 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1553 return;
1554
1555 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1556 if (ddir == DDIR_READ)
1557 fio_rwlock_read(f->rwlock);
1558 else
1559 fio_rwlock_write(f->rwlock);
1560 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1561 fio_mutex_down(f->lock);
1562
1563 td->file_locks[f->fileno] = td->o.file_lock_mode;
1564}
1565
1566void unlock_file(struct thread_data *td, struct fio_file *f)
1567{
1568 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1569 return;
1570
1571 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1572 fio_rwlock_unlock(f->rwlock);
1573 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1574 fio_mutex_up(f->lock);
1575
1576 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1577}
1578
1579void unlock_file_all(struct thread_data *td, struct fio_file *f)
1580{
1581 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1582 return;
1583 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1584 unlock_file(td, f);
1585}
1586
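/*
 * Walk a directory tree, adding every regular file found to the job.
 */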
1587static int recurse_dir(struct thread_data *td, const char *dirname)
1588{
1589 struct dirent *dir;
1590 int ret = 0;
1591 DIR *D;
1592
1593 D = opendir(dirname);
1594 if (!D) {
1595 char buf[FIO_VERROR_SIZE];
1596
1597 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1598 td_verror(td, errno, buf);
1599 return 1;
1600 }
1601
1602 while ((dir = readdir(D)) != NULL) {
1603 char full_path[PATH_MAX];
1604 struct stat sb;
1605
1606 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1607 continue;
1608
1609 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1610
1611 if (lstat(full_path, &sb) == -1) {
1612 if (errno != ENOENT) {
1613 td_verror(td, errno, "stat");
1614 ret = 1;
1615 break;
1616 }
1617 }
1618
1619 if (S_ISREG(sb.st_mode)) {
1620 add_file(td, full_path, 0, 1);
1621 continue;
1622 }
1623 if (!S_ISDIR(sb.st_mode))
1624 continue;
1625
1626 ret = recurse_dir(td, full_path);
1627 if (ret)
1628 break;
1629 }
1630
1631 closedir(D);
1632 return ret;
1633}
1634
1635int add_dir_files(struct thread_data *td, const char *path)
1636{
1637 int ret = recurse_dir(td, path);
1638
1639 if (!ret)
1640 log_info("fio: opendir added %d files\n", td->o.nr_files);
1641
1642 return ret;
1643}
1644
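/*
 * Duplicate the file list of an existing job for a cloned thread, sharing
 * lock objects between the two.
 */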
1645void dup_files(struct thread_data *td, struct thread_data *org)
1646{
1647 struct fio_file *f;
1648 unsigned int i;
1649
1650 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1651
1652 if (!org->files)
1653 return;
1654
1655 td->files = malloc(org->files_index * sizeof(f));
1656
1657 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1658 td->file_locks = malloc(org->files_index);
1659
1660 for_each_file(org, f, i) {
1661 struct fio_file *__f;
1662
1663 __f = alloc_new_file(td);
1664
1665 if (f->file_name) {
1666 __f->file_name = smalloc_strdup(f->file_name);
1667 if (!__f->file_name)
1668 assert(0);
1669
1670 __f->filetype = f->filetype;
1671 }
1672
1673 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1674 __f->lock = f->lock;
1675 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1676 __f->rwlock = f->rwlock;
1677
1678 td->files[i] = __f;
1679 }
1680}
1681
1682/*
1683 * Returns the index that matches the filename, or -1 if not there
1684 */
1685int get_fileno(struct thread_data *td, const char *fname)
1686{
1687 struct fio_file *f;
1688 unsigned int i;
1689
1690 for_each_file(td, f, i)
1691 if (!strcmp(f->file_name, fname))
1692 return i;
1693
1694 return -1;
1695}
1696
1697/*
1698 * For log usage, where we add/open/close files automatically
1699 */
1700void free_release_files(struct thread_data *td)
1701{
1702 close_files(td);
1703 td->o.nr_files = 0;
1704 td->o.open_files = 0;
1705 td->files_index = 0;
1706 td->nr_normal_files = 0;
1707}
1708
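/*
 * Reset per-file I/O positions and the random block map (axmap or LFSR).
 */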
1709void fio_file_reset(struct thread_data *td, struct fio_file *f)
1710{
1711 int i;
1712
1713 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1714 f->last_pos[i] = f->file_offset;
1715 f->last_start[i] = -1ULL;
1716 }
1717
1718 if (fio_file_axmap(f))
1719 axmap_reset(f->io_axmap);
1720 else if (fio_file_lfsr(f))
1721 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1722}
1723
1724bool fio_files_done(struct thread_data *td)
1725{
1726 struct fio_file *f;
1727 unsigned int i;
1728
1729 for_each_file(td, f, i)
1730 if (!fio_file_done(f))
1731 return false;
1732
1733 return true;
1734}
1735
1736/* free memory used in initialization phase only */
1737void filesetup_mem_free(void)
1738{
1739 free_already_allocated();
1740}