filesetup: print warning if chosen random generator can't cover range
[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * check if we need to lay the file out completely again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
62 if ((td_io_unlink_file(td, f) < 0) && (errno != ENOENT)) {
63 td_verror(td, errno, "unlink");
64 return 1;
65 }
66 }
67
68 flags = O_WRONLY;
69 if (td->o.allow_create)
70 flags |= O_CREAT;
71 if (new_layout)
72 flags |= O_TRUNC;
73
74#ifdef WIN32
75 flags |= _O_BINARY;
76#endif
77
78 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
79 f->fd = open(f->file_name, flags, 0644);
80 if (f->fd < 0) {
81 int err = errno;
82
83 if (err == ENOENT && !td->o.allow_create)
84 log_err("fio: file creation disallowed by "
85 "allow_file_create=0\n");
86 else
87 td_verror(td, err, "open");
88 return 1;
89 }
90
91#ifdef CONFIG_POSIX_FALLOCATE
92 if (!td->o.fill_device) {
93 switch (td->o.fallocate_mode) {
94 case FIO_FALLOCATE_NONE:
95 break;
96 case FIO_FALLOCATE_POSIX:
97 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
98 f->file_name,
99 (unsigned long long) f->real_file_size);
100
101 r = posix_fallocate(f->fd, 0, f->real_file_size);
102 if (r > 0) {
103 log_err("fio: posix_fallocate fails: %s\n",
104 strerror(r));
105 }
106 break;
107#ifdef CONFIG_LINUX_FALLOCATE
108 case FIO_FALLOCATE_KEEP_SIZE:
109 dprint(FD_FILE,
110 "fallocate(FALLOC_FL_KEEP_SIZE) "
111 "file %s size %llu\n", f->file_name,
112 (unsigned long long) f->real_file_size);
113
114 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
115 f->real_file_size);
116 if (r != 0)
117 td_verror(td, errno, "fallocate");
118
119 break;
120#endif /* CONFIG_LINUX_FALLOCATE */
121 default:
122 log_err("fio: unknown fallocate mode: %d\n",
123 td->o.fallocate_mode);
124 assert(0);
125 }
126 }
127#endif /* CONFIG_POSIX_FALLOCATE */
128
129 if (!new_layout)
130 goto done;
131
132 /*
133 * The size will be -1ULL when fill_device is used, so don't truncate
134 * or fallocate this file, just write it
135 */
136 if (!td->o.fill_device) {
137 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
138 (unsigned long long) f->real_file_size);
139 if (ftruncate(f->fd, f->real_file_size) == -1) {
140 if (errno != EFBIG) {
141 td_verror(td, errno, "ftruncate");
142 goto err;
143 }
144 }
145 }
146
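	/*
	 * Lay the file out front to back in max_bs[WRITE] sized chunks,
	 * filling each chunk with the configured buffer contents. With
	 * fill_device we keep writing until the device returns ENOSPC.
	 */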
147 b = malloc(td->o.max_bs[DDIR_WRITE]);
148
149 left = f->real_file_size;
150 while (left && !td->terminate) {
151 bs = td->o.max_bs[DDIR_WRITE];
152 if (bs > left)
153 bs = left;
154
155 fill_io_buffer(td, b, bs, bs);
156
157 r = write(f->fd, b, bs);
158
159 if (r > 0) {
160 left -= r;
161 continue;
162 } else {
163 if (r < 0) {
164 int __e = errno;
165
166 if (__e == ENOSPC) {
167 if (td->o.fill_device)
168 break;
169 log_info("fio: ENOSPC on laying out "
170 "file, stopping\n");
171 break;
172 }
173 td_verror(td, errno, "write");
174 } else
175 td_verror(td, EIO, "write");
176
177 break;
178 }
179 }
180
181 if (td->terminate) {
182 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
183 td_io_unlink_file(td, f);
184 } else if (td->o.create_fsync) {
185 if (fsync(f->fd) < 0) {
186 td_verror(td, errno, "fsync");
187 goto err;
188 }
189 }
190 if (td->o.fill_device && !td_write(td)) {
191 fio_file_clear_size_known(f);
192 if (td_io_get_file_size(td, f))
193 goto err;
194 if (f->io_size > f->real_file_size)
195 f->io_size = f->real_file_size;
196 }
197
198 free(b);
199done:
200 return 0;
201err:
202 close(f->fd);
203 f->fd = -1;
204 if (b)
205 free(b);
206 return 1;
207}
208
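/*
 * Read the file once, sequentially, in max_bs[READ] sized chunks so its
 * contents are cached before the job starts issuing I/O.
 */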
209static int pre_read_file(struct thread_data *td, struct fio_file *f)
210{
211 int ret = 0, r, did_open = 0, old_runstate;
212 unsigned long long left;
213 unsigned int bs;
214 char *b;
215
216 if (td->io_ops->flags & FIO_PIPEIO)
217 return 0;
218
219 if (!fio_file_open(f)) {
220 if (td->io_ops->open_file(td, f)) {
221 log_err("fio: cannot pre-read, failed to open file\n");
222 return 1;
223 }
224 did_open = 1;
225 }
226
227 old_runstate = td_bump_runstate(td, TD_PRE_READING);
228
229 bs = td->o.max_bs[DDIR_READ];
230 b = malloc(bs);
231 memset(b, 0, bs);
232
233 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
234 td_verror(td, errno, "lseek");
235 log_err("fio: failed to lseek pre-read file\n");
236 ret = 1;
237 goto error;
238 }
239
240 left = f->io_size;
241
242 while (left && !td->terminate) {
243 if (bs > left)
244 bs = left;
245
246 r = read(f->fd, b, bs);
247
248 if (r == (int) bs) {
249 left -= bs;
250 continue;
251 } else {
252 td_verror(td, EIO, "pre_read");
253 break;
254 }
255 }
256
257error:
258 td_restore_runstate(td, old_runstate);
259
260 if (did_open)
261 td->io_ops->close_file(td, f);
262
263 free(b);
264 return ret;
265}
266
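/*
 * Pick a file size roughly uniformly between file_size_low and
 * file_size_high, aligned down to a multiple of the minimum block size.
 */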
267static unsigned long long get_rand_file_size(struct thread_data *td)
268{
269 unsigned long long ret, sized;
270 uint64_t frand_max;
271 unsigned long r;
272
273 frand_max = rand_max(&td->file_size_state);
274 r = __rand(&td->file_size_state);
275 sized = td->o.file_size_high - td->o.file_size_low;
276 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
277 ret += td->o.file_size_low;
278 ret -= (ret % td->o.rw_min_bs);
279 return ret;
280}
281
282static int file_size(struct thread_data *td, struct fio_file *f)
283{
284 struct stat st;
285
286 if (stat(f->file_name, &st) == -1) {
287 td_verror(td, errno, "stat");
288 return 1;
289 }
290
291 f->real_file_size = st.st_size;
292 return 0;
293}
294
295static int bdev_size(struct thread_data *td, struct fio_file *f)
296{
297 unsigned long long bytes = 0;
298 int r;
299
300 if (td->io_ops->open_file(td, f)) {
301 log_err("fio: failed opening blockdev %s for size check\n",
302 f->file_name);
303 return 1;
304 }
305
306 r = blockdev_size(f, &bytes);
307 if (r) {
308 td_verror(td, r, "blockdev_size");
309 goto err;
310 }
311
312 if (!bytes) {
313 log_err("%s: zero sized block device?\n", f->file_name);
314 goto err;
315 }
316
317 f->real_file_size = bytes;
318 td->io_ops->close_file(td, f);
319 return 0;
320err:
321 td->io_ops->close_file(td, f);
322 return 1;
323}
324
325static int char_size(struct thread_data *td, struct fio_file *f)
326{
327#ifdef FIO_HAVE_CHARDEV_SIZE
328 unsigned long long bytes = 0;
329 int r;
330
331 if (td->io_ops->open_file(td, f)) {
332 log_err("fio: failed opening chardev %s for size check\n",
333 f->file_name);
334 return 1;
335 }
336
337 r = chardev_size(f, &bytes);
338 if (r) {
339 td_verror(td, r, "chardev_size");
340 goto err;
341 }
342
343 if (!bytes) {
344 log_err("%s: zero sized char device?\n", f->file_name);
345 goto err;
346 }
347
348 f->real_file_size = bytes;
349 td->io_ops->close_file(td, f);
350 return 0;
351err:
352 td->io_ops->close_file(td, f);
353 return 1;
354#else
355 f->real_file_size = -1ULL;
356 return 0;
357#endif
358}
359
360static int get_file_size(struct thread_data *td, struct fio_file *f)
361{
362 int ret = 0;
363
364 if (fio_file_size_known(f))
365 return 0;
366
367 if (f->filetype == FIO_TYPE_FILE)
368 ret = file_size(td, f);
369 else if (f->filetype == FIO_TYPE_BD)
370 ret = bdev_size(td, f);
371 else if (f->filetype == FIO_TYPE_CHAR)
372 ret = char_size(td, f);
373 else
374 f->real_file_size = -1;
375
376 if (ret)
377 return ret;
378
379 if (f->file_offset > f->real_file_size) {
380 log_err("%s: offset extends past end of file (%llu > %llu)\n", td->o.name,
381 (unsigned long long) f->file_offset,
382 (unsigned long long) f->real_file_size);
383 return 1;
384 }
385
386 fio_file_set_size_known(f);
387 return 0;
388}
389
390static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
391 unsigned long long off,
392 unsigned long long len)
393{
394 int ret = 0;
395
396#ifdef CONFIG_ESX
397 return 0;
398#endif
399
400 if (len == -1ULL)
401 len = f->io_size;
402 if (off == -1ULL)
403 off = f->file_offset;
404
405 if (len == -1ULL || off == -1ULL)
406 return 0;
407
408 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
409 len);
410
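	/*
	 * Use the ioengine's own invalidate hook if it has one. Otherwise
	 * drop the page cache with posix_fadvise(DONTNEED) for regular
	 * files, or flush the device cache for block devices; char devices
	 * and pipes have nothing to invalidate.
	 */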
411 if (td->io_ops->invalidate)
412 ret = td->io_ops->invalidate(td, f);
413 else if (f->filetype == FIO_TYPE_FILE)
414 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
415 else if (f->filetype == FIO_TYPE_BD) {
416 ret = blockdev_invalidate_cache(f);
417 if (ret < 0 && errno == EACCES && geteuid()) {
418 if (!root_warn) {
419 log_err("fio: only root may flush block "
420 "devices. Cache flush bypassed!\n");
421 root_warn = 1;
422 }
423 ret = 0;
424 }
425 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
426 ret = 0;
427
428 /*
429 * Cache flushing isn't a fatal condition, and we know it will
430 * fail on some platforms where we don't have the proper
431 * function to flush e.g. block device caches. So just warn and
432 * continue on our way.
433 */
434 if (ret) {
435 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errno));
436 ret = 0;
437 }
438
439 return 0;
440
441}
442
443int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
444{
445 if (!fio_file_open(f))
446 return 0;
447
448 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
449}
450
451int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
452{
453 int ret = 0;
454
455 dprint(FD_FILE, "fd close %s\n", f->file_name);
456
457 remove_file_hash(f);
458
459 if (close(f->fd) < 0)
460 ret = errno;
461
462 f->fd = -1;
463
464 if (f->shadow_fd != -1) {
465 close(f->shadow_fd);
466 f->shadow_fd = -1;
467 }
468
469 f->engine_data = 0;
470 return ret;
471}
472
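/*
 * Open a file, reusing the lock of an identically named file that is
 * already in the global file hash so that jobs sharing a file also share
 * its lock. Returns whether the name was found in the hash.
 */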
473int file_lookup_open(struct fio_file *f, int flags)
474{
475 struct fio_file *__f;
476 int from_hash;
477
478 __f = lookup_file_hash(f->file_name);
479 if (__f) {
480 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
481 /*
482 * racy; this should really be done with __f->lock held
483 */
484 f->lock = __f->lock;
485 from_hash = 1;
486 } else {
487 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
488 from_hash = 0;
489 }
490
491#ifdef WIN32
492 flags |= _O_BINARY;
493#endif
494
495 f->fd = open(f->file_name, flags, 0600);
496 return from_hash;
497}
498
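/*
 * Close any descriptors stashed in ->shadow_fd (see generic_open_file())
 * to free up file descriptors when open() fails with EMFILE.
 */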
499static int file_close_shadow_fds(struct thread_data *td)
500{
501 struct fio_file *f;
502 int num_closed = 0;
503 unsigned int i;
504
505 for_each_file(td, f, i) {
506 if (f->shadow_fd == -1)
507 continue;
508
509 close(f->shadow_fd);
510 f->shadow_fd = -1;
511 num_closed++;
512 }
513
514 return num_closed;
515}
516
517int generic_open_file(struct thread_data *td, struct fio_file *f)
518{
519 int is_std = 0;
520 int flags = 0;
521 int from_hash = 0;
522
523 dprint(FD_FILE, "fd open %s\n", f->file_name);
524
525 if (!strcmp(f->file_name, "-")) {
526 if (td_rw(td)) {
527 log_err("fio: can't read/write to stdin/out\n");
528 return 1;
529 }
530 is_std = 1;
531
532 /*
533 * move output logging to stderr, if we are writing to stdout
534 */
535 if (td_write(td))
536 f_out = stderr;
537 }
538
539 if (td_trim(td))
540 goto skip_flags;
541 if (td->o.odirect)
542 flags |= OS_O_DIRECT;
543 if (td->o.oatomic) {
544 if (!FIO_O_ATOMIC) {
545 td_verror(td, EINVAL, "OS does not support atomic IO");
546 return 1;
547 }
548 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
549 }
550 if (td->o.sync_io)
551 flags |= O_SYNC;
552 if (td->o.create_on_open && td->o.allow_create)
553 flags |= O_CREAT;
554skip_flags:
555 if (f->filetype != FIO_TYPE_FILE)
556 flags |= FIO_O_NOATIME;
557
558open_again:
559 if (td_write(td)) {
560 if (!read_only)
561 flags |= O_RDWR;
562
563 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
564 flags |= O_CREAT;
565
566 if (is_std)
567 f->fd = dup(STDOUT_FILENO);
568 else
569 from_hash = file_lookup_open(f, flags);
570 } else if (td_read(td)) {
571 if (f->filetype == FIO_TYPE_CHAR && !read_only)
572 flags |= O_RDWR;
573 else
574 flags |= O_RDONLY;
575
576 if (is_std)
577 f->fd = dup(STDIN_FILENO);
578 else
579 from_hash = file_lookup_open(f, flags);
580 } else { /* td_trim */
581 flags |= O_RDWR;
582 from_hash = file_lookup_open(f, flags);
583 }
584
585 if (f->fd == -1) {
586 char buf[FIO_VERROR_SIZE];
587 int __e = errno;
588
589 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
590 flags &= ~FIO_O_NOATIME;
591 goto open_again;
592 }
593 if (__e == EMFILE && file_close_shadow_fds(td))
594 goto open_again;
595
596 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
597
598 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
599 log_err("fio: looks like your file system does not " \
600 "support direct=1/buffered=0\n");
601 }
602
603 td_verror(td, __e, buf);
604 return 1;
605 }
606
607 if (!from_hash && f->fd != -1) {
608 if (add_file_hash(f)) {
609 int fio_unused ret;
610
611 /*
612 * Stash away descriptor for later close. This is to
613 * work-around a "feature" on Linux, where a close of
614 * an fd that has been opened for write will trigger
615 * udev to call blkid to check partitions, fs id, etc.
616 * That pollutes the device cache, which can slow down
617 * unbuffered accesses.
618 */
619 if (f->shadow_fd == -1)
620 f->shadow_fd = f->fd;
621 else {
622 /*
623 * OK to ignore, we haven't done anything
624 * with it
625 */
626 ret = generic_close_file(td, f);
627 }
628 goto open_again;
629 }
630 }
631
632 return 0;
633}
634
635int generic_get_file_size(struct thread_data *td, struct fio_file *f)
636{
637 return get_file_size(td, f);
638}
639
640/*
641 * open/close all files, so that ->real_file_size gets set
642 */
643static int get_file_sizes(struct thread_data *td)
644{
645 struct fio_file *f;
646 unsigned int i;
647 int err = 0;
648
649 for_each_file(td, f, i) {
650 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
651 f->file_name);
652
653 if (td_io_get_file_size(td, f)) {
654 if (td->error != ENOENT) {
655 log_err("%s\n", td->verror);
656 err = 1;
657 break;
658 }
659 clear_error(td);
660 }
661
662 if (f->real_file_size == -1ULL && td->o.size)
663 f->real_file_size = td->o.size / td->o.nr_files;
664 }
665
666 return err;
667}
668
669struct fio_mount {
670 struct flist_head list;
671 const char *base;
672 char __base[256];
673 unsigned int key;
674};
675
676/*
677 * Get free number of bytes for each file on each unique mount.
678 */
679static unsigned long long get_fs_free_counts(struct thread_data *td)
680{
681 struct flist_head *n, *tmp;
682 unsigned long long ret = 0;
683 struct fio_mount *fm;
684 FLIST_HEAD(list);
685 struct fio_file *f;
686 unsigned int i;
687
688 for_each_file(td, f, i) {
689 struct stat sb;
690 char buf[256];
691
692 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
693 if (f->real_file_size != -1ULL)
694 ret += f->real_file_size;
695 continue;
696 } else if (f->filetype != FIO_TYPE_FILE)
697 continue;
698
699 buf[255] = '\0';
700 strncpy(buf, f->file_name, 255);
701
702 if (stat(buf, &sb) < 0) {
703 if (errno != ENOENT)
704 break;
705 strcpy(buf, ".");
706 if (stat(buf, &sb) < 0)
707 break;
708 }
709
710 fm = NULL;
711 flist_for_each(n, &list) {
712 fm = flist_entry(n, struct fio_mount, list);
713 if (fm->key == sb.st_dev)
714 break;
715
716 fm = NULL;
717 }
718
719 if (fm)
720 continue;
721
722 fm = calloc(1, sizeof(*fm));
723 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
724 fm->base = basename(fm->__base);
725 fm->key = sb.st_dev;
726 flist_add(&fm->list, &list);
727 }
728
729 flist_for_each_safe(n, tmp, &list) {
730 unsigned long long sz;
731
732 fm = flist_entry(n, struct fio_mount, list);
733 flist_del(&fm->list);
734
735 sz = get_fs_size(fm->base);
736 if (sz && sz != -1ULL)
737 ret += sz;
738
739 free(fm);
740 }
741
742 return ret;
743}
744
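/*
 * First offset for this job within the file: appends start at the current
 * end of a regular file, otherwise it is start_offset plus one
 * offset_increment per subjob, which lets numjobs clones work on
 * non-overlapping regions of the same file or device.
 */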
745uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
746{
747 struct thread_options *o = &td->o;
748
749 if (o->file_append && f->filetype == FIO_TYPE_FILE)
750 return f->real_file_size;
751
752 return td->o.start_offset +
753 td->subjob_number * td->o.offset_increment;
754}
755
756/*
757 * Open the files and set up file sizes, creating files if necessary.
758 */
759int setup_files(struct thread_data *td)
760{
761 unsigned long long total_size, extend_size;
762 struct thread_options *o = &td->o;
763 struct fio_file *f;
764 unsigned int i, nr_fs_extra = 0;
765 int err = 0, need_extend;
766 int old_state;
767 const unsigned int bs = td_min_bs(td);
768 uint64_t fs = 0;
769
770 dprint(FD_FILE, "setup files\n");
771
772 old_state = td_bump_runstate(td, TD_SETTING_UP);
773
774 if (o->read_iolog_file)
775 goto done;
776
777 /*
778 * if ioengine defines a setup() method, it's responsible for
779 * opening the files and setting f->real_file_size to indicate
780 * the valid range for that file.
781 */
782 if (td->io_ops->setup)
783 err = td->io_ops->setup(td);
784 else
785 err = get_file_sizes(td);
786
787 if (err)
788 goto err_out;
789
790 /*
791 * check sizes. if the files/devices do not exist and the size
792 * isn't passed to fio, abort.
793 */
794 total_size = 0;
795 for_each_file(td, f, i) {
796 if (f->real_file_size == -1ULL)
797 total_size = -1ULL;
798 else
799 total_size += f->real_file_size;
800 }
801
802 if (o->fill_device)
803 td->fill_device_size = get_fs_free_counts(td);
804
805 /*
806 * device/file sizes are zero and no size given, punt
807 */
808 if ((!total_size || total_size == -1ULL) && !o->size &&
809 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
810 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
811 log_err("%s: you need to specify size=\n", o->name);
812 td_verror(td, EINVAL, "total_file_size");
813 goto err_out;
814 }
815
816 /*
817 * Calculate per-file size and potential extra size for the
818 * first files, if needed.
819 */
820 if (!o->file_size_low && o->nr_files) {
821 uint64_t all_fs;
822
823 fs = o->size / o->nr_files;
824 all_fs = fs * o->nr_files;
825
826 if (all_fs < o->size)
827 nr_fs_extra = (o->size - all_fs) / bs;
828 }
829
830 /*
831 * now file sizes are known, so we can set ->io_size. if size= is
832 * not given, ->io_size is just equal to ->real_file_size. if size
833 * is given, ->io_size is size / nr_files.
834 */
835 extend_size = total_size = 0;
836 need_extend = 0;
837 for_each_file(td, f, i) {
838 f->file_offset = get_start_offset(td, f);
839
840 if (!o->file_size_low) {
841 /*
842 * no file size range given, file size is equal to
843 * total size divided by number of files. If that is
844 * zero, set it to the real file size. If the size
845 * doesn't divide evenly by the min blocksize,
846 * make the first files bigger.
847 */
848 f->io_size = fs;
849 if (nr_fs_extra) {
850 nr_fs_extra--;
851 f->io_size += bs;
852 }
853
854 if (!f->io_size)
855 f->io_size = f->real_file_size - f->file_offset;
856 } else if (f->real_file_size < o->file_size_low ||
857 f->real_file_size > o->file_size_high) {
858 if (f->file_offset > o->file_size_low)
859 goto err_offset;
860 /*
861 * file size given. if it's fixed, use that. if it's a
862 * range, generate a random size in-between.
863 */
864 if (o->file_size_low == o->file_size_high)
865 f->io_size = o->file_size_low - f->file_offset;
866 else {
867 f->io_size = get_rand_file_size(td)
868 - f->file_offset;
869 }
870 } else
871 f->io_size = f->real_file_size - f->file_offset;
872
873 if (f->io_size == -1ULL)
874 total_size = -1ULL;
875 else {
876 if (o->size_percent)
877 f->io_size = (f->io_size * o->size_percent) / 100;
878 total_size += f->io_size;
879 }
880
881 if (f->filetype == FIO_TYPE_FILE &&
882 (f->io_size + f->file_offset) > f->real_file_size &&
883 !(td->io_ops->flags & FIO_DISKLESSIO)) {
884 if (!o->create_on_open) {
885 need_extend++;
886 extend_size += (f->io_size + f->file_offset);
887 } else
888 f->real_file_size = f->io_size + f->file_offset;
889 fio_file_set_extend(f);
890 }
891 }
892
893 if (td->o.block_error_hist) {
894 int len;
895
896 assert(td->o.nr_files == 1); /* checked in fixup_options */
897 f = td->files[0];
898 len = f->io_size / td->o.bs[DDIR_TRIM];
899 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
900 log_err("fio: cannot calculate block histogram with "
901 "%d trim blocks, maximum %d\n",
902 len, MAX_NR_BLOCK_INFOS);
903 td_verror(td, EINVAL, "block_error_hist");
904 goto err_out;
905 }
906
907 td->ts.nr_block_infos = len;
908 for (int i = 0; i < len; i++)
909 td->ts.block_infos[i] =
910 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
911 } else
912 td->ts.nr_block_infos = 0;
913
914 if (!o->size || (total_size && o->size > total_size))
915 o->size = total_size;
916
917 if (o->size < td_min_bs(td)) {
918 log_err("fio: blocksize too large for data set\n");
919 goto err_out;
920 }
921
922 /*
923 * See if we need to extend some files
924 */
925 if (need_extend) {
926 temp_stall_ts = 1;
927 if (output_format == FIO_OUTPUT_NORMAL)
928 log_info("%s: Laying out IO file(s) (%u file(s) /"
929 " %lluMB)\n", o->name, need_extend,
930 extend_size >> 20);
931
932 for_each_file(td, f, i) {
933 unsigned long long old_len = -1ULL, extend_len = -1ULL;
934
935 if (!fio_file_extend(f))
936 continue;
937
938 assert(f->filetype == FIO_TYPE_FILE);
939 fio_file_clear_extend(f);
940 if (!o->fill_device) {
941 old_len = f->real_file_size;
942 extend_len = f->io_size + f->file_offset -
943 old_len;
944 }
945 f->real_file_size = (f->io_size + f->file_offset);
946 err = extend_file(td, f);
947 if (err)
948 break;
949
950 err = __file_invalidate_cache(td, f, old_len,
951 extend_len);
952
953 /*
954 * Shut up static checker
955 */
956 if (f->fd != -1)
957 close(f->fd);
958
959 f->fd = -1;
960 if (err)
961 break;
962 }
963 temp_stall_ts = 0;
964 }
965
966 if (err)
967 goto err_out;
968
969 if (!o->zone_size)
970 o->zone_size = o->size;
971
972 /*
973 * iolog already set the total io size, if we read back
974 * stored entries.
975 */
976 if (!o->read_iolog_file) {
977 if (o->io_limit)
978 td->total_io_size = o->io_limit * o->loops;
979 else
980 td->total_io_size = o->size * o->loops;
981 }
982
983done:
984 if (o->create_only)
985 td->done = 1;
986
987 td_restore_runstate(td, old_state);
988 return 0;
989err_offset:
990 log_err("%s: you need to specify valid offset=\n", o->name);
991err_out:
992 td_restore_runstate(td, old_state);
993 return 1;
994}
995
996int pre_read_files(struct thread_data *td)
997{
998 struct fio_file *f;
999 unsigned int i;
1000
1001 dprint(FD_FILE, "pre_read files\n");
1002
1003 for_each_file(td, f, i) {
1004 pre_read_file(td, f);
1005 }
1006
1007 return 1;
1008}
1009
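/*
 * Initialize the zipf/pareto/gauss offset distribution for one file. The
 * file is divided into min_bs sized ranges and the distribution is set up
 * over that many ranges, seeded per file name unless rand_repeatable is
 * turned off.
 */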
1010static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1011{
1012 unsigned int range_size, seed;
1013 unsigned long nranges;
1014 uint64_t fsize;
1015
1016 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1017 fsize = min(f->real_file_size, f->io_size);
1018
1019 nranges = (fsize + range_size - 1) / range_size;
1020
1021 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1022 if (!td->o.rand_repeatable)
1023 seed = td->rand_seeds[4];
1024
1025 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1026 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1027 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1028 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1029 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1030 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1031
1032 return 1;
1033}
1034
1035static int init_rand_distribution(struct thread_data *td)
1036{
1037 struct fio_file *f;
1038 unsigned int i;
1039 int state;
1040
1041 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1042 return 0;
1043
1044 state = td_bump_runstate(td, TD_SETTING_UP);
1045
1046 for_each_file(td, f, i)
1047 __init_rand_distribution(td, f);
1048
1049 td_restore_runstate(td, state);
1050
1051 return 1;
1052}
1053
1054int init_random_map(struct thread_data *td)
1055{
1056 unsigned long long blocks;
1057 struct fio_file *f;
1058 unsigned int i;
1059
1060 if (init_rand_distribution(td))
1061 return 0;
1062 if (!td_random(td))
1063 return 0;
1064
1065 for_each_file(td, f, i) {
1066 uint64_t fsize = min(f->real_file_size, f->io_size);
1067
1068 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1069
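		/*
		 * A 32-bit tausworthe generator can only produce FRAND32_MAX
		 * distinct values, so it cannot reach every block of a larger
		 * file. If the user did not explicitly choose a generator,
		 * error out and point them at lfsr or tausworthe64.
		 */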
1070 if (blocks > FRAND32_MAX &&
1071 td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE &&
1072 !fio_option_is_set(&td->o, random_generator)) {
1073 log_err("fio: file %s exceeds 32-bit tausworthe "
1074 "random generator. Use lfsr or "
1075 "tausworthe64.\n", f->file_name);
1076 td_verror(td, EINVAL, "init file random");
1077 return 1;
1078 }
1079
1080 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1081 unsigned long seed;
1082
1083 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1084
1085 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1086 fio_file_set_lfsr(f);
1087 continue;
1088 }
1089 } else if (!td->o.norandommap) {
1090 f->io_axmap = axmap_new(blocks);
1091 if (f->io_axmap) {
1092 fio_file_set_axmap(f);
1093 continue;
1094 }
1095 } else if (td->o.norandommap)
1096 continue;
1097
1098 if (!td->o.softrandommap) {
1099 log_err("fio: failed allocating random map. If running"
1100 " a large number of jobs, try the 'norandommap'"
1101 " option or set 'softrandommap'. Or give"
1102 " a larger --alloc-size to fio.\n");
1103 return 1;
1104 }
1105
1106 log_info("fio: file %s failed allocating random map. Running "
1107 "job without.\n", f->file_name);
1108 }
1109
1110 return 0;
1111}
1112
1113void close_files(struct thread_data *td)
1114{
1115 struct fio_file *f;
1116 unsigned int i;
1117
1118 for_each_file(td, f, i) {
1119 if (fio_file_open(f))
1120 td_io_close_file(td, f);
1121 }
1122}
1123
1124void close_and_free_files(struct thread_data *td)
1125{
1126 struct fio_file *f;
1127 unsigned int i;
1128
1129 dprint(FD_FILE, "close files\n");
1130
1131 for_each_file(td, f, i) {
1132 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1133 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1134 td_io_unlink_file(td, f);
1135 }
1136
1137 if (fio_file_open(f))
1138 td_io_close_file(td, f);
1139
1140 remove_file_hash(f);
1141
1142 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1143 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1144 td_io_unlink_file(td, f);
1145 }
1146
1147 sfree(f->file_name);
1148 f->file_name = NULL;
1149 if (fio_file_axmap(f)) {
1150 axmap_free(f->io_axmap);
1151 f->io_axmap = NULL;
1152 }
1153 sfree(f);
1154 }
1155
1156 td->o.filename = NULL;
1157 free(td->files);
1158 free(td->file_locks);
1159 td->files_index = 0;
1160 td->files = NULL;
1161 td->file_locks = NULL;
1162 td->o.file_lock_mode = FILE_LOCK_NONE;
1163 td->o.nr_files = 0;
1164}
1165
1166static void get_file_type(struct fio_file *f)
1167{
1168 struct stat sb;
1169
1170 if (!strcmp(f->file_name, "-"))
1171 f->filetype = FIO_TYPE_PIPE;
1172 else
1173 f->filetype = FIO_TYPE_FILE;
1174
1175 /* \\.\ is the device namespace in Windows, where every file is
1176 * a block device */
1177 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1178 f->filetype = FIO_TYPE_BD;
1179
1180 if (!stat(f->file_name, &sb)) {
1181 if (S_ISBLK(sb.st_mode))
1182 f->filetype = FIO_TYPE_BD;
1183 else if (S_ISCHR(sb.st_mode))
1184 f->filetype = FIO_TYPE_CHAR;
1185 else if (S_ISFIFO(sb.st_mode))
1186 f->filetype = FIO_TYPE_PIPE;
1187 }
1188}
1189
1190static int __is_already_allocated(const char *fname)
1191{
1192 struct flist_head *entry;
1193 char *filename;
1194
1195 if (flist_empty(&filename_list))
1196 return 0;
1197
1198 flist_for_each(entry, &filename_list) {
1199 filename = flist_entry(entry, struct file_name, list)->filename;
1200
1201 if (strcmp(filename, fname) == 0)
1202 return 1;
1203 }
1204
1205 return 0;
1206}
1207
1208static int is_already_allocated(const char *fname)
1209{
1210 int ret;
1211
1212 fio_file_hash_lock();
1213 ret = __is_already_allocated(fname);
1214 fio_file_hash_unlock();
1215 return ret;
1216}
1217
1218static void set_already_allocated(const char *fname)
1219{
1220 struct file_name *fn;
1221
1222 fn = malloc(sizeof(struct file_name));
1223 fn->filename = strdup(fname);
1224
1225 fio_file_hash_lock();
1226 if (!__is_already_allocated(fname)) {
1227 flist_add_tail(&fn->list, &filename_list);
1228 fn = NULL;
1229 }
1230 fio_file_hash_unlock();
1231
1232 if (fn) {
1233 free(fn->filename);
1234 free(fn);
1235 }
1236}
1237
1238
1239static void free_already_allocated(void)
1240{
1241 struct flist_head *entry, *tmp;
1242 struct file_name *fn;
1243
1244 if (flist_empty(&filename_list))
1245 return;
1246
1247 fio_file_hash_lock();
1248 flist_for_each_safe(entry, tmp, &filename_list) {
1249 fn = flist_entry(entry, struct file_name, list);
1250 free(fn->filename);
1251 flist_del(&fn->list);
1252 free(fn);
1253 }
1254
1255 fio_file_hash_unlock();
1256}
1257
1258static struct fio_file *alloc_new_file(struct thread_data *td)
1259{
1260 struct fio_file *f;
1261
1262 f = smalloc(sizeof(*f));
1263 if (!f) {
1264 log_err("fio: smalloc OOM\n");
1265 assert(0);
1266 return NULL;
1267 }
1268
1269 f->fd = -1;
1270 f->shadow_fd = -1;
1271 fio_file_reset(td, f);
1272 return f;
1273}
1274
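/*
 * Allocate and register a new fio_file for this job. The name gets the
 * per-job directory prefix (if any), cloned jobs skip names that are
 * already allocated, and the files array is grown on demand.
 */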
1275int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1276{
1277 int cur_files = td->files_index;
1278 char file_name[PATH_MAX];
1279 struct fio_file *f;
1280 int len = 0;
1281
1282 dprint(FD_FILE, "add file %s\n", fname);
1283
1284 if (td->o.directory)
1285 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob);
1286
1287 sprintf(file_name + len, "%s", fname);
1288
1289 /* clean cloned siblings using existing files */
1290 if (numjob && is_already_allocated(file_name))
1291 return 0;
1292
1293 f = alloc_new_file(td);
1294
1295 if (td->files_size <= td->files_index) {
1296 unsigned int new_size = td->o.nr_files + 1;
1297
1298 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1299
1300 td->files = realloc(td->files, new_size * sizeof(f));
1301 if (td->files == NULL) {
1302 log_err("fio: realloc OOM\n");
1303 assert(0);
1304 }
1305 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1306 td->file_locks = realloc(td->file_locks, new_size);
1307 if (!td->file_locks) {
1308 log_err("fio: realloc OOM\n");
1309 assert(0);
1310 }
1311 td->file_locks[cur_files] = FILE_LOCK_NONE;
1312 }
1313 td->files_size = new_size;
1314 }
1315 td->files[cur_files] = f;
1316 f->fileno = cur_files;
1317
1318 /*
1319 * init function, io engine may not be loaded yet
1320 */
1321 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1322 f->real_file_size = -1ULL;
1323
1324 f->file_name = smalloc_strdup(file_name);
1325 if (!f->file_name) {
1326 log_err("fio: smalloc OOM\n");
1327 assert(0);
1328 }
1329
1330 get_file_type(f);
1331
1332 switch (td->o.file_lock_mode) {
1333 case FILE_LOCK_NONE:
1334 break;
1335 case FILE_LOCK_READWRITE:
1336 f->rwlock = fio_rwlock_init();
1337 break;
1338 case FILE_LOCK_EXCLUSIVE:
1339 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1340 break;
1341 default:
1342 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1343 assert(0);
1344 }
1345
1346 td->files_index++;
1347 if (f->filetype == FIO_TYPE_FILE)
1348 td->nr_normal_files++;
1349
1350 set_already_allocated(file_name);
1351
1352 if (inc)
1353 td->o.nr_files++;
1354
1355 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1356 cur_files);
1357
1358 return cur_files;
1359}
1360
1361int add_file_exclusive(struct thread_data *td, const char *fname)
1362{
1363 struct fio_file *f;
1364 unsigned int i;
1365
1366 for_each_file(td, f, i) {
1367 if (!strcmp(f->file_name, fname))
1368 return i;
1369 }
1370
1371 return add_file(td, fname, 0, 1);
1372}
1373
1374void get_file(struct fio_file *f)
1375{
1376 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1377 assert(fio_file_open(f));
1378 f->references++;
1379}
1380
1381int put_file(struct thread_data *td, struct fio_file *f)
1382{
1383 int f_ret = 0, ret = 0;
1384
1385 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1386
1387 if (!fio_file_open(f)) {
1388 assert(f->fd == -1);
1389 return 0;
1390 }
1391
1392 assert(f->references);
1393 if (--f->references)
1394 return 0;
1395
1396 if (should_fsync(td) && td->o.fsync_on_close) {
1397 f_ret = fsync(f->fd);
1398 if (f_ret < 0)
1399 f_ret = errno;
1400 }
1401
1402 if (td->io_ops->close_file)
1403 ret = td->io_ops->close_file(td, f);
1404
1405 if (!ret)
1406 ret = f_ret;
1407
1408 td->nr_open_files--;
1409 fio_file_clear_open(f);
1410 assert(f->fd == -1);
1411 return ret;
1412}
1413
1414void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1415{
1416 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1417 return;
1418
1419 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1420 if (ddir == DDIR_READ)
1421 fio_rwlock_read(f->rwlock);
1422 else
1423 fio_rwlock_write(f->rwlock);
1424 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1425 fio_mutex_down(f->lock);
1426
1427 td->file_locks[f->fileno] = td->o.file_lock_mode;
1428}
1429
1430void unlock_file(struct thread_data *td, struct fio_file *f)
1431{
1432 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1433 return;
1434
1435 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1436 fio_rwlock_unlock(f->rwlock);
1437 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1438 fio_mutex_up(f->lock);
1439
1440 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1441}
1442
1443void unlock_file_all(struct thread_data *td, struct fio_file *f)
1444{
1445 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1446 return;
1447 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1448 unlock_file(td, f);
1449}
1450
1451static int recurse_dir(struct thread_data *td, const char *dirname)
1452{
1453 struct dirent *dir;
1454 int ret = 0;
1455 DIR *D;
1456
1457 D = opendir(dirname);
1458 if (!D) {
1459 char buf[FIO_VERROR_SIZE];
1460
1461 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1462 td_verror(td, errno, buf);
1463 return 1;
1464 }
1465
1466 while ((dir = readdir(D)) != NULL) {
1467 char full_path[PATH_MAX];
1468 struct stat sb;
1469
1470 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1471 continue;
1472
1473 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1474
1475 if (lstat(full_path, &sb) == -1) {
1476 if (errno == ENOENT)
1477 continue;
1478 td_verror(td, errno, "stat");
1479 ret = 1;
1480 break;
1481 }
1482
1483 if (S_ISREG(sb.st_mode)) {
1484 add_file(td, full_path, 0, 1);
1485 continue;
1486 }
1487 if (!S_ISDIR(sb.st_mode))
1488 continue;
1489
1490 ret = recurse_dir(td, full_path);
1491 if (ret)
1492 break;
1493 }
1494
1495 closedir(D);
1496 return ret;
1497}
1498
1499int add_dir_files(struct thread_data *td, const char *path)
1500{
1501 int ret = recurse_dir(td, path);
1502
1503 if (!ret)
1504 log_info("fio: opendir added %d files\n", td->o.nr_files);
1505
1506 return ret;
1507}
1508
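/*
 * Clone the file list from the job we were forked from: file names are
 * duplicated, while lock objects are shared with the original files.
 */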
1509void dup_files(struct thread_data *td, struct thread_data *org)
1510{
1511 struct fio_file *f;
1512 unsigned int i;
1513
1514 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1515
1516 if (!org->files)
1517 return;
1518
1519 td->files = malloc(org->files_index * sizeof(f));
1520
1521 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1522 td->file_locks = malloc(org->files_index);
1523
1524 for_each_file(org, f, i) {
1525 struct fio_file *__f;
1526
1527 __f = alloc_new_file(td);
1528
1529 if (f->file_name) {
1530 __f->file_name = smalloc_strdup(f->file_name);
1531 if (!__f->file_name) {
1532 log_err("fio: smalloc OOM\n");
1533 assert(0);
1534 }
1535
1536 __f->filetype = f->filetype;
1537 }
1538
1539 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1540 __f->lock = f->lock;
1541 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1542 __f->rwlock = f->rwlock;
1543
1544 td->files[i] = __f;
1545 }
1546}
1547
1548/*
1549 * Returns the index that matches the filename, or -1 if not there
1550 */
1551int get_fileno(struct thread_data *td, const char *fname)
1552{
1553 struct fio_file *f;
1554 unsigned int i;
1555
1556 for_each_file(td, f, i)
1557 if (!strcmp(f->file_name, fname))
1558 return i;
1559
1560 return -1;
1561}
1562
1563/*
1564 * For log usage, where we add/open/close files automatically
1565 */
1566void free_release_files(struct thread_data *td)
1567{
1568 close_files(td);
1569 td->o.nr_files = 0;
1570 td->o.open_files = 0;
1571 td->files_index = 0;
1572 td->nr_normal_files = 0;
1573}
1574
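/*
 * Reset per-file I/O state for a new loop: last positions return to the
 * file offset and the random block map (axmap or LFSR) is cleared or
 * reseeded.
 */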
1575void fio_file_reset(struct thread_data *td, struct fio_file *f)
1576{
1577 int i;
1578
1579 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1580 f->last_pos[i] = f->file_offset;
1581 f->last_start[i] = -1ULL;
1582 }
1583
1584 if (fio_file_axmap(f))
1585 axmap_reset(f->io_axmap);
1586 else if (fio_file_lfsr(f))
1587 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1588}
1589
1590int fio_files_done(struct thread_data *td)
1591{
1592 struct fio_file *f;
1593 unsigned int i;
1594
1595 for_each_file(td, f, i)
1596 if (!fio_file_done(f))
1597 return 0;
1598
1599 return 1;
1600}
1601
1602/* free memory used in initialization phase only */
1603void filesetup_mem_free(void)
1604{
1605 free_already_allocated();
1606}