Add runstate swap helpers
[fio.git] / filesetup.c
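This commit's new runstate swap helpers are only visible in this file through their call sites: td_bump_runstate() / td_restore_runstate(), used in pre_read_file(), setup_files() and init_rand_distribution() below. The helpers themselves are defined elsewhere in the tree; the following is a minimal sketch only, assuming they simply wrap the existing td_set_runstate() and hand back the state that was current before the swap:

/*
 * Sketch only -- not part of filesetup.c. Assumes the fio.h definitions of
 * struct thread_data (with its runstate member) and td_set_runstate().
 */
int td_bump_runstate(struct thread_data *td, int new_state)
{
	int old_state = td->runstate;

	/* switch to the transient state, remember what we came from */
	td_set_runstate(td, new_state);
	return old_state;
}

void td_restore_runstate(struct thread_data *td, int old_state)
{
	/* put the thread back into whatever state it was in before */
	td_set_runstate(td, old_state);
}

Callers bump to a transient state (TD_SETTING_UP in setup_files() and init_rand_distribution(), TD_PRE_READING in pre_read_file()), do their work, and then restore whatever state the thread was in beforehand.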
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * check if we need to lay the file out completely again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) || (td_write(td) && td->o.overwrite) ||
54 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
55 new_layout = 1;
56 if (td_write(td) && !td->o.overwrite)
57 unlink_file = 1;
58
59 if (unlink_file || new_layout) {
60 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
61 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
62 td_verror(td, errno, "unlink");
63 return 1;
64 }
65 }
66
67 flags = O_WRONLY | O_CREAT;
68 if (new_layout)
69 flags |= O_TRUNC;
70
71 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
72 f->fd = open(f->file_name, flags, 0644);
73 if (f->fd < 0) {
74 td_verror(td, errno, "open");
75 return 1;
76 }
77
78#ifdef CONFIG_POSIX_FALLOCATE
79 if (!td->o.fill_device) {
80 switch (td->o.fallocate_mode) {
81 case FIO_FALLOCATE_NONE:
82 break;
83 case FIO_FALLOCATE_POSIX:
84 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
85 f->file_name,
86 (unsigned long long) f->real_file_size);
87
88 r = posix_fallocate(f->fd, 0, f->real_file_size);
89 if (r > 0) {
90 log_err("fio: posix_fallocate fails: %s\n",
91 strerror(r));
92 }
93 break;
94#ifdef CONFIG_LINUX_FALLOCATE
95 case FIO_FALLOCATE_KEEP_SIZE:
96 dprint(FD_FILE,
97 "fallocate(FALLOC_FL_KEEP_SIZE) "
98 "file %s size %llu\n", f->file_name,
99 (unsigned long long) f->real_file_size);
100
101 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
102 f->real_file_size);
103 if (r != 0)
104 td_verror(td, errno, "fallocate");
105
106 break;
107#endif /* CONFIG_LINUX_FALLOCATE */
108 default:
109 log_err("fio: unknown fallocate mode: %d\n",
110 td->o.fallocate_mode);
111 assert(0);
112 }
113 }
114#endif /* CONFIG_POSIX_FALLOCATE */
115
116 if (!new_layout)
117 goto done;
118
119 /*
120 * The size will be -1ULL when fill_device is used, so don't truncate
121 * or fallocate this file, just write it
122 */
123 if (!td->o.fill_device) {
124 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
125 (unsigned long long) f->real_file_size);
126 if (ftruncate(f->fd, f->real_file_size) == -1) {
127 if (errno != EFBIG) {
128 td_verror(td, errno, "ftruncate");
129 goto err;
130 }
131 }
132 }
133
134 b = malloc(td->o.max_bs[DDIR_WRITE]);
135
136 left = f->real_file_size;
137 while (left && !td->terminate) {
138 bs = td->o.max_bs[DDIR_WRITE];
139 if (bs > left)
140 bs = left;
141
142 fill_io_buffer(td, b, bs, bs);
143
144 r = write(f->fd, b, bs);
145
146 if (r > 0) {
147 left -= r;
148 continue;
149 } else {
150 if (r < 0) {
151 int __e = errno;
152
153 if (__e == ENOSPC) {
154 if (td->o.fill_device)
155 break;
156 log_info("fio: ENOSPC on laying out "
157 "file, stopping\n");
158 break;
159 }
160 td_verror(td, errno, "write");
161 } else
162 td_verror(td, EIO, "write");
163
164 break;
165 }
166 }
167
168 if (td->terminate) {
169 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
170 unlink(f->file_name);
171 } else if (td->o.create_fsync) {
172 if (fsync(f->fd) < 0) {
173 td_verror(td, errno, "fsync");
174 goto err;
175 }
176 }
177 if (td->o.fill_device && !td_write(td)) {
178 fio_file_clear_size_known(f);
179 if (td_io_get_file_size(td, f))
180 goto err;
181 if (f->io_size > f->real_file_size)
182 f->io_size = f->real_file_size;
183 }
184
185 free(b);
186done:
187 return 0;
188err:
189 close(f->fd);
190 f->fd = -1;
191 return 1;
192}
193
194static int pre_read_file(struct thread_data *td, struct fio_file *f)
195{
196 int r, did_open = 0, old_runstate;
197 unsigned long long left;
198 unsigned int bs;
199 char *b;
200
201 if (td->io_ops->flags & FIO_PIPEIO)
202 return 0;
203
204 if (!fio_file_open(f)) {
205 if (td->io_ops->open_file(td, f)) {
206 log_err("fio: cannot pre-read, failed to open file\n");
207 return 1;
208 }
209 did_open = 1;
210 }
211
212 old_runstate = td_bump_runstate(td, TD_PRE_READING);
213
214 bs = td->o.max_bs[DDIR_READ];
215 b = malloc(bs);
216 memset(b, 0, bs);
217
218 lseek(f->fd, f->file_offset, SEEK_SET);
219 left = f->io_size;
220
221 while (left && !td->terminate) {
222 if (bs > left)
223 bs = left;
224
225 r = read(f->fd, b, bs);
226
227 if (r == (int) bs) {
228 left -= bs;
229 continue;
230 } else {
231 td_verror(td, EIO, "pre_read");
232 break;
233 }
234 }
235
236 td_restore_runstate(td, old_runstate);
237
238 if (did_open)
239 td->io_ops->close_file(td, f);
240 free(b);
241 return 0;
242}
243
244static unsigned long long get_rand_file_size(struct thread_data *td)
245{
246 unsigned long long ret, sized;
247 unsigned long r;
248
249 if (td->o.use_os_rand) {
250 r = os_random_long(&td->file_size_state);
251 sized = td->o.file_size_high - td->o.file_size_low;
252 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
253 } else {
254 r = __rand(&td->__file_size_state);
255 sized = td->o.file_size_high - td->o.file_size_low;
256 ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
257 }
258
259 ret += td->o.file_size_low;
260 ret -= (ret % td->o.rw_min_bs);
261 return ret;
262}
263
264static int file_size(struct thread_data *td, struct fio_file *f)
265{
266 struct stat st;
267
268 if (stat(f->file_name, &st) == -1) {
269 td_verror(td, errno, "fstat");
270 return 1;
271 }
272
273 f->real_file_size = st.st_size;
274 return 0;
275}
276
277static int bdev_size(struct thread_data *td, struct fio_file *f)
278{
279 unsigned long long bytes = 0;
280 int r;
281
282 if (td->io_ops->open_file(td, f)) {
283 log_err("fio: failed opening blockdev %s for size check\n",
284 f->file_name);
285 return 1;
286 }
287
288 r = blockdev_size(f, &bytes);
289 if (r) {
290 td_verror(td, r, "blockdev_size");
291 goto err;
292 }
293
294 if (!bytes) {
295 log_err("%s: zero sized block device?\n", f->file_name);
296 goto err;
297 }
298
299 f->real_file_size = bytes;
300 td->io_ops->close_file(td, f);
301 return 0;
302err:
303 td->io_ops->close_file(td, f);
304 return 1;
305}
306
307static int char_size(struct thread_data *td, struct fio_file *f)
308{
309#ifdef FIO_HAVE_CHARDEV_SIZE
310 unsigned long long bytes = 0;
311 int r;
312
313 if (td->io_ops->open_file(td, f)) {
314 log_err("fio: failed opening blockdev %s for size check\n",
315 f->file_name);
316 return 1;
317 }
318
319 r = chardev_size(f, &bytes);
320 if (r) {
321 td_verror(td, r, "chardev_size");
322 goto err;
323 }
324
325 if (!bytes) {
326 log_err("%s: zero sized char device?\n", f->file_name);
327 goto err;
328 }
329
330 f->real_file_size = bytes;
331 td->io_ops->close_file(td, f);
332 return 0;
333err:
334 td->io_ops->close_file(td, f);
335 return 1;
336#else
337 f->real_file_size = -1ULL;
338 return 0;
339#endif
340}
341
342static int get_file_size(struct thread_data *td, struct fio_file *f)
343{
344 int ret = 0;
345
346 if (fio_file_size_known(f))
347 return 0;
348
349 if (f->filetype == FIO_TYPE_FILE)
350 ret = file_size(td, f);
351 else if (f->filetype == FIO_TYPE_BD)
352 ret = bdev_size(td, f);
353 else if (f->filetype == FIO_TYPE_CHAR)
354 ret = char_size(td, f);
355 else
356 f->real_file_size = -1;
357
358 if (ret)
359 return ret;
360
361 if (f->file_offset > f->real_file_size) {
362 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
363 (unsigned long long) f->file_offset,
364 (unsigned long long) f->real_file_size);
365 return 1;
366 }
367
368 fio_file_set_size_known(f);
369 return 0;
370}
371
372static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
373 unsigned long long off,
374 unsigned long long len)
375{
376 int ret = 0;
377
378 if (len == -1ULL)
379 len = f->io_size;
380 if (off == -1ULL)
381 off = f->file_offset;
382
383 if (len == -1ULL || off == -1ULL)
384 return 0;
385
386 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
387 len);
388
389 /*
390 * FIXME: add blockdev flushing too
391 */
392 if (f->mmap_ptr) {
393 ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
394#ifdef FIO_MADV_FREE
395 if (f->filetype == FIO_TYPE_BD)
396 (void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
397#endif
398 } else if (f->filetype == FIO_TYPE_FILE) {
399 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
400 } else if (f->filetype == FIO_TYPE_BD) {
401 ret = blockdev_invalidate_cache(f);
402 if (ret < 0 && errno == EACCES && geteuid()) {
403 if (!root_warn) {
404 log_err("fio: only root may flush block "
405 "devices. Cache flush bypassed!\n");
406 root_warn = 1;
407 }
408 ret = 0;
409 }
410 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
411 ret = 0;
412
413 if (ret < 0) {
414 td_verror(td, errno, "invalidate_cache");
415 return 1;
416 } else if (ret > 0) {
417 td_verror(td, ret, "invalidate_cache");
418 return 1;
419 }
420
421 return ret;
422
423}
424
425int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
426{
427 if (!fio_file_open(f))
428 return 0;
429
430 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
431}
432
433int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
434{
435 int ret = 0;
436
437 dprint(FD_FILE, "fd close %s\n", f->file_name);
438
439 remove_file_hash(f);
440
441 if (close(f->fd) < 0)
442 ret = errno;
443
444 f->fd = -1;
445
446 if (f->shadow_fd != -1) {
447 close(f->shadow_fd);
448 f->shadow_fd = -1;
449 }
450
451 f->engine_data = 0;
452 return ret;
453}
454
455int file_lookup_open(struct fio_file *f, int flags)
456{
457 struct fio_file *__f;
458 int from_hash;
459
460 __f = lookup_file_hash(f->file_name);
461 if (__f) {
462 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
463 /*
464 * racy; we need __f->lock held here
465 */
466 f->lock = __f->lock;
467 from_hash = 1;
468 } else {
469 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
470 from_hash = 0;
471 }
472
473 f->fd = open(f->file_name, flags, 0600);
474 return from_hash;
475}
476
477static int file_close_shadow_fds(struct thread_data *td)
478{
479 struct fio_file *f;
480 int num_closed = 0;
481 unsigned int i;
482
483 for_each_file(td, f, i) {
484 if (f->shadow_fd == -1)
485 continue;
486
487 close(f->shadow_fd);
488 f->shadow_fd = -1;
489 num_closed++;
490 }
491
492 return num_closed;
493}
494
495int generic_open_file(struct thread_data *td, struct fio_file *f)
496{
497 int is_std = 0;
498 int flags = 0;
499 int from_hash = 0;
500
501 dprint(FD_FILE, "fd open %s\n", f->file_name);
502
503 if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
504 log_err("fio: trim only applies to block device\n");
505 return 1;
506 }
507
508 if (!strcmp(f->file_name, "-")) {
509 if (td_rw(td)) {
510 log_err("fio: can't read/write to stdin/out\n");
511 return 1;
512 }
513 is_std = 1;
514
515 /*
516 * move output logging to stderr, if we are writing to stdout
517 */
518 if (td_write(td))
519 f_out = stderr;
520 }
521
522 if (td_trim(td))
523 goto skip_flags;
524 if (td->o.odirect)
525 flags |= OS_O_DIRECT;
526 if (td->o.oatomic) {
527 if (!FIO_O_ATOMIC) {
528 td_verror(td, EINVAL, "OS does not support atomic IO");
529 return 1;
530 }
531 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
532 }
533 if (td->o.sync_io)
534 flags |= O_SYNC;
535 if (td->o.create_on_open)
536 flags |= O_CREAT;
537skip_flags:
538 if (f->filetype != FIO_TYPE_FILE)
539 flags |= FIO_O_NOATIME;
540
541open_again:
542 if (td_write(td)) {
543 if (!read_only)
544 flags |= O_RDWR;
545
546 if (f->filetype == FIO_TYPE_FILE)
547 flags |= O_CREAT;
548
549 if (is_std)
550 f->fd = dup(STDOUT_FILENO);
551 else
552 from_hash = file_lookup_open(f, flags);
553 } else if (td_read(td)) {
554 if (f->filetype == FIO_TYPE_CHAR && !read_only)
555 flags |= O_RDWR;
556 else
557 flags |= O_RDONLY;
558
559 if (is_std)
560 f->fd = dup(STDIN_FILENO);
561 else
562 from_hash = file_lookup_open(f, flags);
563 } else { /* td trim */
564 flags |= O_RDWR;
565 from_hash = file_lookup_open(f, flags);
566 }
567
568 if (f->fd == -1) {
569 char buf[FIO_VERROR_SIZE];
570 int __e = errno;
571
572 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
573 flags &= ~FIO_O_NOATIME;
574 goto open_again;
575 }
576 if (__e == EMFILE && file_close_shadow_fds(td))
577 goto open_again;
578
579 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
580
581 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
582 log_err("fio: looks like your file system does not " \
583 "support direct=1/buffered=0\n");
584 }
585
586 td_verror(td, __e, buf);
587 }
588
589 if (!from_hash && f->fd != -1) {
590 if (add_file_hash(f)) {
591 int fio_unused ret;
592
593 /*
594 * Stash away descriptor for later close. This is to
595 * work-around a "feature" on Linux, where a close of
596 * an fd that has been opened for write will trigger
597 * udev to call blkid to check partitions, fs id, etc.
598 * That pollutes the device cache, which can slow down
599 * unbuffered accesses.
600 */
601 if (f->shadow_fd == -1)
602 f->shadow_fd = f->fd;
603 else {
604 /*
605 * OK to ignore, we haven't done anything
606 * with it
607 */
608 ret = generic_close_file(td, f);
609 }
610 goto open_again;
611 }
612 }
613
614 return 0;
615}
616
617int generic_get_file_size(struct thread_data *td, struct fio_file *f)
618{
619 return get_file_size(td, f);
620}
621
622/*
623 * open/close all files, so that ->real_file_size gets set
624 */
625static int get_file_sizes(struct thread_data *td)
626{
627 struct fio_file *f;
628 unsigned int i;
629 int err = 0;
630
631 for_each_file(td, f, i) {
632 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
633 f->file_name);
634
635 if (td_io_get_file_size(td, f)) {
636 if (td->error != ENOENT) {
637 log_err("%s\n", td->verror);
638 err = 1;
639 }
640 clear_error(td);
641 }
642
643 if (f->real_file_size == -1ULL && td->o.size)
644 f->real_file_size = td->o.size / td->o.nr_files;
645 }
646
647 return err;
648}
649
650struct fio_mount {
651 struct flist_head list;
652 const char *base;
653 char __base[256];
654 unsigned int key;
655};
656
657/*
658 * Get the number of free bytes on each unique mount backing the files.
659 */
660static unsigned long long get_fs_free_counts(struct thread_data *td)
661{
662 struct flist_head *n, *tmp;
663 unsigned long long ret = 0;
664 struct fio_mount *fm;
665 FLIST_HEAD(list);
666 struct fio_file *f;
667 unsigned int i;
668
669 for_each_file(td, f, i) {
670 struct stat sb;
671 char buf[256];
672
673 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
674 if (f->real_file_size != -1ULL)
675 ret += f->real_file_size;
676 continue;
677 } else if (f->filetype != FIO_TYPE_FILE)
678 continue;
679
680 strcpy(buf, f->file_name);
681
682 if (stat(buf, &sb) < 0) {
683 if (errno != ENOENT)
684 break;
685 strcpy(buf, ".");
686 if (stat(buf, &sb) < 0)
687 break;
688 }
689
690 fm = NULL;
691 flist_for_each(n, &list) {
692 fm = flist_entry(n, struct fio_mount, list);
693 if (fm->key == sb.st_dev)
694 break;
695
696 fm = NULL;
697 }
698
699 if (fm)
700 continue;
701
702 fm = malloc(sizeof(*fm));
703 strcpy(fm->__base, buf);
704 fm->base = basename(fm->__base);
705 fm->key = sb.st_dev;
706 flist_add(&fm->list, &list);
707 }
708
709 flist_for_each_safe(n, tmp, &list) {
710 unsigned long long sz;
711
712 fm = flist_entry(n, struct fio_mount, list);
713 flist_del(&fm->list);
714
715 sz = get_fs_size(fm->base);
716 if (sz && sz != -1ULL)
717 ret += sz;
718
719 free(fm);
720 }
721
722 return ret;
723}
724
725uint64_t get_start_offset(struct thread_data *td)
726{
727 return td->o.start_offset +
728 (td->thread_number - 1) * td->o.offset_increment;
729}
730
731/*
732 * Open the files and set up file sizes, creating files if necessary.
733 */
734int setup_files(struct thread_data *td)
735{
736 unsigned long long total_size, extend_size;
737 struct thread_options *o = &td->o;
738 struct fio_file *f;
739 unsigned int i, nr_fs_extra = 0;
740 int err = 0, need_extend;
741 int old_state;
742 const unsigned int bs = td_min_bs(td);
743 uint64_t fs = 0;
744
745 dprint(FD_FILE, "setup files\n");
746
747 old_state = td_bump_runstate(td, TD_SETTING_UP);
748
749 if (o->read_iolog_file)
750 goto done;
751
752 /*
753 * if ioengine defines a setup() method, it's responsible for
754 * opening the files and setting f->real_file_size to indicate
755 * the valid range for that file.
756 */
757 if (td->io_ops->setup)
758 err = td->io_ops->setup(td);
759 else
760 err = get_file_sizes(td);
761
762 if (err)
763 goto err_out;
764
765 /*
766 * check sizes. if the files/devices do not exist and the size
767 * isn't passed to fio, abort.
768 */
769 total_size = 0;
770 for_each_file(td, f, i) {
771 if (f->real_file_size == -1ULL)
772 total_size = -1ULL;
773 else
774 total_size += f->real_file_size;
775 }
776
777 if (o->fill_device)
778 td->fill_device_size = get_fs_free_counts(td);
779
780 /*
781 * device/file sizes are zero and no size given, punt
782 */
783 if ((!total_size || total_size == -1ULL) && !o->size &&
784 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
785 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
786 log_err("%s: you need to specify size=\n", o->name);
787 td_verror(td, EINVAL, "total_file_size");
788 goto err_out;
789 }
790
791 /*
792 * Calculate per-file size and potential extra size for the
793 * first files, if needed.
794 */
795 if (!o->file_size_low) {
796 uint64_t all_fs;
797
798 fs = o->size / o->nr_files;
799 all_fs = fs * o->nr_files;
800
801 if (all_fs < o->size)
802 nr_fs_extra = (o->size - all_fs) / bs;
803 }
804
805 /*
806 * now file sizes are known, so we can set ->io_size. if size= is
807 * not given, ->io_size is just equal to ->real_file_size. if size
808 * is given, ->io_size is size / nr_files.
809 */
810 extend_size = total_size = 0;
811 need_extend = 0;
812 for_each_file(td, f, i) {
813 f->file_offset = get_start_offset(td);
814
815 if (!o->file_size_low) {
816 /*
817 * no file size range given, file size is equal to
818 * total size divided by number of files. If that is
819 * zero, set it to the real file size. If the size
820 * doesn't divide nicely with the min blocksize,
821 * make the first files bigger.
822 */
823 f->io_size = fs;
824 if (nr_fs_extra) {
825 nr_fs_extra--;
826 f->io_size += bs;
827 }
828
829 if (!f->io_size)
830 f->io_size = f->real_file_size - f->file_offset;
831 } else if (f->real_file_size < o->file_size_low ||
832 f->real_file_size > o->file_size_high) {
833 if (f->file_offset > o->file_size_low)
834 goto err_offset;
835 /*
836 * file size given. if it's fixed, use that. if it's a
837 * range, generate a random size in-between.
838 */
839 if (o->file_size_low == o->file_size_high)
840 f->io_size = o->file_size_low - f->file_offset;
841 else {
842 f->io_size = get_rand_file_size(td)
843 - f->file_offset;
844 }
845 } else
846 f->io_size = f->real_file_size - f->file_offset;
847
848 if (f->io_size == -1ULL)
849 total_size = -1ULL;
850 else {
851 if (o->size_percent)
852 f->io_size = (f->io_size * o->size_percent) / 100;
853 total_size += f->io_size;
854 }
855
856 if (f->filetype == FIO_TYPE_FILE &&
857 (f->io_size + f->file_offset) > f->real_file_size &&
858 !(td->io_ops->flags & FIO_DISKLESSIO)) {
859 if (!o->create_on_open) {
860 need_extend++;
861 extend_size += (f->io_size + f->file_offset);
862 } else
863 f->real_file_size = f->io_size + f->file_offset;
864 fio_file_set_extend(f);
865 }
866 }
867
868 if (!o->size || o->size > total_size)
869 o->size = total_size;
870
871 /*
872 * See if we need to extend some files
873 */
874 if (need_extend) {
875 temp_stall_ts = 1;
876 if (output_format == FIO_OUTPUT_NORMAL)
877 log_info("%s: Laying out IO file(s) (%u file(s) /"
878 " %lluMB)\n", o->name, need_extend,
879 extend_size >> 20);
880
881 for_each_file(td, f, i) {
882 unsigned long long old_len = -1ULL, extend_len = -1ULL;
883
884 if (!fio_file_extend(f))
885 continue;
886
887 assert(f->filetype == FIO_TYPE_FILE);
888 fio_file_clear_extend(f);
889 if (!o->fill_device) {
890 old_len = f->real_file_size;
891 extend_len = f->io_size + f->file_offset -
892 old_len;
893 }
894 f->real_file_size = (f->io_size + f->file_offset);
895 err = extend_file(td, f);
896 if (err)
897 break;
898
899 err = __file_invalidate_cache(td, f, old_len,
900 extend_len);
901 close(f->fd);
902 f->fd = -1;
903 if (err)
904 break;
905 }
906 temp_stall_ts = 0;
907 }
908
909 if (err)
910 goto err_out;
911
912 if (!o->zone_size)
913 o->zone_size = o->size;
914
915 /*
916 * iolog already set the total io size, if we read back
917 * stored entries.
918 */
919 if (!o->read_iolog_file)
920 td->total_io_size = o->size * o->loops;
921
922done:
923 if (o->create_only)
924 td->done = 1;
925
926 td_restore_runstate(td, old_state);
927 return 0;
928err_offset:
929 log_err("%s: you need to specify valid offset=\n", o->name);
930err_out:
931 td_restore_runstate(td, old_state);
932 return 1;
933}
934
935int pre_read_files(struct thread_data *td)
936{
937 struct fio_file *f;
938 unsigned int i;
939
940 dprint(FD_FILE, "pre_read files\n");
941
942 for_each_file(td, f, i) {
943 pre_read_file(td, f);
944 }
945
946 return 1;
947}
948
949static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
950{
951 unsigned int range_size, seed;
952 unsigned long nranges;
953 uint64_t file_size;
954
955 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
956 file_size = min(f->real_file_size, f->io_size);
957
958 nranges = (file_size + range_size - 1) / range_size;
959
960 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
961 if (!td->o.rand_repeatable)
962 seed = td->rand_seeds[4];
963
964 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
965 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
966 else
967 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
968
969 return 1;
970}
971
972static int init_rand_distribution(struct thread_data *td)
973{
974 struct fio_file *f;
975 unsigned int i;
976 int state;
977
978 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
979 return 0;
980
981 state = td_bump_runstate(td, TD_SETTING_UP);
982
983 for_each_file(td, f, i)
984 __init_rand_distribution(td, f);
985
986 td_restore_runstate(td, state);
987
988 return 1;
989}
990
991int init_random_map(struct thread_data *td)
992{
993 unsigned long long blocks;
994 struct fio_file *f;
995 unsigned int i;
996
997 if (init_rand_distribution(td))
998 return 0;
999 if (!td_random(td))
1000 return 0;
1001
1002 for_each_file(td, f, i) {
1003 uint64_t file_size = min(f->real_file_size, f->io_size);
1004
1005 blocks = file_size / (unsigned long long) td->o.rw_min_bs;
1006
1007 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1008 unsigned long seed;
1009
1010 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1011
1012 if (!lfsr_init(&f->lfsr, blocks, seed, 0))
1013 continue;
1014 } else if (!td->o.norandommap) {
1015 f->io_axmap = axmap_new(blocks);
1016 if (f->io_axmap)
1017 continue;
1018 } else if (td->o.norandommap)
1019 continue;
1020
1021 if (!td->o.softrandommap) {
1022 log_err("fio: failed allocating random map. If running"
1023 " a large number of jobs, try the 'norandommap'"
1024 " option or set 'softrandommap'. Or give"
1025 " a larger --alloc-size to fio.\n");
1026 return 1;
1027 }
1028
1029 log_info("fio: file %s failed allocating random map. Running "
1030 "job without.\n", f->file_name);
1031 }
1032
1033 return 0;
1034}
1035
1036void close_files(struct thread_data *td)
1037{
1038 struct fio_file *f;
1039 unsigned int i;
1040
1041 for_each_file(td, f, i) {
1042 if (fio_file_open(f))
1043 td_io_close_file(td, f);
1044 }
1045}
1046
1047void close_and_free_files(struct thread_data *td)
1048{
1049 struct fio_file *f;
1050 unsigned int i;
1051
1052 dprint(FD_FILE, "close files\n");
1053
1054 for_each_file(td, f, i) {
1055 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1056 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1057 unlink(f->file_name);
1058 }
1059
1060 if (fio_file_open(f))
1061 td_io_close_file(td, f);
1062
1063 remove_file_hash(f);
1064
1065 sfree(f->file_name);
1066 f->file_name = NULL;
1067 axmap_free(f->io_axmap);
1068 f->io_axmap = NULL;
1069 sfree(f);
1070 }
1071
1072 td->o.filename = NULL;
1073 free(td->files);
1074 free(td->file_locks);
1075 td->files_index = 0;
1076 td->files = NULL;
1077 td->file_locks = NULL;
1078 td->o.file_lock_mode = FILE_LOCK_NONE;
1079 td->o.nr_files = 0;
1080}
1081
1082static void get_file_type(struct fio_file *f)
1083{
1084 struct stat sb;
1085
1086 if (!strcmp(f->file_name, "-"))
1087 f->filetype = FIO_TYPE_PIPE;
1088 else
1089 f->filetype = FIO_TYPE_FILE;
1090
1091 /* \\.\ is the device namespace in Windows, where every file is
1092 * a block device */
1093 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1094 f->filetype = FIO_TYPE_BD;
1095
1096 if (!stat(f->file_name, &sb)) {
1097 if (S_ISBLK(sb.st_mode))
1098 f->filetype = FIO_TYPE_BD;
1099 else if (S_ISCHR(sb.st_mode))
1100 f->filetype = FIO_TYPE_CHAR;
1101 else if (S_ISFIFO(sb.st_mode))
1102 f->filetype = FIO_TYPE_PIPE;
1103 }
1104}
1105
1106static void set_already_allocated(const char *fname) {
1107 struct file_name *fn;
1108
1109 fn = malloc(sizeof(struct file_name));
1110 fn->filename = strdup(fname);
1111 flist_add_tail(&fn->list, &filename_list);
1112}
1113
1114static int is_already_allocated(const char *fname)
1115{
1116 struct flist_head *entry;
1117 char *filename;
1118
1119 if (!flist_empty(&filename_list))
1120 {
1121 flist_for_each(entry, &filename_list) {
1122 filename = flist_entry(entry, struct file_name, list)->filename;
1123
1124 if (strcmp(filename, fname) == 0)
1125 return 1;
1126 }
1127 }
1128
1129 return 0;
1130}
1131
1132static void free_already_allocated(void) {
1133 struct flist_head *entry, *tmp;
1134 struct file_name *fn;
1135
1136 if (!flist_empty(&filename_list))
1137 {
1138 flist_for_each_safe(entry, tmp, &filename_list) {
1139 fn = flist_entry(entry, struct file_name, list);
1140 free(fn->filename);
1141 flist_del(&fn->list);
1142 free(fn);
1143 }
1144 }
1145}
1146
1147int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1148{
1149 int cur_files = td->files_index;
1150 char file_name[PATH_MAX];
1151 struct fio_file *f;
1152 int len = 0;
1153
1154 dprint(FD_FILE, "add file %s\n", fname);
1155
1156 if (td->o.directory)
1157 len = set_name_idx(file_name, td->o.directory, numjob);
1158
1159 sprintf(file_name + len, "%s", fname);
1160
1161 /* skip cloned siblings (numjob != 0) that reuse an already added file */
1162 if (numjob && is_already_allocated(file_name))
1163 return 0;
1164
1165 f = smalloc(sizeof(*f));
1166 if (!f) {
1167 log_err("fio: smalloc OOM\n");
1168 assert(0);
1169 }
1170
1171 f->fd = -1;
1172 f->shadow_fd = -1;
1173 fio_file_reset(td, f);
1174
1175 if (td->files_size <= td->files_index) {
1176 unsigned int new_size = td->o.nr_files + 1;
1177
1178 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1179
1180 td->files = realloc(td->files, new_size * sizeof(f));
1181 if (td->files == NULL) {
1182 log_err("fio: realloc OOM\n");
1183 assert(0);
1184 }
1185 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1186 td->file_locks = realloc(td->file_locks, new_size);
1187 if (!td->file_locks) {
1188 log_err("fio: realloc OOM\n");
1189 assert(0);
1190 }
1191 td->file_locks[cur_files] = FILE_LOCK_NONE;
1192 }
1193 td->files_size = new_size;
1194 }
1195 td->files[cur_files] = f;
1196 f->fileno = cur_files;
1197
1198 /*
1199 * init function, io engine may not be loaded yet
1200 */
1201 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1202 f->real_file_size = -1ULL;
1203
1204 f->file_name = smalloc_strdup(file_name);
1205 if (!f->file_name) {
1206 log_err("fio: smalloc OOM\n");
1207 assert(0);
1208 }
1209
1210 get_file_type(f);
1211
1212 switch (td->o.file_lock_mode) {
1213 case FILE_LOCK_NONE:
1214 break;
1215 case FILE_LOCK_READWRITE:
1216 f->rwlock = fio_rwlock_init();
1217 break;
1218 case FILE_LOCK_EXCLUSIVE:
1219 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1220 break;
1221 default:
1222 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1223 assert(0);
1224 }
1225
1226 td->files_index++;
1227 if (f->filetype == FIO_TYPE_FILE)
1228 td->nr_normal_files++;
1229
1230 set_already_allocated(file_name);
1231
1232 /*
1233 * For adding files after the fact - if openfiles= isn't
1234 * given as an option, ensure we allow at least one file open
1235 */
1236 if (!td->o.open_files)
1237 td->o.open_files = 1;
1238
1239 if (inc)
1240 td->o.nr_files++;
1241
1242 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1243 cur_files);
1244
1245 return cur_files;
1246}
1247
1248int add_file_exclusive(struct thread_data *td, const char *fname)
1249{
1250 struct fio_file *f;
1251 unsigned int i;
1252
1253 for_each_file(td, f, i) {
1254 if (!strcmp(f->file_name, fname))
1255 return i;
1256 }
1257
1258 return add_file(td, fname, 0, 1);
1259}
1260
1261void get_file(struct fio_file *f)
1262{
1263 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1264 assert(fio_file_open(f));
1265 f->references++;
1266}
1267
1268int put_file(struct thread_data *td, struct fio_file *f)
1269{
1270 int f_ret = 0, ret = 0;
1271
1272 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1273
1274 if (!fio_file_open(f)) {
1275 assert(f->fd == -1);
1276 return 0;
1277 }
1278
1279 assert(f->references);
1280 if (--f->references)
1281 return 0;
1282
1283 if (should_fsync(td) && td->o.fsync_on_close)
1284 f_ret = fsync(f->fd);
1285
1286 if (td->io_ops->close_file)
1287 ret = td->io_ops->close_file(td, f);
1288
1289 if (!ret)
1290 ret = f_ret;
1291
1292 td->nr_open_files--;
1293 fio_file_clear_open(f);
1294 assert(f->fd == -1);
1295 return ret;
1296}
1297
1298void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1299{
1300 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1301 return;
1302
1303 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1304 if (ddir == DDIR_READ)
1305 fio_rwlock_read(f->rwlock);
1306 else
1307 fio_rwlock_write(f->rwlock);
1308 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1309 fio_mutex_down(f->lock);
1310
1311 td->file_locks[f->fileno] = td->o.file_lock_mode;
1312}
1313
1314void unlock_file(struct thread_data *td, struct fio_file *f)
1315{
1316 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1317 return;
1318
1319 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1320 fio_rwlock_unlock(f->rwlock);
1321 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1322 fio_mutex_up(f->lock);
1323
1324 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1325}
1326
1327void unlock_file_all(struct thread_data *td, struct fio_file *f)
1328{
1329 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1330 return;
1331 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1332 unlock_file(td, f);
1333}
1334
1335static int recurse_dir(struct thread_data *td, const char *dirname)
1336{
1337 struct dirent *dir;
1338 int ret = 0;
1339 DIR *D;
1340
1341 D = opendir(dirname);
1342 if (!D) {
1343 char buf[FIO_VERROR_SIZE];
1344
1345 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1346 td_verror(td, errno, buf);
1347 return 1;
1348 }
1349
1350 while ((dir = readdir(D)) != NULL) {
1351 char full_path[PATH_MAX];
1352 struct stat sb;
1353
1354 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1355 continue;
1356
1357 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1358
1359 if (lstat(full_path, &sb) == -1) {
1360 if (errno != ENOENT) {
1361 td_verror(td, errno, "stat");
1362 return 1;
1363 }
1364 }
1365
1366 if (S_ISREG(sb.st_mode)) {
1367 add_file(td, full_path, 0, 1);
1368 continue;
1369 }
1370 if (!S_ISDIR(sb.st_mode))
1371 continue;
1372
1373 ret = recurse_dir(td, full_path);
1374 if (ret)
1375 break;
1376 }
1377
1378 closedir(D);
1379 return ret;
1380}
1381
1382int add_dir_files(struct thread_data *td, const char *path)
1383{
1384 int ret = recurse_dir(td, path);
1385
1386 if (!ret)
1387 log_info("fio: opendir added %d files\n", td->o.nr_files);
1388
1389 return ret;
1390}
1391
1392void dup_files(struct thread_data *td, struct thread_data *org)
1393{
1394 struct fio_file *f;
1395 unsigned int i;
1396
1397 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1398
1399 if (!org->files)
1400 return;
1401
1402 td->files = malloc(org->files_index * sizeof(f));
1403
1404 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1405 td->file_locks = malloc(org->files_index);
1406
1407 for_each_file(org, f, i) {
1408 struct fio_file *__f;
1409
1410 __f = smalloc(sizeof(*__f));
1411 if (!__f) {
1412 log_err("fio: smalloc OOM\n");
1413 assert(0);
1414 }
1415 __f->fd = -1;
1416 fio_file_reset(td, __f);
1417
1418 if (f->file_name) {
1419 __f->file_name = smalloc_strdup(f->file_name);
1420 if (!__f->file_name) {
1421 log_err("fio: smalloc OOM\n");
1422 assert(0);
1423 }
1424
1425 __f->filetype = f->filetype;
1426 }
1427
1428 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1429 __f->lock = f->lock;
1430 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1431 __f->rwlock = f->rwlock;
1432
1433 td->files[i] = __f;
1434 }
1435}
1436
1437/*
1438 * Returns the index that matches the filename, or -1 if not there
1439 */
1440int get_fileno(struct thread_data *td, const char *fname)
1441{
1442 struct fio_file *f;
1443 unsigned int i;
1444
1445 for_each_file(td, f, i)
1446 if (!strcmp(f->file_name, fname))
1447 return i;
1448
1449 return -1;
1450}
1451
1452/*
1453 * For log usage, where we add/open/close files automatically
1454 */
1455void free_release_files(struct thread_data *td)
1456{
1457 close_files(td);
1458 td->files_index = 0;
1459 td->nr_normal_files = 0;
1460}
1461
1462void fio_file_reset(struct thread_data *td, struct fio_file *f)
1463{
1464 f->last_pos = f->file_offset;
1465 f->last_start = -1ULL;
1466 if (f->io_axmap)
1467 axmap_reset(f->io_axmap);
1468 if (td->o.random_generator == FIO_RAND_GEN_LFSR)
1469 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1470}
1471
1472int fio_files_done(struct thread_data *td)
1473{
1474 struct fio_file *f;
1475 unsigned int i;
1476
1477 for_each_file(td, f, i)
1478 if (!fio_file_done(f))
1479 return 0;
1480
1481 return 1;
1482}
1483
1484/* free memory used in initialization phase only */
1485void filesetup_mem_free(void) {
1486 free_already_allocated();
1487}