[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8
9#include "fio.h"
10#include "smalloc.h"
11#include "filehash.h"
12#include "options.h"
13#include "os/os.h"
14#include "hash.h"
15#include "lib/axmap.h"
16#include "rwlock.h"
17#include "zbd.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static FLIST_HEAD(filename_list);
24
25/*
26 * List entry for filename_list
27 */
28struct file_name {
29 struct flist_head list;
30 char *filename;
31};
32
33static inline void clear_error(struct thread_data *td)
34{
35 td->error = 0;
36 td->verror[0] = '\0';
37}
38
39static int native_fallocate(struct thread_data *td, struct fio_file *f)
40{
41 bool success;
42
43 success = fio_fallocate(f, 0, f->real_file_size);
44 dprint(FD_FILE, "native fallocate of file %s size %llu was "
45 "%ssuccessful\n", f->file_name,
46 (unsigned long long) f->real_file_size,
47 !success ? "un": "");
48
49 if (success)
50 return false;
51
52 if (errno == ENOSYS)
53 dprint(FD_FILE, "native fallocate is not implemented\n");
54
55 return true;
56}
57
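/*
 * Pre-allocate the file according to the fallocate_mode option. Skipped
 * for fill_device jobs, which write until the device is full rather than
 * to a preset size.
 */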
58static void fallocate_file(struct thread_data *td, struct fio_file *f)
59{
60 if (td->o.fill_device)
61 return;
62
63 switch (td->o.fallocate_mode) {
64 case FIO_FALLOCATE_NATIVE:
65 native_fallocate(td, f);
66 break;
67 case FIO_FALLOCATE_NONE:
68 break;
69#ifdef CONFIG_POSIX_FALLOCATE
70 case FIO_FALLOCATE_POSIX: {
71 int r;
72
73 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
74 f->file_name,
75 (unsigned long long) f->real_file_size);
76
77 r = posix_fallocate(f->fd, 0, f->real_file_size);
78 if (r > 0)
79 log_err("fio: posix_fallocate fails: %s\n", strerror(r));
80 break;
81 }
82#endif /* CONFIG_POSIX_FALLOCATE */
83#ifdef CONFIG_LINUX_FALLOCATE
84 case FIO_FALLOCATE_KEEP_SIZE: {
85 int r;
86
87 dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) "
88 "file %s size %llu\n", f->file_name,
89 (unsigned long long) f->real_file_size);
90
91 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size);
92 if (r != 0)
93 td_verror(td, errno, "fallocate");
94
95 break;
96 }
97#endif /* CONFIG_LINUX_FALLOCATE */
98 case FIO_FALLOCATE_TRUNCATE: {
99 int r;
100
101 dprint(FD_FILE, "ftruncate file %s size %llu\n",
102 f->file_name,
103 (unsigned long long) f->real_file_size);
104 r = ftruncate(f->fd, f->real_file_size);
105 if (r != 0)
106 td_verror(td, errno, "ftruncate");
107
108 break;
109 }
110 default:
111 log_err("fio: unknown fallocate mode: %d\n", td->o.fallocate_mode);
112 assert(0);
113 }
114}
115
116/*
117 * Leaves f->fd open on success, caller must close
118 */
119static int extend_file(struct thread_data *td, struct fio_file *f)
120{
121 int new_layout = 0, unlink_file = 0, flags;
122 unsigned long long left;
123 unsigned long long bs;
124 char *b = NULL;
125
126 if (read_only) {
127 log_err("fio: refusing extend of file due to read-only\n");
128 return 0;
129 }
130
131 /*
	 * check if we need to lay the file out completely again. fio
133 * does that for operations involving reads, or for writes
134 * where overwrite is set
135 */
136 if (td_read(td) ||
137 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
138 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
139 new_layout = 1;
140 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
141 unlink_file = 1;
142
143 if (unlink_file || new_layout) {
144 int ret;
145
146 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
147
148 ret = td_io_unlink_file(td, f);
149 if (ret != 0 && ret != ENOENT) {
150 td_verror(td, errno, "unlink");
151 return 1;
152 }
153 }
154
155 flags = O_WRONLY;
156 if (td->o.allow_create)
157 flags |= O_CREAT;
158 if (new_layout)
159 flags |= O_TRUNC;
160
161#ifdef WIN32
162 flags |= _O_BINARY;
163#endif
164
165 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
166 f->fd = open(f->file_name, flags, 0644);
167 if (f->fd < 0) {
168 int err = errno;
169
170 if (err == ENOENT && !td->o.allow_create)
171 log_err("fio: file creation disallowed by "
172 "allow_file_create=0\n");
173 else
174 td_verror(td, err, "open");
175 return 1;
176 }
177
178 fallocate_file(td, f);
179
180 /*
181 * If our jobs don't require regular files initially, we're done.
182 */
183 if (!new_layout)
184 goto done;
185
186 /*
187 * The size will be -1ULL when fill_device is used, so don't truncate
188 * or fallocate this file, just write it
189 */
190 if (!td->o.fill_device) {
191 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
192 (unsigned long long) f->real_file_size);
193 if (ftruncate(f->fd, f->real_file_size) == -1) {
194 if (errno != EFBIG) {
195 td_verror(td, errno, "ftruncate");
196 goto err;
197 }
198 }
199 }
200
201 left = f->real_file_size;
202 bs = td->o.max_bs[DDIR_WRITE];
203 if (bs > left)
204 bs = left;
205
206 b = malloc(bs);
207 if (!b) {
208 td_verror(td, errno, "malloc");
209 goto err;
210 }
211
212 while (left && !td->terminate) {
213 ssize_t r;
214
215 if (bs > left)
216 bs = left;
217
218 fill_io_buffer(td, b, bs, bs);
219
220 r = write(f->fd, b, bs);
221
222 if (r > 0) {
223 left -= r;
224 continue;
225 } else {
226 if (r < 0) {
227 int __e = errno;
228
229 if (__e == ENOSPC || __e == EDQUOT) {
230 const char *__e_name;
231 if (td->o.fill_device)
232 break;
233 if (__e == ENOSPC)
234 __e_name = "ENOSPC";
235 else
236 __e_name = "EDQUOT";
237 log_info("fio: %s on laying out "
238 "file, stopping\n", __e_name);
239 }
240 td_verror(td, errno, "write");
241 } else
242 td_verror(td, EIO, "write");
243
244 goto err;
245 }
246 }
247
248 if (td->terminate) {
249 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
250 td_io_unlink_file(td, f);
251 } else if (td->o.create_fsync) {
252 if (fsync(f->fd) < 0) {
253 td_verror(td, errno, "fsync");
254 goto err;
255 }
256 }
257 if (td->o.fill_device && !td_write(td)) {
258 fio_file_clear_size_known(f);
259 if (td_io_get_file_size(td, f))
260 goto err;
261 if (f->io_size > f->real_file_size)
262 f->io_size = f->real_file_size;
263 }
264
265 free(b);
266done:
267 return 0;
268err:
269 close(f->fd);
270 f->fd = -1;
271 if (b)
272 free(b);
273 return 1;
274}
275
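/*
 * Read the file's I/O region back once (the pre_read option), so its
 * contents are already in memory before the job's real I/O starts.
 */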
276static bool pre_read_file(struct thread_data *td, struct fio_file *f)
277{
278 int r, did_open = 0, old_runstate;
279 unsigned long long left;
280 unsigned long long bs;
281 bool ret = true;
282 char *b;
283
284 if (td_ioengine_flagged(td, FIO_PIPEIO) ||
285 td_ioengine_flagged(td, FIO_NOIO))
286 return true;
287
288 if (f->filetype == FIO_TYPE_CHAR)
289 return true;
290
291 if (!fio_file_open(f)) {
292 if (td->io_ops->open_file(td, f)) {
293 log_err("fio: cannot pre-read, failed to open file\n");
294 return false;
295 }
296 did_open = 1;
297 }
298
299 old_runstate = td_bump_runstate(td, TD_PRE_READING);
300
301 left = f->io_size;
302 bs = td->o.max_bs[DDIR_READ];
303 if (bs > left)
304 bs = left;
305
306 b = malloc(bs);
307 if (!b) {
308 td_verror(td, errno, "malloc");
309 ret = false;
310 goto error;
311 }
312 memset(b, 0, bs);
313
314 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
315 td_verror(td, errno, "lseek");
316 log_err("fio: failed to lseek pre-read file\n");
317 ret = false;
318 goto error;
319 }
320
321 while (left && !td->terminate) {
322 if (bs > left)
323 bs = left;
324
325 r = read(f->fd, b, bs);
326
327 if (r == (int) bs) {
328 left -= bs;
329 continue;
330 } else {
331 td_verror(td, EIO, "pre_read");
332 break;
333 }
334 }
335
336error:
337 td_restore_runstate(td, old_runstate);
338
339 if (did_open)
340 td->io_ops->close_file(td, f);
341
342 free(b);
343 return ret;
344}
345
346/*
 * Generic function to prepopulate a regular file with data.
 * Useful if you want to make sure the I/O engine has data to read.
349 * Leaves f->fd open on success, caller must close.
350 */
351int generic_prepopulate_file(struct thread_data *td, struct fio_file *f)
352{
353 int flags;
354 unsigned long long left, bs;
355 char *b = NULL;
356
357 /* generic function for regular files only */
358 assert(f->filetype == FIO_TYPE_FILE);
359
360 if (read_only) {
361 log_err("fio: refusing to write a file due to read-only\n");
362 return 0;
363 }
364
365 flags = O_WRONLY;
366 if (td->o.allow_create)
367 flags |= O_CREAT;
368
369#ifdef WIN32
370 flags |= _O_BINARY;
371#endif
372
373 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
374 f->fd = open(f->file_name, flags, 0644);
375 if (f->fd < 0) {
376 int err = errno;
377
378 if (err == ENOENT && !td->o.allow_create)
379 log_err("fio: file creation disallowed by "
380 "allow_file_create=0\n");
381 else
382 td_verror(td, err, "open");
383 return 1;
384 }
385
386 left = f->real_file_size;
387 bs = td->o.max_bs[DDIR_WRITE];
388 if (bs > left)
389 bs = left;
390
391 b = malloc(bs);
392 if (!b) {
393 td_verror(td, errno, "malloc");
394 goto err;
395 }
396
397 while (left && !td->terminate) {
398 ssize_t r;
399
400 if (bs > left)
401 bs = left;
402
403 fill_io_buffer(td, b, bs, bs);
404
405 r = write(f->fd, b, bs);
406
407 if (r > 0) {
408 left -= r;
409 } else {
410 td_verror(td, errno, "write");
411 goto err;
412 }
413 }
414
415 if (td->terminate) {
416 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
417 td_io_unlink_file(td, f);
418 } else if (td->o.create_fsync) {
419 if (fsync(f->fd) < 0) {
420 td_verror(td, errno, "fsync");
421 goto err;
422 }
423 }
424
425 free(b);
426 return 0;
427err:
428 close(f->fd);
429 f->fd = -1;
430 if (b)
431 free(b);
432 return 1;
433}
434
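/*
 * Pick a random per-file size between the file_size_low and file_size_high
 * options, rounded down to a multiple of the minimum block size.
 */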
435unsigned long long get_rand_file_size(struct thread_data *td)
436{
437 unsigned long long ret, sized;
438 uint64_t frand_max;
439 uint64_t r;
440
441 frand_max = rand_max(&td->file_size_state);
442 r = __rand(&td->file_size_state);
443 sized = td->o.file_size_high - td->o.file_size_low;
444 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
445 ret += td->o.file_size_low;
446 ret -= (ret % td->o.rw_min_bs);
447 return ret;
448}
449
450static int file_size(struct thread_data *td, struct fio_file *f)
451{
452 struct stat st;
453
454 if (stat(f->file_name, &st) == -1) {
455 td_verror(td, errno, "fstat");
456 return 1;
457 }
458
459 f->real_file_size = st.st_size;
460 return 0;
461}
462
463static int bdev_size(struct thread_data *td, struct fio_file *f)
464{
465 unsigned long long bytes = 0;
466 int r;
467
468 if (td->io_ops->open_file(td, f)) {
469 log_err("fio: failed opening blockdev %s for size check\n",
470 f->file_name);
471 return 1;
472 }
473
474 r = blockdev_size(f, &bytes);
475 if (r) {
476 td_verror(td, r, "blockdev_size");
477 goto err;
478 }
479
480 if (!bytes) {
481 log_err("%s: zero sized block device?\n", f->file_name);
482 goto err;
483 }
484
485 f->real_file_size = bytes;
486 td->io_ops->close_file(td, f);
487 return 0;
488err:
489 td->io_ops->close_file(td, f);
490 return 1;
491}
492
493static int char_size(struct thread_data *td, struct fio_file *f)
494{
495#ifdef FIO_HAVE_CHARDEV_SIZE
496 unsigned long long bytes = 0;
497 int r;
498
499 if (td->io_ops->open_file(td, f)) {
500 log_err("fio: failed opening chardev %s for size check\n",
501 f->file_name);
502 return 1;
503 }
504
505 r = chardev_size(f, &bytes);
506 if (r) {
507 td_verror(td, r, "chardev_size");
508 goto err;
509 }
510
511 if (!bytes) {
512 log_err("%s: zero sized char device?\n", f->file_name);
513 goto err;
514 }
515
516 f->real_file_size = bytes;
517 td->io_ops->close_file(td, f);
518 return 0;
519err:
520 td->io_ops->close_file(td, f);
521 return 1;
522#else
523 f->real_file_size = -1ULL;
524 return 0;
525#endif
526}
527
528static int get_file_size(struct thread_data *td, struct fio_file *f)
529{
530 int ret = 0;
531
532 if (fio_file_size_known(f))
533 return 0;
534
535 if (f->filetype == FIO_TYPE_FILE)
536 ret = file_size(td, f);
537 else if (f->filetype == FIO_TYPE_BLOCK)
538 ret = bdev_size(td, f);
539 else if (f->filetype == FIO_TYPE_CHAR)
540 ret = char_size(td, f);
541 else {
542 f->real_file_size = -1;
543 log_info("%s: failed to get file size of %s\n", td->o.name,
544 f->file_name);
545 return 1; /* avoid offset extends end error message */
546 }
547
548 /*
	 * If getting the size failed, leave ->real_file_size at 0; for
	 * regular files that may simply mean the file is expected to be
	 * laid out later during initial setup.
551 */
552 if (ret)
553 return ret;
554
555 /*
556 * ->file_offset normally hasn't been initialized yet, so this
557 * is basically always false unless ->real_file_size is -1, but
558 * if ->real_file_size is -1 this message doesn't make sense.
559 * As a result, this message is basically useless.
560 */
561 if (f->file_offset > f->real_file_size) {
562 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
563 (unsigned long long) f->file_offset,
564 (unsigned long long) f->real_file_size);
565 return 1;
566 }
567
568 fio_file_set_size_known(f);
569 return 0;
570}
571
572static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
573 unsigned long long off,
574 unsigned long long len)
575{
576 int errval = 0, ret = 0;
577
578#ifdef CONFIG_ESX
579 return 0;
580#endif
581
582 if (len == -1ULL)
583 len = f->io_size;
584 if (off == -1ULL)
585 off = f->file_offset;
586
587 if (len == -1ULL || off == -1ULL)
588 return 0;
589
590 if (td->io_ops->invalidate) {
591 dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
592 f->file_name);
593 ret = td->io_ops->invalidate(td, f);
594 if (ret < 0)
595 errval = -ret;
596 } else if (td_ioengine_flagged(td, FIO_DISKLESSIO)) {
597 dprint(FD_IO, "invalidate not supported by ioengine %s\n",
598 td->io_ops->name);
599 } else if (f->filetype == FIO_TYPE_FILE) {
600 dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
601 f->file_name, off, len);
602 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
603 if (ret)
604 errval = ret;
605 } else if (f->filetype == FIO_TYPE_BLOCK) {
606 int retry_count = 0;
607
608 dprint(FD_IO, "drop page cache %s\n", f->file_name);
609 ret = blockdev_invalidate_cache(f);
610 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
611 /*
612 * Linux multipath devices reject ioctl while
613 * the maps are being updated. That window can
614 * last tens of milliseconds; we'll try up to
615 * a quarter of a second.
616 */
617 usleep(10000);
618 ret = blockdev_invalidate_cache(f);
619 }
620 if (ret < 0 && errno == EACCES && geteuid()) {
621 if (!fio_did_warn(FIO_WARN_ROOT_FLUSH)) {
622 log_err("fio: only root may flush block "
623 "devices. Cache flush bypassed!\n");
624 }
625 }
626 if (ret < 0)
627 errval = errno;
628 } else if (f->filetype == FIO_TYPE_CHAR ||
629 f->filetype == FIO_TYPE_PIPE) {
630 dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
631 }
632
633 /*
634 * Cache flushing isn't a fatal condition, and we know it will
635 * happen on some platforms where we don't have the proper
	 * function to flush e.g. block device caches. So just warn and
637 * continue on our way.
638 */
639 if (errval)
640 log_info("fio: cache invalidation of %s failed: %s\n",
641 f->file_name, strerror(errval));
642
643 return 0;
644
645}
646
647int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
648{
649 if (!fio_file_open(f))
650 return 0;
651
652 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
653}
654
655int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
656{
657 int ret = 0;
658
659 dprint(FD_FILE, "fd close %s\n", f->file_name);
660
661 remove_file_hash(f);
662
663 if (close(f->fd) < 0)
664 ret = errno;
665
666 f->fd = -1;
667
668 if (f->shadow_fd != -1) {
669 close(f->shadow_fd);
670 f->shadow_fd = -1;
671 }
672
673 f->engine_pos = 0;
674 return ret;
675}
676
677int file_lookup_open(struct fio_file *f, int flags)
678{
679 struct fio_file *__f;
680 int from_hash;
681
682 __f = lookup_file_hash(f->file_name);
683 if (__f) {
684 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
685 f->lock = __f->lock;
686 from_hash = 1;
687 } else {
688 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
689 from_hash = 0;
690 }
691
692#ifdef WIN32
693 flags |= _O_BINARY;
694#endif
695
696 f->fd = open(f->file_name, flags, 0600);
697 return from_hash;
698}
699
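/*
 * Close any shadow descriptors stashed by generic_open_file() (kept open
 * to dodge udev/blkid rescans on close). Called when open() fails with
 * EMFILE so that the open can be retried.
 */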
700static int file_close_shadow_fds(struct thread_data *td)
701{
702 struct fio_file *f;
703 int num_closed = 0;
704 unsigned int i;
705
706 for_each_file(td, f, i) {
707 if (f->shadow_fd == -1)
708 continue;
709
710 close(f->shadow_fd);
711 f->shadow_fd = -1;
712 num_closed++;
713 }
714
715 return num_closed;
716}
717
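/*
 * Default .open_file implementation: build the open(2) flags from the job
 * options (direct, atomic, sync, create, read-only) and open the file,
 * retrying without the noatime flag on EPERM and after closing shadow
 * descriptors on EMFILE.
 */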
718int generic_open_file(struct thread_data *td, struct fio_file *f)
719{
720 int is_std = 0;
721 int flags = 0;
722 int from_hash = 0;
723
724 dprint(FD_FILE, "fd open %s\n", f->file_name);
725
726 if (!strcmp(f->file_name, "-")) {
727 if (td_rw(td)) {
728 log_err("fio: can't read/write to stdin/out\n");
729 return 1;
730 }
731 is_std = 1;
732
733 /*
734 * move output logging to stderr, if we are writing to stdout
735 */
736 if (td_write(td))
737 f_out = stderr;
738 }
739
740 if (td_trim(td))
741 goto skip_flags;
742 if (td->o.odirect)
743 flags |= OS_O_DIRECT;
744 if (td->o.oatomic) {
745 if (!FIO_O_ATOMIC) {
746 td_verror(td, EINVAL, "OS does not support atomic IO");
747 return 1;
748 }
749 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
750 }
751 flags |= td->o.sync_io;
752 if (td->o.create_on_open && td->o.allow_create)
753 flags |= O_CREAT;
754skip_flags:
755 if (f->filetype != FIO_TYPE_FILE)
756 flags |= FIO_O_NOATIME;
757
758open_again:
759 if (td_write(td)) {
760 if (!read_only)
761 flags |= O_RDWR;
762
763 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
764 flags |= O_CREAT;
765
766 if (is_std)
767 f->fd = dup(STDOUT_FILENO);
768 else
769 from_hash = file_lookup_open(f, flags);
770 } else if (td_read(td)) {
771 if (f->filetype == FIO_TYPE_CHAR && !read_only)
772 flags |= O_RDWR;
773 else
774 flags |= O_RDONLY;
775
776 if (is_std)
777 f->fd = dup(STDIN_FILENO);
778 else
779 from_hash = file_lookup_open(f, flags);
780 } else if (td_trim(td)) {
781 assert(!td_rw(td)); /* should have matched above */
782 if (!read_only)
783 flags |= O_RDWR;
784 from_hash = file_lookup_open(f, flags);
785 }
786
787 if (f->fd == -1) {
788 char buf[FIO_VERROR_SIZE];
789 int __e = errno;
790
791 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
792 flags &= ~FIO_O_NOATIME;
793 goto open_again;
794 }
795 if (__e == EMFILE && file_close_shadow_fds(td))
796 goto open_again;
797
798 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
799
800 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
801 log_err("fio: looks like your file system does not " \
802 "support direct=1/buffered=0\n");
803 }
804
805 td_verror(td, __e, buf);
806 return 1;
807 }
808
809 if (!from_hash && f->fd != -1) {
810 if (add_file_hash(f)) {
811 int fio_unused ret;
812
813 /*
814 * Stash away descriptor for later close. This is to
815 * work-around a "feature" on Linux, where a close of
816 * an fd that has been opened for write will trigger
817 * udev to call blkid to check partitions, fs id, etc.
818 * That pollutes the device cache, which can slow down
819 * unbuffered accesses.
820 */
821 if (f->shadow_fd == -1)
822 f->shadow_fd = f->fd;
823 else {
824 /*
825 * OK to ignore, we haven't done anything
826 * with it
827 */
828 ret = generic_close_file(td, f);
829 }
830 goto open_again;
831 }
832 }
833
834 return 0;
835}
836
837/*
 * This function is the default .get_file_size implementation for the
 * majority of I/O engines; it simply calls get_file_size().
840 */
841int generic_get_file_size(struct thread_data *td, struct fio_file *f)
842{
843 return get_file_size(td, f);
844}
845
846/*
847 * open/close all files, so that ->real_file_size gets set
848 */
849static int get_file_sizes(struct thread_data *td)
850{
851 struct fio_file *f;
852 unsigned int i;
853 int err = 0;
854
855 for_each_file(td, f, i) {
856 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
857 f->file_name);
858
859 if (td_io_get_file_size(td, f)) {
860 if (td->error != ENOENT) {
861 log_err("%s\n", td->verror);
862 err = 1;
863 break;
864 }
865 clear_error(td);
866 }
867
868 /*
869 * There are corner cases where we end up with -1 for
870 * ->real_file_size due to unsupported file type, etc.
		 * We then just set it to the size option value divided by the
		 * number of files, similar to the way file ->io_size is set.
873 * stat(2) failure doesn't set ->real_file_size to -1.
874 */
875 if (f->real_file_size == -1ULL && td->o.size)
876 f->real_file_size = td->o.size / td->o.nr_files;
877 }
878
879 return err;
880}
881
882struct fio_mount {
883 struct flist_head list;
884 const char *base;
885 char __base[256];
886 unsigned int key;
887};
888
889/*
 * Get the number of free bytes on each unique mount holding our files.
891 */
892static unsigned long long get_fs_free_counts(struct thread_data *td)
893{
894 struct flist_head *n, *tmp;
895 unsigned long long ret = 0;
896 struct fio_mount *fm;
897 FLIST_HEAD(list);
898 struct fio_file *f;
899 unsigned int i;
900
901 for_each_file(td, f, i) {
902 struct stat sb;
903 char buf[256];
904
905 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
906 if (f->real_file_size != -1ULL)
907 ret += f->real_file_size;
908 continue;
909 } else if (f->filetype != FIO_TYPE_FILE)
910 continue;
911
912 snprintf(buf, FIO_ARRAY_SIZE(buf), "%s", f->file_name);
913
914 if (stat(buf, &sb) < 0) {
915 if (errno != ENOENT)
916 break;
917 strcpy(buf, ".");
918 if (stat(buf, &sb) < 0)
919 break;
920 }
921
922 fm = NULL;
923 flist_for_each(n, &list) {
924 fm = flist_entry(n, struct fio_mount, list);
925 if (fm->key == sb.st_dev)
926 break;
927
928 fm = NULL;
929 }
930
931 if (fm)
932 continue;
933
934 fm = calloc(1, sizeof(*fm));
935 snprintf(fm->__base, FIO_ARRAY_SIZE(fm->__base), "%s", buf);
936 fm->base = basename(fm->__base);
937 fm->key = sb.st_dev;
938 flist_add(&fm->list, &list);
939 }
940
941 flist_for_each_safe(n, tmp, &list) {
942 unsigned long long sz;
943
944 fm = flist_entry(n, struct fio_mount, list);
945 flist_del(&fm->list);
946
947 sz = get_fs_free_size(fm->base);
948 if (sz && sz != -1ULL)
949 ret += sz;
950
951 free(fm);
952 }
953
954 return ret;
955}
956
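/*
 * Compute the starting offset for this (sub)job within the file, based on
 * file_append, start_offset/start_offset_percent and offset_increment,
 * aligning percentage-based offsets to a block boundary.
 */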
957uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
958{
959 bool align = false;
960 struct thread_options *o = &td->o;
961 unsigned long long align_bs;
962 unsigned long long offset;
963 unsigned long long increment;
964
965 if (o->file_append && f->filetype == FIO_TYPE_FILE)
966 return f->real_file_size;
967
968 if (o->offset_increment_percent) {
969 assert(!o->offset_increment);
970 increment = o->offset_increment_percent * f->real_file_size / 100;
971 align = true;
972 } else
973 increment = o->offset_increment;
974
975 if (o->start_offset_percent > 0) {
976 /* calculate the raw offset */
977 offset = (f->real_file_size * o->start_offset_percent / 100) +
978 (td->subjob_number * increment);
979
980 align = true;
981 } else {
982 /* start_offset_percent not set */
983 offset = o->start_offset +
984 td->subjob_number * increment;
985 }
986
987 if (align) {
988 /*
989 * if offset_align is provided, use it
990 */
991 if (fio_option_is_set(o, start_offset_align)) {
992 align_bs = o->start_offset_align;
993 } else {
994 /* else take the minimum block size */
995 align_bs = td_min_bs(td);
996 }
997
998 /*
999 * block align the offset at the next available boundary at
1000 * ceiling(offset / align_bs) * align_bs
1001 */
1002 offset = (offset / align_bs + (offset % align_bs != 0)) * align_bs;
1003 }
1004
1005 return offset;
1006}
1007
1008/*
 * Find the longest leading portion of the path that exists and return its length
1010 */
1011int longest_existing_path(char *path) {
1012 char buf[PATH_MAX];
1013 bool done;
1014 char *buf_pos;
1015 int offset;
1016#ifdef WIN32
1017 DWORD dwAttr;
1018#else
1019 struct stat sb;
1020#endif
1021
1022 sprintf(buf, "%s", path);
1023 done = false;
1024 while (!done) {
1025 buf_pos = strrchr(buf, FIO_OS_PATH_SEPARATOR);
1026 if (!buf_pos) {
1027 done = true;
1028 offset = 0;
1029 break;
1030 }
1031
1032 *(buf_pos + 1) = '\0';
1033
1034#ifdef WIN32
1035 dwAttr = GetFileAttributesA(buf);
1036 if (dwAttr != INVALID_FILE_ATTRIBUTES) {
1037 done = true;
1038 }
1039#else
1040 if (stat(buf, &sb) == 0)
1041 done = true;
1042#endif
1043 if (done)
1044 offset = buf_pos - buf;
1045 else
1046 *buf_pos = '\0';
1047 }
1048
1049 return offset;
1050}
1051
1052static bool create_work_dirs(struct thread_data *td, const char *fname)
1053{
1054 char path[PATH_MAX];
1055 char *start, *end;
1056 int offset;
1057
1058 snprintf(path, PATH_MAX, "%s", fname);
1059 start = path;
1060
1061 offset = longest_existing_path(path);
1062 end = start + offset;
1063 while ((end = strchr(end, FIO_OS_PATH_SEPARATOR)) != NULL) {
1064 if (end == start) {
1065 end++;
1066 continue;
1067 }
1068 *end = '\0';
1069 errno = 0;
1070 if (fio_mkdir(path, 0700) && errno != EEXIST) {
1071 log_err("fio: failed to create dir (%s): %s\n",
1072 start, strerror(errno));
1073 return false;
1074 }
1075 *end = FIO_OS_PATH_SEPARATOR;
1076 end++;
1077 }
1078 td->flags |= TD_F_DIRS_CREATED;
1079 return true;
1080}
1081
1082/*
1083 * Open the files and setup files sizes, creating files if necessary.
1084 */
1085int setup_files(struct thread_data *td)
1086{
1087 unsigned long long total_size, extend_size;
1088 struct thread_options *o = &td->o;
1089 struct fio_file *f;
1090 unsigned int i, nr_fs_extra = 0;
1091 int err = 0, need_extend;
1092 int old_state;
1093 const unsigned long long bs = td_min_bs(td);
1094 uint64_t fs = 0;
1095
1096 dprint(FD_FILE, "setup files\n");
1097
1098 old_state = td_bump_runstate(td, TD_SETTING_UP);
1099
1100 for_each_file(td, f, i) {
1101 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1102 strchr(f->file_name, FIO_OS_PATH_SEPARATOR) &&
1103 !(td->flags & TD_F_DIRS_CREATED) &&
1104 !create_work_dirs(td, f->file_name))
1105 goto err_out;
1106 }
1107
1108 /*
1109 * Find out physical size of files or devices for this thread,
1110 * before we determine I/O size and range of our targets.
1111 * If ioengine defines a setup() method, it's responsible for
1112 * opening the files and setting f->real_file_size to indicate
1113 * the valid range for that file.
1114 */
1115 if (td->io_ops->setup)
1116 err = td->io_ops->setup(td);
1117 else
1118 err = get_file_sizes(td);
1119
1120 if (err)
1121 goto err_out;
1122
1123 if (o->read_iolog_file)
1124 goto done;
1125
1126 if (td->o.zone_mode == ZONE_MODE_ZBD) {
1127 err = zbd_init_files(td);
1128 if (err)
1129 goto err_out;
1130 }
1131 zbd_recalc_options_with_zone_granularity(td);
1132
1133 /*
1134 * check sizes. if the files/devices do not exist and the size
1135 * isn't passed to fio, abort.
1136 */
1137 total_size = 0;
1138 for_each_file(td, f, i) {
1139 f->fileno = i;
1140 if (f->real_file_size == -1ULL)
1141 total_size = -1ULL;
1142 else
1143 total_size += f->real_file_size;
1144 }
1145
1146 if (o->fill_device)
1147 td->fill_device_size = get_fs_free_counts(td);
1148
1149 /*
1150 * device/file sizes are zero and no size given, punt
1151 */
1152 if ((!total_size || total_size == -1ULL) && !o->size &&
1153 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
1154 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
1155 log_err("%s: you need to specify size=\n", o->name);
1156 td_verror(td, EINVAL, "total_file_size");
1157 goto err_out;
1158 }
1159
1160 /*
1161 * Calculate per-file size and potential extra size for the
1162 * first files, if needed (i.e. if we don't have a fixed size).
1163 */
1164 if (!o->file_size_low && o->nr_files) {
1165 uint64_t all_fs;
1166
1167 fs = o->size / o->nr_files;
1168 all_fs = fs * o->nr_files;
1169
1170 if (all_fs < o->size)
1171 nr_fs_extra = (o->size - all_fs) / bs;
1172 }
1173
1174 /*
1175 * now file sizes are known, so we can set ->io_size. if size= is
1176 * not given, ->io_size is just equal to ->real_file_size. if size
1177 * is given, ->io_size is size / nr_files.
1178 */
1179 extend_size = total_size = 0;
1180 need_extend = 0;
1181 for_each_file(td, f, i) {
1182 f->file_offset = get_start_offset(td, f);
1183
1184 /*
1185 * Update ->io_size depending on options specified.
1186 * ->file_size_low being 0 means filesize option isn't set.
		 * A non-zero ->file_size_low equal to ->file_size_high means the
		 * filesize option is set in a fixed size format.
		 * A non-zero ->file_size_low different from ->file_size_high means
		 * the filesize option is set in a range format.
1191 */
1192 if (!o->file_size_low) {
1193 /*
1194 * no file size or range given, file size is equal to
1195 * total size divided by number of files. If the size
1196 * doesn't divide nicely with the min blocksize,
1197 * make the first files bigger.
1198 */
1199 f->io_size = fs;
1200 if (nr_fs_extra) {
1201 nr_fs_extra--;
1202 f->io_size += bs;
1203 }
1204
1205 /*
1206 * We normally don't come here for regular files, but
1207 * if the result is 0 for a regular file, set it to the
1208 * real file size. This could be size of the existing
1209 * one if it already exists, but otherwise will be set
1210 * to 0. A new file won't be created because
1211 * ->io_size + ->file_offset equals ->real_file_size.
1212 */
1213 if (!f->io_size) {
1214 if (f->file_offset > f->real_file_size)
1215 goto err_offset;
1216 f->io_size = f->real_file_size - f->file_offset;
1217 if (!f->io_size)
1218 log_info("fio: file %s may be ignored\n",
1219 f->file_name);
1220 }
1221 } else if (f->real_file_size < o->file_size_low ||
1222 f->real_file_size > o->file_size_high) {
1223 if (f->file_offset > o->file_size_low)
1224 goto err_offset;
1225 /*
1226 * file size given. if it's fixed, use that. if it's a
1227 * range, generate a random size in-between.
1228 */
1229 if (o->file_size_low == o->file_size_high)
1230 f->io_size = o->file_size_low - f->file_offset;
1231 else {
1232 f->io_size = get_rand_file_size(td)
1233 - f->file_offset;
1234 }
1235 } else
1236 f->io_size = f->real_file_size - f->file_offset;
1237
1238 if (f->io_size == -1ULL)
1239 total_size = -1ULL;
1240 else {
1241 uint64_t io_size;
1242
1243 if (o->size_percent && o->size_percent != 100) {
1244 uint64_t file_size;
1245
1246 file_size = f->io_size + f->file_offset;
1247 f->io_size = (file_size *
1248 o->size_percent) / 100;
1249 if (f->io_size > (file_size - f->file_offset))
1250 f->io_size = file_size - f->file_offset;
1251
1252 f->io_size -= (f->io_size % td_min_bs(td));
1253 }
1254
1255 io_size = f->io_size;
1256 if (o->io_size_percent && o->io_size_percent != 100) {
1257 io_size *= o->io_size_percent;
1258 io_size /= 100;
1259 }
1260
1261 total_size += io_size;
1262 }
1263
1264 if (f->filetype == FIO_TYPE_FILE &&
1265 (f->io_size + f->file_offset) > f->real_file_size) {
1266 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1267 !o->create_on_open) {
1268 need_extend++;
1269 extend_size += (f->io_size + f->file_offset);
1270 fio_file_set_extend(f);
1271 } else if (!td_ioengine_flagged(td, FIO_DISKLESSIO) ||
1272 (td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1273 td_ioengine_flagged(td, FIO_FAKEIO)))
1274 f->real_file_size = f->io_size + f->file_offset;
1275 }
1276 }
1277
1278 if (td->o.block_error_hist) {
1279 int len;
1280
1281 assert(td->o.nr_files == 1); /* checked in fixup_options */
1282 f = td->files[0];
1283 len = f->io_size / td->o.bs[DDIR_TRIM];
1284 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1285 log_err("fio: cannot calculate block histogram with "
1286 "%d trim blocks, maximum %d\n",
1287 len, MAX_NR_BLOCK_INFOS);
1288 td_verror(td, EINVAL, "block_error_hist");
1289 goto err_out;
1290 }
1291
1292 td->ts.nr_block_infos = len;
1293 for (i = 0; i < len; i++)
1294 td->ts.block_infos[i] =
1295 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
1296 } else
1297 td->ts.nr_block_infos = 0;
1298
1299 if (!o->size || (total_size && o->size > total_size))
1300 o->size = total_size;
1301
1302 if (o->size < td_min_bs(td)) {
1303 log_err("fio: blocksize is larger than data set range\n");
1304 goto err_out;
1305 }
1306
1307 /*
1308 * See if we need to extend some files, typically needed when our
1309 * target regular files don't exist yet, but our jobs require them
1310 * initially due to read I/Os.
1311 */
1312 if (need_extend) {
1313 temp_stall_ts = 1;
1314 if (output_format & FIO_OUTPUT_NORMAL) {
1315 log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1316 o->name,
1317 need_extend > 1 ? "s" : "",
1318 need_extend,
1319 need_extend > 1 ? "s" : "",
1320 need_extend > 1 ? "total " : "",
1321 extend_size >> 20);
1322 }
1323
1324 for_each_file(td, f, i) {
1325 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1326
1327 if (!fio_file_extend(f))
1328 continue;
1329
1330 assert(f->filetype == FIO_TYPE_FILE);
1331 fio_file_clear_extend(f);
1332 if (!o->fill_device) {
1333 old_len = f->real_file_size;
1334 extend_len = f->io_size + f->file_offset -
1335 old_len;
1336 }
1337 f->real_file_size = (f->io_size + f->file_offset);
1338 err = extend_file(td, f);
1339 if (err)
1340 break;
1341
1342 err = __file_invalidate_cache(td, f, old_len,
1343 extend_len);
1344
1345 /*
1346 * Shut up static checker
1347 */
1348 if (f->fd != -1)
1349 close(f->fd);
1350
1351 f->fd = -1;
1352 if (err)
1353 break;
1354 }
1355 temp_stall_ts = 0;
1356 }
1357
1358 if (err)
1359 goto err_out;
1360
1361 /*
	 * Prepopulate files with data. It might be expected to read some
	 * "real" data instead of zeroed files (if no writes to the file occurred
	 * prior to a read job). The engine has to provide a way to do that.
1365 */
1366 if (td->io_ops->prepopulate_file) {
1367 temp_stall_ts = 1;
1368
1369 for_each_file(td, f, i) {
1370 if (output_format & FIO_OUTPUT_NORMAL) {
1371 log_info("%s: Prepopulating IO file (%s)\n",
1372 o->name, f->file_name);
1373 }
1374
1375 err = td->io_ops->prepopulate_file(td, f);
1376 if (err)
1377 break;
1378
1379 err = __file_invalidate_cache(td, f, f->file_offset,
1380 f->io_size);
1381
1382 /*
1383 * Shut up static checker
1384 */
1385 if (f->fd != -1)
1386 close(f->fd);
1387
1388 f->fd = -1;
1389 if (err)
1390 break;
1391 }
1392 temp_stall_ts = 0;
1393 }
1394
1395 if (err)
1396 goto err_out;
1397
1398 /*
1399 * iolog already set the total io size, if we read back
1400 * stored entries.
1401 */
1402 if (!o->read_iolog_file) {
1403 if (o->io_size)
1404 td->total_io_size = o->io_size * o->loops;
1405 else
1406 td->total_io_size = o->size * o->loops;
1407 }
1408
1409done:
1410 if (td->o.zone_mode == ZONE_MODE_ZBD) {
1411 err = zbd_setup_files(td);
1412 if (err)
1413 goto err_out;
1414 }
1415
1416 if (o->create_only)
1417 td->done = 1;
1418
1419 td_restore_runstate(td, old_state);
1420
1421 return 0;
1422
1423err_offset:
1424 log_err("%s: you need to specify valid offset=\n", o->name);
1425err_out:
1426 td_restore_runstate(td, old_state);
1427 return 1;
1428}
1429
1430bool pre_read_files(struct thread_data *td)
1431{
1432 struct fio_file *f;
1433 unsigned int i;
1434
1435 dprint(FD_FILE, "pre_read files\n");
1436
1437 for_each_file(td, f, i) {
1438 if (!pre_read_file(td, f))
1439 return false;
1440 }
1441
1442 return true;
1443}
1444
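/*
 * Initialize the per-file non-uniform random distribution (zipf, pareto
 * or gauss) over the number of minimum-block-size ranges in the file.
 */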
1445static void __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1446{
1447 unsigned int range_size, seed;
1448 uint64_t nranges;
1449 uint64_t fsize;
1450
1451 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1452 fsize = min(f->real_file_size, f->io_size);
1453
1454 nranges = (fsize + range_size - 1ULL) / range_size;
1455
1456 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1457 if (!td->o.rand_repeatable)
1458 seed = td->rand_seeds[4];
1459
1460 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1461 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, td->o.random_center.u.f, seed);
1462 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1463 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, td->o.random_center.u.f, seed);
1464 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1465 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, td->o.random_center.u.f, seed);
1466}
1467
1468static bool init_rand_distribution(struct thread_data *td)
1469{
1470 struct fio_file *f;
1471 unsigned int i;
1472 int state;
1473
1474 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM ||
1475 td->o.random_distribution == FIO_RAND_DIST_ZONED ||
1476 td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
1477 return false;
1478
1479 state = td_bump_runstate(td, TD_SETTING_UP);
1480
1481 for_each_file(td, f, i)
1482 __init_rand_distribution(td, f);
1483
1484 td_restore_runstate(td, state);
1485 return true;
1486}
1487
1488/*
1489 * Check if the number of blocks exceeds the randomness capability of
 * the selected generator. Tausworthe is 32-bit, the others are fully
1491 * 64-bit capable.
1492 */
1493static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1494 uint64_t blocks)
1495{
1496 if (blocks <= FRAND32_MAX)
1497 return 0;
1498 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1499 return 0;
1500
1501 /*
1502 * If the user hasn't specified a random generator, switch
1503 * to tausworthe64 with informational warning. If the user did
1504 * specify one, just warn.
1505 */
1506 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1507 f->file_name);
1508
1509 if (!fio_option_is_set(&td->o, random_generator)) {
1510 log_info("fio: Switching to tausworthe64. Use the "
1511 "random_generator= option to get rid of this "
1512 "warning.\n");
1513 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1514 return 0;
1515 }
1516
1517 /*
	 * Just log this as information, to avoid breaking scripts.
1519 */
1520 log_info("fio: Use the random_generator= option to switch to lfsr or "
1521 "tausworthe64.\n");
1522 return 0;
1523}
1524
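/*
 * Set up whatever will be used to generate and track random block offsets
 * for each file: an LFSR, an axmap-based random map, or nothing at all
 * when norandommap is set.
 */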
1525bool init_random_map(struct thread_data *td)
1526{
1527 unsigned long long blocks;
1528 struct fio_file *f;
1529 unsigned int i;
1530
1531 if (init_rand_distribution(td))
1532 return true;
1533 if (!td_random(td))
1534 return true;
1535
1536 for_each_file(td, f, i) {
1537 uint64_t fsize = min(f->real_file_size, f->io_size);
1538
1539 if (td->o.zone_mode == ZONE_MODE_STRIDED)
1540 fsize = td->o.zone_range;
1541
1542 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1543
1544 if (check_rand_gen_limits(td, f, blocks))
1545 return false;
1546
1547 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1548 uint64_t seed;
1549
1550 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1551
1552 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1553 fio_file_set_lfsr(f);
1554 continue;
1555 } else {
1556 log_err("fio: failed initializing LFSR\n");
1557 return false;
1558 }
1559 } else if (!td->o.norandommap) {
1560 f->io_axmap = axmap_new(blocks);
1561 if (f->io_axmap) {
1562 fio_file_set_axmap(f);
1563 continue;
1564 }
1565 } else if (td->o.norandommap)
1566 continue;
1567
1568 if (!td->o.softrandommap) {
1569 log_err("fio: failed allocating random map. If running"
1570 " a large number of jobs, try the 'norandommap'"
1571 " option or set 'softrandommap'. Or give"
1572 " a larger --alloc-size to fio.\n");
1573 return false;
1574 }
1575
1576 log_info("fio: file %s failed allocating random map. Running "
1577 "job without.\n", f->file_name);
1578 }
1579
1580 return true;
1581}
1582
1583void close_files(struct thread_data *td)
1584{
1585 struct fio_file *f;
1586 unsigned int i;
1587
1588 for_each_file(td, f, i) {
1589 if (fio_file_open(f))
1590 td_io_close_file(td, f);
1591 }
1592}
1593
1594void fio_file_free(struct fio_file *f)
1595{
1596 if (fio_file_axmap(f))
1597 axmap_free(f->io_axmap);
1598 if (!fio_file_smalloc(f)) {
1599 free(f->file_name);
1600 free(f);
1601 } else {
1602 sfree(f->file_name);
1603 sfree(f);
1604 }
1605}
1606
1607void close_and_free_files(struct thread_data *td)
1608{
1609 struct fio_file *f;
1610 unsigned int i;
1611
1612 dprint(FD_FILE, "close files\n");
1613
1614 for_each_file(td, f, i) {
1615 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1616 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1617 td_io_unlink_file(td, f);
1618 }
1619
1620 if (fio_file_open(f))
1621 td_io_close_file(td, f);
1622
1623 remove_file_hash(f);
1624
1625 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1626 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1627 td_io_unlink_file(td, f);
1628 }
1629
1630 zbd_close_file(f);
1631 fio_file_free(f);
1632 }
1633
1634 td->o.filename = NULL;
1635 free(td->files);
1636 free(td->file_locks);
1637 td->files_index = 0;
1638 td->files = NULL;
1639 td->file_locks = NULL;
1640 td->o.file_lock_mode = FILE_LOCK_NONE;
1641 td->o.nr_files = 0;
1642}
1643
1644static void get_file_type(struct fio_file *f)
1645{
1646 struct stat sb;
1647
1648 if (!strcmp(f->file_name, "-"))
1649 f->filetype = FIO_TYPE_PIPE;
1650 else
1651 f->filetype = FIO_TYPE_FILE;
1652
1653#ifdef WIN32
1654 /* \\.\ is the device namespace in Windows, where every file is
1655 * a block device */
1656 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1657 f->filetype = FIO_TYPE_BLOCK;
1658#endif
1659
1660 if (!stat(f->file_name, &sb)) {
1661 if (S_ISBLK(sb.st_mode))
1662 f->filetype = FIO_TYPE_BLOCK;
1663 else if (S_ISCHR(sb.st_mode))
1664 f->filetype = FIO_TYPE_CHAR;
1665 else if (S_ISFIFO(sb.st_mode))
1666 f->filetype = FIO_TYPE_PIPE;
1667 }
1668}
1669
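/*
 * Check whether a filename was already allocated by a sibling job. The
 * bloom filter gives a quick negative answer; a hit is confirmed by
 * walking filename_list for an exact match.
 */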
1670static bool __is_already_allocated(const char *fname, bool set)
1671{
1672 struct flist_head *entry;
1673 bool ret;
1674
1675 ret = file_bloom_exists(fname, set);
1676 if (!ret)
1677 return ret;
1678
1679 flist_for_each(entry, &filename_list) {
1680 struct file_name *fn;
1681
1682 fn = flist_entry(entry, struct file_name, list);
1683
1684 if (!strcmp(fn->filename, fname))
1685 return true;
1686 }
1687
1688 return false;
1689}
1690
1691static bool is_already_allocated(const char *fname)
1692{
1693 bool ret;
1694
1695 fio_file_hash_lock();
1696 ret = __is_already_allocated(fname, false);
1697 fio_file_hash_unlock();
1698
1699 return ret;
1700}
1701
1702static void set_already_allocated(const char *fname)
1703{
1704 struct file_name *fn;
1705
1706 fn = malloc(sizeof(struct file_name));
1707 fn->filename = strdup(fname);
1708
1709 fio_file_hash_lock();
1710 if (!__is_already_allocated(fname, true)) {
1711 flist_add_tail(&fn->list, &filename_list);
1712 fn = NULL;
1713 }
1714 fio_file_hash_unlock();
1715
1716 if (fn) {
1717 free(fn->filename);
1718 free(fn);
1719 }
1720}
1721
1722static void free_already_allocated(void)
1723{
1724 struct flist_head *entry, *tmp;
1725 struct file_name *fn;
1726
1727 if (flist_empty(&filename_list))
1728 return;
1729
1730 fio_file_hash_lock();
1731 flist_for_each_safe(entry, tmp, &filename_list) {
1732 fn = flist_entry(entry, struct file_name, list);
1733 free(fn->filename);
1734 flist_del(&fn->list);
1735 free(fn);
1736 }
1737
1738 fio_file_hash_unlock();
1739}
1740
1741static struct fio_file *alloc_new_file(struct thread_data *td)
1742{
1743 struct fio_file *f;
1744
1745 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1746 f = calloc(1, sizeof(*f));
1747 else
1748 f = scalloc(1, sizeof(*f));
1749 if (!f) {
1750 assert(0);
1751 return NULL;
1752 }
1753
1754 f->fd = -1;
1755 f->shadow_fd = -1;
1756 fio_file_reset(td, f);
1757 if (!td_ioengine_flagged(td, FIO_NOFILEHASH))
1758 fio_file_set_smalloc(f);
1759 return f;
1760}
1761
1762bool exists_and_not_regfile(const char *filename)
1763{
1764 struct stat sb;
1765
1766 if (lstat(filename, &sb) == -1)
1767 return false;
1768
1769#ifndef WIN32 /* NOT Windows */
1770 if (S_ISREG(sb.st_mode))
1771 return false;
1772#else
1773 /* \\.\ is the device namespace in Windows, where every file
1774 * is a device node */
1775 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1776 return false;
1777#endif
1778
1779 return true;
1780}
1781
1782int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1783{
1784 int cur_files = td->files_index;
1785 char file_name[PATH_MAX];
1786 struct fio_file *f;
1787 int len = 0;
1788
1789 dprint(FD_FILE, "add file %s\n", fname);
1790
1791 if (td->o.directory)
1792 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1793 td->o.unique_filename);
1794
1795 sprintf(file_name + len, "%s", fname);
1796
1797 /* clean cloned siblings using existing files */
1798 if (numjob && is_already_allocated(file_name) &&
1799 !exists_and_not_regfile(fname))
1800 return 0;
1801
1802 f = alloc_new_file(td);
1803
1804 if (td->files_size <= td->files_index) {
1805 unsigned int new_size = td->o.nr_files + 1;
1806
1807 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1808
1809 td->files = realloc(td->files, new_size * sizeof(f));
1810 if (td->files == NULL) {
1811 log_err("fio: realloc OOM\n");
1812 assert(0);
1813 }
1814 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1815 td->file_locks = realloc(td->file_locks, new_size);
1816 if (!td->file_locks) {
1817 log_err("fio: realloc OOM\n");
1818 assert(0);
1819 }
1820 td->file_locks[cur_files] = FILE_LOCK_NONE;
1821 }
1822 td->files_size = new_size;
1823 }
1824 td->files[cur_files] = f;
1825 f->fileno = cur_files;
1826
1827 /*
1828 * init function, io engine may not be loaded yet
1829 */
1830 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1831 f->real_file_size = -1ULL;
1832
1833 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1834 f->file_name = strdup(file_name);
1835 else
1836 f->file_name = smalloc_strdup(file_name);
1837
1838 /* can't handle smalloc failure from here */
1839 assert(f->file_name);
1840
1841 get_file_type(f);
1842
1843 switch (td->o.file_lock_mode) {
1844 case FILE_LOCK_NONE:
1845 break;
1846 case FILE_LOCK_READWRITE:
1847 f->rwlock = fio_rwlock_init();
1848 break;
1849 case FILE_LOCK_EXCLUSIVE:
1850 f->lock = fio_sem_init(FIO_SEM_UNLOCKED);
1851 break;
1852 default:
1853 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1854 assert(0);
1855 }
1856
1857 td->files_index++;
1858
1859 if (td->o.numjobs > 1)
1860 set_already_allocated(file_name);
1861
1862 if (inc)
1863 td->o.nr_files++;
1864
1865 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1866 cur_files);
1867
1868 return cur_files;
1869}
1870
1871int add_file_exclusive(struct thread_data *td, const char *fname)
1872{
1873 struct fio_file *f;
1874 unsigned int i;
1875
1876 for_each_file(td, f, i) {
1877 if (!strcmp(f->file_name, fname))
1878 return i;
1879 }
1880
1881 return add_file(td, fname, 0, 1);
1882}
1883
1884void get_file(struct fio_file *f)
1885{
1886 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1887 assert(fio_file_open(f));
1888 f->references++;
1889}
1890
1891int put_file(struct thread_data *td, struct fio_file *f)
1892{
1893 int f_ret = 0, ret = 0;
1894
1895 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1896
1897 if (!fio_file_open(f)) {
1898 assert(f->fd == -1);
1899 return 0;
1900 }
1901
1902 assert(f->references);
1903 if (--f->references)
1904 return 0;
1905
1906 disk_util_dec(f->du);
1907
1908 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1909 unlock_file_all(td, f);
1910
1911 if (should_fsync(td) && td->o.fsync_on_close) {
1912 f_ret = fsync(f->fd);
1913 if (f_ret < 0)
1914 f_ret = errno;
1915 }
1916
1917 if (td->io_ops->close_file)
1918 ret = td->io_ops->close_file(td, f);
1919
1920 if (!ret)
1921 ret = f_ret;
1922
1923 td->nr_open_files--;
1924 fio_file_clear_closing(f);
1925 fio_file_clear_open(f);
1926 assert(f->fd == -1);
1927 return ret;
1928}
1929
1930void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1931{
1932 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1933 return;
1934
1935 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1936 if (ddir == DDIR_READ)
1937 fio_rwlock_read(f->rwlock);
1938 else
1939 fio_rwlock_write(f->rwlock);
1940 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1941 fio_sem_down(f->lock);
1942
1943 td->file_locks[f->fileno] = td->o.file_lock_mode;
1944}
1945
1946void unlock_file(struct thread_data *td, struct fio_file *f)
1947{
1948 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1949 return;
1950
1951 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1952 fio_rwlock_unlock(f->rwlock);
1953 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1954 fio_sem_up(f->lock);
1955
1956 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1957}
1958
1959void unlock_file_all(struct thread_data *td, struct fio_file *f)
1960{
1961 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1962 return;
1963 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1964 unlock_file(td, f);
1965}
1966
1967static bool recurse_dir(struct thread_data *td, const char *dirname)
1968{
1969 struct dirent *dir;
1970 bool ret = false;
1971 DIR *D;
1972
1973 D = opendir(dirname);
1974 if (!D) {
1975 char buf[FIO_VERROR_SIZE];
1976
1977 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1978 td_verror(td, errno, buf);
1979 return true;
1980 }
1981
1982 while ((dir = readdir(D)) != NULL) {
1983 char full_path[PATH_MAX];
1984 struct stat sb;
1985
1986 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1987 continue;
1988
1989 sprintf(full_path, "%s%c%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1990
		if (lstat(full_path, &sb) == -1) {
			if (errno != ENOENT) {
				td_verror(td, errno, "stat");
				ret = true;
				break;
			}
			/* entry went away between readdir() and lstat(); skip it */
			continue;
		}
1998
1999 if (S_ISREG(sb.st_mode)) {
2000 add_file(td, full_path, 0, 1);
2001 continue;
2002 }
2003 if (!S_ISDIR(sb.st_mode))
2004 continue;
2005
2006 ret = recurse_dir(td, full_path);
2007 if (ret)
2008 break;
2009 }
2010
2011 closedir(D);
2012 return ret;
2013}
2014
2015int add_dir_files(struct thread_data *td, const char *path)
2016{
2017 int ret = recurse_dir(td, path);
2018
2019 if (!ret)
2020 log_info("fio: opendir added %d files\n", td->o.nr_files);
2021
2022 return ret;
2023}
2024
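/*
 * Clone the file list of the originating job, sharing the lock objects so
 * that file locking still coordinates across the cloned jobs.
 */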
2025void dup_files(struct thread_data *td, struct thread_data *org)
2026{
2027 struct fio_file *f;
2028 unsigned int i;
2029
2030 dprint(FD_FILE, "dup files: %d\n", org->files_index);
2031
2032 if (!org->files)
2033 return;
2034
2035 td->files = malloc(org->files_index * sizeof(f));
2036
2037 if (td->o.file_lock_mode != FILE_LOCK_NONE)
2038 td->file_locks = malloc(org->files_index);
2039
2040 for_each_file(org, f, i) {
2041 struct fio_file *__f;
2042
2043 __f = alloc_new_file(td);
2044
2045 if (f->file_name) {
2046 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
2047 __f->file_name = strdup(f->file_name);
2048 else
2049 __f->file_name = smalloc_strdup(f->file_name);
2050
2051 /* can't handle smalloc failure from here */
2052 assert(__f->file_name);
2053 __f->filetype = f->filetype;
2054 }
2055
2056 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
2057 __f->lock = f->lock;
2058 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
2059 __f->rwlock = f->rwlock;
2060
2061 td->files[i] = __f;
2062 }
2063}
2064
2065/*
2066 * Returns the index that matches the filename, or -1 if not there
2067 */
2068int get_fileno(struct thread_data *td, const char *fname)
2069{
2070 struct fio_file *f;
2071 unsigned int i;
2072
2073 for_each_file(td, f, i)
2074 if (!strcmp(f->file_name, fname))
2075 return i;
2076
2077 return -1;
2078}
2079
2080/*
2081 * For log usage, where we add/open/close files automatically
2082 */
2083void free_release_files(struct thread_data *td)
2084{
2085 close_files(td);
2086 td->o.nr_files = 0;
2087 td->o.open_files = 0;
2088 td->files_index = 0;
2089}
2090
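/*
 * Reset per-file I/O state (last positions, random map or LFSR, zoned
 * device state) so the file starts from a clean slate when reused.
 */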
2091void fio_file_reset(struct thread_data *td, struct fio_file *f)
2092{
2093 int i;
2094
2095 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2096 f->last_pos[i] = f->file_offset;
2097 f->last_start[i] = -1ULL;
2098 }
2099
2100 if (fio_file_axmap(f))
2101 axmap_reset(f->io_axmap);
2102 else if (fio_file_lfsr(f))
2103 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
2104
2105 zbd_file_reset(td, f);
2106}
2107
2108bool fio_files_done(struct thread_data *td)
2109{
2110 struct fio_file *f;
2111 unsigned int i;
2112
2113 for_each_file(td, f, i)
2114 if (!fio_file_done(f))
2115 return false;
2116
2117 return true;
2118}
2119
2120/* free memory used in initialization phase only */
2121void filesetup_mem_free(void)
2122{
2123 free_already_allocated();
2124}
2125
2126/*
2127 * This function is for platforms which support direct I/O but not O_DIRECT.
2128 */
2129int fio_set_directio(struct thread_data *td, struct fio_file *f)
2130{
2131#ifdef FIO_OS_DIRECTIO
2132 int ret = fio_set_odirect(f);
2133
2134 if (ret) {
2135 td_verror(td, ret, "fio_set_directio");
2136#if defined(__sun__)
2137 if (ret == ENOTTY) { /* ENOTTY suggests RAW device or ZFS */
2138 log_err("fio: doing directIO to RAW devices or ZFS not supported\n");
2139 } else {
2140 log_err("fio: the file system does not seem to support direct IO\n");
2141 }
2142#else
2143 log_err("fio: the file system does not seem to support direct IO\n");
2144#endif
2145 return -1;
2146 }
2147
2148 return 0;
2149#else
2150 log_err("fio: direct IO is not supported on this host operating system\n");
2151 return -1;
2152#endif
2153}