fio: replace malloc+memset with calloc
[fio.git] / filesetup.c
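Zero-filled buffer allocations now use calloc() directly instead of malloc() followed by memset(). A minimal sketch of the pattern being replaced (illustrative only, not the literal diff; "b" and "bs" as used in the layout helpers below):

    /* before: allocate, then zero in a second step */
    b = malloc(bs);
    memset(b, 0, bs);

    /* after: one call that returns already-zeroed memory */
    b = calloc(1, bs);

Since calloc(1, bs) guarantees zeroed memory, the separate memset() can be dropped with no change in behaviour.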
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8
9#include "fio.h"
10#include "smalloc.h"
11#include "filehash.h"
12#include "options.h"
13#include "os/os.h"
14#include "hash.h"
15#include "lib/axmap.h"
16#include "rwlock.h"
17#include "zbd.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static FLIST_HEAD(filename_list);
24
25/*
26 * List entry for filename_list
27 */
28struct file_name {
29 struct flist_head list;
30 char *filename;
31};
32
33static inline void clear_error(struct thread_data *td)
34{
35 td->error = 0;
36 td->verror[0] = '\0';
37}
38
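/*
 * Pre-allocate the whole file with the platform's native fallocate
 * helper. Returns false on success and true on failure (for instance
 * ENOSYS when the mechanism isn't implemented).
 */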
39static int native_fallocate(struct thread_data *td, struct fio_file *f)
40{
41 bool success;
42
43 success = fio_fallocate(f, 0, f->real_file_size);
44 dprint(FD_FILE, "native fallocate of file %s size %llu was "
45 "%ssuccessful\n", f->file_name,
46 (unsigned long long) f->real_file_size,
47 !success ? "un": "");
48
49 if (success)
50 return false;
51
52 if (errno == ENOSYS)
53 dprint(FD_FILE, "native fallocate is not implemented\n");
54
55 return true;
56}
57
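/*
 * Lay out file blocks up front according to the configured
 * fallocate_mode. Skipped when fill_device is set, since the final
 * size is unknown in that case and the file is simply written out.
 */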
58static void fallocate_file(struct thread_data *td, struct fio_file *f)
59{
60 if (td->o.fill_device)
61 return;
62
63 switch (td->o.fallocate_mode) {
64 case FIO_FALLOCATE_NATIVE:
65 native_fallocate(td, f);
66 break;
67 case FIO_FALLOCATE_NONE:
68 break;
69#ifdef CONFIG_POSIX_FALLOCATE
70 case FIO_FALLOCATE_POSIX: {
71 int r;
72
73 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
74 f->file_name,
75 (unsigned long long) f->real_file_size);
76
77 r = posix_fallocate(f->fd, 0, f->real_file_size);
78 if (r > 0)
79 log_err("fio: posix_fallocate fails: %s\n", strerror(r));
80 break;
81 }
82#endif /* CONFIG_POSIX_FALLOCATE */
83#ifdef CONFIG_LINUX_FALLOCATE
84 case FIO_FALLOCATE_KEEP_SIZE: {
85 int r;
86
87 dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) "
88 "file %s size %llu\n", f->file_name,
89 (unsigned long long) f->real_file_size);
90
91 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size);
92 if (r != 0)
93 td_verror(td, errno, "fallocate");
94
95 break;
96 }
97#endif /* CONFIG_LINUX_FALLOCATE */
98 case FIO_FALLOCATE_TRUNCATE: {
99 int r;
100
101 dprint(FD_FILE, "ftruncate file %s size %llu\n",
102 f->file_name,
103 (unsigned long long) f->real_file_size);
104 r = ftruncate(f->fd, f->real_file_size);
105 if (r != 0)
106 td_verror(td, errno, "ftruncate");
107
108 break;
109 }
110 default:
111 log_err("fio: unknown fallocate mode: %d\n", td->o.fallocate_mode);
112 assert(0);
113 }
114}
115
116/*
117 * Leaves f->fd open on success, caller must close
118 */
119static int extend_file(struct thread_data *td, struct fio_file *f)
120{
121 int new_layout = 0, unlink_file = 0, flags;
122 unsigned long long left;
123 unsigned long long bs;
124 char *b = NULL;
125
126 if (read_only) {
127 log_err("fio: refusing extend of file due to read-only\n");
128 return 0;
129 }
130
131 /*
132 * check if we need to lay the file out completely again. fio
133 * does that for operations involving reads, or for writes
134 * where overwrite is set
135 */
136 if (td_read(td) ||
137 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
138 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
139 new_layout = 1;
140 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
141 unlink_file = 1;
142
143 if (unlink_file || new_layout) {
144 int ret;
145
146 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
147
148 ret = td_io_unlink_file(td, f);
149 if (ret != 0 && ret != ENOENT) {
150 td_verror(td, errno, "unlink");
151 return 1;
152 }
153 }
154
155 flags = O_WRONLY;
156 if (td->o.allow_create)
157 flags |= O_CREAT;
158 if (new_layout)
159 flags |= O_TRUNC;
160
161#ifdef WIN32
162 flags |= _O_BINARY;
163#endif
164
165 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
166 f->fd = open(f->file_name, flags, 0644);
167 if (f->fd < 0) {
168 int err = errno;
169
170 if (err == ENOENT && !td->o.allow_create)
171 log_err("fio: file creation disallowed by "
172 "allow_file_create=0\n");
173 else
174 td_verror(td, err, "open");
175 return 1;
176 }
177
178 fallocate_file(td, f);
179
180 /*
181 * If our jobs don't require regular files initially, we're done.
182 */
183 if (!new_layout)
184 goto done;
185
186 /*
187 * The size will be -1ULL when fill_device is used, so don't truncate
188 * or fallocate this file, just write it
189 */
190 if (!td->o.fill_device) {
191 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
192 (unsigned long long) f->real_file_size);
193 if (ftruncate(f->fd, f->real_file_size) == -1) {
194 if (errno != EFBIG) {
195 td_verror(td, errno, "ftruncate");
196 goto err;
197 }
198 }
199 }
200
201 left = f->real_file_size;
202 bs = td->o.max_bs[DDIR_WRITE];
203 if (bs > left)
204 bs = left;
205
206 b = malloc(bs);
207 if (!b) {
208 td_verror(td, errno, "malloc");
209 goto err;
210 }
211
212 while (left && !td->terminate) {
213 ssize_t r;
214
215 if (bs > left)
216 bs = left;
217
218 fill_io_buffer(td, b, bs, bs);
219
220 r = write(f->fd, b, bs);
221
222 if (r > 0) {
223 left -= r;
224 continue;
225 } else {
226 if (r < 0) {
227 int __e = errno;
228
229 if (__e == ENOSPC || __e == EDQUOT) {
230 const char *__e_name;
231 if (td->o.fill_device)
232 break;
233 if (__e == ENOSPC)
234 __e_name = "ENOSPC";
235 else
236 __e_name = "EDQUOT";
237 log_info("fio: %s on laying out "
238 "file, stopping\n", __e_name);
239 }
240 td_verror(td, errno, "write");
241 } else
242 td_verror(td, EIO, "write");
243
244 goto err;
245 }
246 }
247
248 if (td->terminate) {
249 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
250 td_io_unlink_file(td, f);
251 } else if (td->o.create_fsync) {
252 if (fsync(f->fd) < 0) {
253 td_verror(td, errno, "fsync");
254 goto err;
255 }
256 }
257 if (td->o.fill_device && !td_write(td)) {
258 fio_file_clear_size_known(f);
259 if (td_io_get_file_size(td, f))
260 goto err;
261 if (f->io_size > f->real_file_size)
262 f->io_size = f->real_file_size;
263 }
264
265 free(b);
266done:
267 return 0;
268err:
269 close(f->fd);
270 f->fd = -1;
271 if (b)
272 free(b);
273 return 1;
274}
275
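/*
 * Read the file once, sequentially, from file_offset over io_size so
 * that its contents are cached before the job proper starts (the
 * pre_read option). A no-op for pipes, char devices and no-I/O engines.
 */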
276static bool pre_read_file(struct thread_data *td, struct fio_file *f)
277{
278 int r, did_open = 0, old_runstate;
279 unsigned long long left;
280 unsigned long long bs;
281 bool ret = true;
282 char *b;
283
284 if (td_ioengine_flagged(td, FIO_PIPEIO) ||
285 td_ioengine_flagged(td, FIO_NOIO))
286 return true;
287
288 if (f->filetype == FIO_TYPE_CHAR)
289 return true;
290
291 if (!fio_file_open(f)) {
292 if (td->io_ops->open_file(td, f)) {
293 log_err("fio: cannot pre-read, failed to open file\n");
294 return false;
295 }
296 did_open = 1;
297 }
298
299 old_runstate = td_bump_runstate(td, TD_PRE_READING);
300
301 left = f->io_size;
302 bs = td->o.max_bs[DDIR_READ];
303 if (bs > left)
304 bs = left;
305
306 b = calloc(1, bs);
307 if (!b) {
308 td_verror(td, errno, "calloc");
309 ret = false;
310 goto error;
311 }
312
313 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
314 td_verror(td, errno, "lseek");
315 log_err("fio: failed to lseek pre-read file\n");
316 ret = false;
317 goto error;
318 }
319
320 while (left && !td->terminate) {
321 if (bs > left)
322 bs = left;
323
324 r = read(f->fd, b, bs);
325
326 if (r == (int) bs) {
327 left -= bs;
328 continue;
329 } else {
330 td_verror(td, EIO, "pre_read");
331 break;
332 }
333 }
334
335error:
336 td_restore_runstate(td, old_runstate);
337
338 if (did_open)
339 td->io_ops->close_file(td, f);
340
341 free(b);
342 return ret;
343}
344
345/*
346 * Generic function to prepopulate a regular file with data.
347 * Useful if you want to make sure the I/O engine has data to read.
348 * Leaves f->fd open on success, caller must close.
349 */
350int generic_prepopulate_file(struct thread_data *td, struct fio_file *f)
351{
352 int flags;
353 unsigned long long left, bs;
354 char *b = NULL;
355
356 /* generic function for regular files only */
357 assert(f->filetype == FIO_TYPE_FILE);
358
359 if (read_only) {
360 log_err("fio: refusing to write a file due to read-only\n");
361 return 0;
362 }
363
364 flags = O_WRONLY;
365 if (td->o.allow_create)
366 flags |= O_CREAT;
367
368#ifdef WIN32
369 flags |= _O_BINARY;
370#endif
371
372 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
373 f->fd = open(f->file_name, flags, 0644);
374 if (f->fd < 0) {
375 int err = errno;
376
377 if (err == ENOENT && !td->o.allow_create)
378 log_err("fio: file creation disallowed by "
379 "allow_file_create=0\n");
380 else
381 td_verror(td, err, "open");
382 return 1;
383 }
384
385 left = f->real_file_size;
386 bs = td->o.max_bs[DDIR_WRITE];
387 if (bs > left)
388 bs = left;
389
390 b = malloc(bs);
391 if (!b) {
392 td_verror(td, errno, "malloc");
393 goto err;
394 }
395
396 while (left && !td->terminate) {
397 ssize_t r;
398
399 if (bs > left)
400 bs = left;
401
402 fill_io_buffer(td, b, bs, bs);
403
404 r = write(f->fd, b, bs);
405
406 if (r > 0) {
407 left -= r;
408 } else {
409 td_verror(td, errno, "write");
410 goto err;
411 }
412 }
413
414 if (td->terminate) {
415 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
416 td_io_unlink_file(td, f);
417 } else if (td->o.create_fsync) {
418 if (fsync(f->fd) < 0) {
419 td_verror(td, errno, "fsync");
420 goto err;
421 }
422 }
423
424 free(b);
425 return 0;
426err:
427 close(f->fd);
428 f->fd = -1;
429 if (b)
430 free(b);
431 return 1;
432}
433
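/*
 * Pick a per-file size uniformly at random in the configured
 * [file_size_low, file_size_high] range, rounded down to a multiple of
 * rw_min_bs. Hypothetical example: with low=1M, high=5M and
 * rw_min_bs=4k, this returns a 4k-aligned size between 1M and 5M.
 */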
434unsigned long long get_rand_file_size(struct thread_data *td)
435{
436 unsigned long long ret, sized;
437 uint64_t frand_max;
438 uint64_t r;
439
440 frand_max = rand_max(&td->file_size_state);
441 r = __rand(&td->file_size_state);
442 sized = td->o.file_size_high - td->o.file_size_low;
443 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
444 ret += td->o.file_size_low;
445 ret -= (ret % td->o.rw_min_bs);
446 return ret;
447}
448
449static int file_size(struct thread_data *td, struct fio_file *f)
450{
451 struct stat st;
452
453 if (stat(f->file_name, &st) == -1) {
454 td_verror(td, errno, "stat");
455 return 1;
456 }
457
458 f->real_file_size = st.st_size;
459 return 0;
460}
461
462static int bdev_size(struct thread_data *td, struct fio_file *f)
463{
464 unsigned long long bytes = 0;
465 int r;
466
467 if (td->io_ops->open_file(td, f)) {
468 log_err("fio: failed opening blockdev %s for size check\n",
469 f->file_name);
470 return 1;
471 }
472
473 r = blockdev_size(f, &bytes);
474 if (r) {
475 td_verror(td, r, "blockdev_size");
476 goto err;
477 }
478
479 if (!bytes) {
480 log_err("%s: zero sized block device?\n", f->file_name);
481 goto err;
482 }
483
484 f->real_file_size = bytes;
485 td->io_ops->close_file(td, f);
486 return 0;
487err:
488 td->io_ops->close_file(td, f);
489 return 1;
490}
491
492static int char_size(struct thread_data *td, struct fio_file *f)
493{
494#ifdef FIO_HAVE_CHARDEV_SIZE
495 unsigned long long bytes = 0;
496 int r;
497
498 if (td->io_ops->open_file(td, f)) {
499 log_err("fio: failed opening chardev %s for size check\n",
500 f->file_name);
501 return 1;
502 }
503
504 r = chardev_size(f, &bytes);
505 if (r) {
506 td_verror(td, r, "chardev_size");
507 goto err;
508 }
509
510 if (!bytes) {
511 log_err("%s: zero sized char device?\n", f->file_name);
512 goto err;
513 }
514
515 f->real_file_size = bytes;
516 td->io_ops->close_file(td, f);
517 return 0;
518err:
519 td->io_ops->close_file(td, f);
520 return 1;
521#else
522 f->real_file_size = -1ULL;
523 return 0;
524#endif
525}
526
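/*
 * Fill in ->real_file_size for regular files, block devices and char
 * devices, then sanity check that ->file_offset doesn't extend past the
 * end. The result is cached via the size-known file flag.
 */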
527static int get_file_size(struct thread_data *td, struct fio_file *f)
528{
529 int ret = 0;
530
531 if (fio_file_size_known(f))
532 return 0;
533
534 if (f->filetype == FIO_TYPE_FILE)
535 ret = file_size(td, f);
536 else if (f->filetype == FIO_TYPE_BLOCK)
537 ret = bdev_size(td, f);
538 else if (f->filetype == FIO_TYPE_CHAR)
539 ret = char_size(td, f);
540 else {
541 f->real_file_size = -1;
542 log_info("%s: failed to get file size of %s\n", td->o.name,
543 f->file_name);
544 return 1; /* avoid offset extends end error message */
545 }
546
547 /*
548 * Leave ->real_file_size at 0, since that may simply be the
549 * expected initial state for regular files yet to be laid out.
550 */
551 if (ret)
552 return ret;
553
554 /*
555 * ->file_offset normally hasn't been initialized yet, so this
556 * is basically always false unless ->real_file_size is -1, but
557 * if ->real_file_size is -1 this message doesn't make sense.
558 * As a result, this message is basically useless.
559 */
560 if (f->file_offset > f->real_file_size) {
561 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
562 (unsigned long long) f->file_offset,
563 (unsigned long long) f->real_file_size);
564 return 1;
565 }
566
567 fio_file_set_size_known(f);
568 return 0;
569}
570
571static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
572 unsigned long long off,
573 unsigned long long len)
574{
575 int errval = 0, ret = 0;
576
577#ifdef CONFIG_ESX
578 return 0;
579#endif
580
581 if (len == -1ULL)
582 len = f->io_size;
583 if (off == -1ULL)
584 off = f->file_offset;
585
586 if (len == -1ULL || off == -1ULL)
587 return 0;
588
589 if (td->io_ops->invalidate) {
590 dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
591 f->file_name);
592 ret = td->io_ops->invalidate(td, f);
593 if (ret < 0)
594 errval = -ret;
595 } else if (td_ioengine_flagged(td, FIO_DISKLESSIO)) {
596 dprint(FD_IO, "invalidate not supported by ioengine %s\n",
597 td->io_ops->name);
598 } else if (f->filetype == FIO_TYPE_FILE) {
599 dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
600 f->file_name, off, len);
601 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
602 if (ret)
603 errval = ret;
604 } else if (f->filetype == FIO_TYPE_BLOCK) {
605 int retry_count = 0;
606
607 dprint(FD_IO, "drop page cache %s\n", f->file_name);
608 ret = blockdev_invalidate_cache(f);
609 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
610 /*
611 * Linux multipath devices reject ioctl while
612 * the maps are being updated. That window can
613 * last tens of milliseconds; we'll try up to
614 * a quarter of a second.
615 */
616 usleep(10000);
617 ret = blockdev_invalidate_cache(f);
618 }
619 if (ret < 0 && errno == EACCES && geteuid()) {
620 if (!fio_did_warn(FIO_WARN_ROOT_FLUSH)) {
621 log_err("fio: only root may flush block "
622 "devices. Cache flush bypassed!\n");
623 }
624 }
625 if (ret < 0)
626 errval = errno;
627 } else if (f->filetype == FIO_TYPE_CHAR ||
628 f->filetype == FIO_TYPE_PIPE) {
629 dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
630 }
631
632 /*
633 * A cache flush failure isn't a fatal condition, and we know it will
634 * happen on some platforms where we don't have the proper function
635 * to flush e.g. block device caches. So just warn and continue on
636 * our way.
637 */
638 if (errval)
639 log_info("fio: cache invalidation of %s failed: %s\n",
640 f->file_name, strerror(errval));
641
642 return 0;
643
644}
645
646int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
647{
648 if (!fio_file_open(f))
649 return 0;
650
651 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
652}
653
654int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
655{
656 int ret = 0;
657
658 dprint(FD_FILE, "fd close %s\n", f->file_name);
659
660 remove_file_hash(f);
661
662 if (close(f->fd) < 0)
663 ret = errno;
664
665 f->fd = -1;
666
667 if (f->shadow_fd != -1) {
668 close(f->shadow_fd);
669 f->shadow_fd = -1;
670 }
671
672 f->engine_pos = 0;
673 return ret;
674}
675
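/*
 * Open a file, reusing the lock of an already-hashed fio_file with the
 * same name if one exists. Returns 1 if the file was found in the hash,
 * 0 otherwise; the descriptor ends up in f->fd either way.
 */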
676int file_lookup_open(struct fio_file *f, int flags)
677{
678 struct fio_file *__f;
679 int from_hash;
680
681 __f = lookup_file_hash(f->file_name);
682 if (__f) {
683 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
684 f->lock = __f->lock;
685 from_hash = 1;
686 } else {
687 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
688 from_hash = 0;
689 }
690
691#ifdef WIN32
692 flags |= _O_BINARY;
693#endif
694
695 f->fd = open(f->file_name, flags, 0600);
696 return from_hash;
697}
698
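/*
 * Close any shadow fds stashed by generic_open_file(), freeing up file
 * descriptors when open() starts failing with EMFILE. Returns how many
 * were closed.
 */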
699static int file_close_shadow_fds(struct thread_data *td)
700{
701 struct fio_file *f;
702 int num_closed = 0;
703 unsigned int i;
704
705 for_each_file(td, f, i) {
706 if (f->shadow_fd == -1)
707 continue;
708
709 close(f->shadow_fd);
710 f->shadow_fd = -1;
711 num_closed++;
712 }
713
714 return num_closed;
715}
716
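/*
 * Default .open_file implementation: build open(2) flags from the job
 * options (O_DIRECT, sync, create, noatime), handle "-" as stdin/stdout,
 * and retry without O_NOATIME or after closing shadow fds if needed.
 */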
717int generic_open_file(struct thread_data *td, struct fio_file *f)
718{
719 int is_std = 0;
720 int flags = 0;
721 int from_hash = 0;
722
723 dprint(FD_FILE, "fd open %s\n", f->file_name);
724
725 if (!strcmp(f->file_name, "-")) {
726 if (td_rw(td)) {
727 log_err("fio: can't read/write to stdin/out\n");
728 return 1;
729 }
730 is_std = 1;
731
732 /*
733 * move output logging to stderr, if we are writing to stdout
734 */
735 if (td_write(td))
736 f_out = stderr;
737 }
738
739 if (td->o.odirect)
740 flags |= OS_O_DIRECT;
741 flags |= td->o.sync_io;
742 if (td->o.create_on_open && td->o.allow_create)
743 flags |= O_CREAT;
744 if (f->filetype != FIO_TYPE_FILE)
745 flags |= FIO_O_NOATIME;
746
747open_again:
748 if (td_write(td)) {
749 if (!read_only)
750 flags |= O_RDWR;
751
752 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
753 flags |= O_CREAT;
754
755 if (is_std)
756 f->fd = dup(STDOUT_FILENO);
757 else
758 from_hash = file_lookup_open(f, flags);
759 } else if (td_read(td)) {
760 if (td_ioengine_flagged(td, FIO_RO_NEEDS_RW_OPEN) && !read_only)
761 flags |= O_RDWR;
762 else
763 flags |= O_RDONLY;
764
765 if (is_std)
766 f->fd = dup(STDIN_FILENO);
767 else
768 from_hash = file_lookup_open(f, flags);
769 } else if (td_trim(td)) {
770 assert(!td_rw(td)); /* should have matched above */
771 if (!read_only)
772 flags |= O_RDWR;
773 from_hash = file_lookup_open(f, flags);
774 }
775
776 if (f->fd == -1) {
777 char buf[FIO_VERROR_SIZE];
778 int __e = errno;
779
780 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
781 flags &= ~FIO_O_NOATIME;
782 goto open_again;
783 }
784 if (__e == EMFILE && file_close_shadow_fds(td))
785 goto open_again;
786
787 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
788
789 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
790 log_err("fio: looks like your file system does not " \
791 "support direct=1/buffered=0\n");
792 }
793
794 td_verror(td, __e, buf);
795 return 1;
796 }
797
798 if (!from_hash && f->fd != -1) {
799 if (add_file_hash(f)) {
800 int fio_unused ret;
801
802 /*
803 * Stash away descriptor for later close. This is to
804 * work-around a "feature" on Linux, where a close of
805 * an fd that has been opened for write will trigger
806 * udev to call blkid to check partitions, fs id, etc.
807 * That pollutes the device cache, which can slow down
808 * unbuffered accesses.
809 */
810 if (f->shadow_fd == -1)
811 f->shadow_fd = f->fd;
812 else {
813 /*
814 * OK to ignore, we haven't done anything
815 * with it
816 */
817 ret = generic_close_file(td, f);
818 }
819 goto open_again;
820 }
821 }
822
823 return 0;
824}
825
826/*
827 * This function, which simply wraps get_file_size(), is the default
828 * .get_file_size implementation for the majority of I/O engines.
829 */
830int generic_get_file_size(struct thread_data *td, struct fio_file *f)
831{
832 return get_file_size(td, f);
833}
834
835/*
836 * open/close all files, so that ->real_file_size gets set
837 */
838static int get_file_sizes(struct thread_data *td)
839{
840 struct fio_file *f;
841 unsigned int i;
842 int err = 0;
843
844 for_each_file(td, f, i) {
845 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
846 f->file_name);
847
848 if (td_io_get_file_size(td, f)) {
849 if (td->error != ENOENT) {
850 log_err("%s\n", td->verror);
851 err = 1;
852 break;
853 }
854 clear_error(td);
855 }
856
857 /*
858 * There are corner cases where we end up with -1 for
859 * ->real_file_size due to unsupported file type, etc.
860 * We then just set it to the size option value divided by the
861 * number of files, similar to the way file ->io_size is set.
862 * stat(2) failure doesn't set ->real_file_size to -1.
863 */
864 if (f->real_file_size == -1ULL && td->o.size)
865 f->real_file_size = td->o.size / td->o.nr_files;
866 }
867
868 return err;
869}
870
871struct fio_mount {
872 struct flist_head list;
873 const char *base;
874 char __base[256];
875 unsigned int key;
876};
877
878/*
879 * Get the number of free bytes for each file on each unique mount.
880 */
881static unsigned long long get_fs_free_counts(struct thread_data *td)
882{
883 struct flist_head *n, *tmp;
884 unsigned long long ret = 0;
885 struct fio_mount *fm;
886 FLIST_HEAD(list);
887 struct fio_file *f;
888 unsigned int i;
889
890 for_each_file(td, f, i) {
891 struct stat sb;
892 char buf[256];
893
894 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
895 if (f->real_file_size != -1ULL)
896 ret += f->real_file_size;
897 continue;
898 } else if (f->filetype != FIO_TYPE_FILE)
899 continue;
900
901 snprintf(buf, FIO_ARRAY_SIZE(buf), "%s", f->file_name);
902
903 if (stat(buf, &sb) < 0) {
904 if (errno != ENOENT)
905 break;
906 strcpy(buf, ".");
907 if (stat(buf, &sb) < 0)
908 break;
909 }
910
911 fm = NULL;
912 flist_for_each(n, &list) {
913 fm = flist_entry(n, struct fio_mount, list);
914 if (fm->key == sb.st_dev)
915 break;
916
917 fm = NULL;
918 }
919
920 if (fm)
921 continue;
922
923 fm = calloc(1, sizeof(*fm));
924 snprintf(fm->__base, FIO_ARRAY_SIZE(fm->__base), "%s", buf);
925 fm->base = basename(fm->__base);
926 fm->key = sb.st_dev;
927 flist_add(&fm->list, &list);
928 }
929
930 flist_for_each_safe(n, tmp, &list) {
931 unsigned long long sz;
932
933 fm = flist_entry(n, struct fio_mount, list);
934 flist_del(&fm->list);
935
936 sz = get_fs_free_size(fm->base);
937 if (sz && sz != -1ULL)
938 ret += sz;
939
940 free(fm);
941 }
942
943 return ret;
944}
945
946uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
947{
948 bool align = false;
949 struct thread_options *o = &td->o;
950 unsigned long long align_bs;
951 unsigned long long offset;
952 unsigned long long increment;
953
954 if (o->file_append && f->filetype == FIO_TYPE_FILE)
955 return f->real_file_size;
956
957 if (o->offset_increment_percent) {
958 assert(!o->offset_increment);
959 increment = o->offset_increment_percent * f->real_file_size / 100;
960 align = true;
961 } else
962 increment = o->offset_increment;
963
964 if (o->start_offset_percent > 0) {
965 /* calculate the raw offset */
966 offset = (f->real_file_size * o->start_offset_percent / 100) +
967 (td->subjob_number * increment);
968
969 align = true;
970 } else {
971 /* start_offset_percent not set */
972 offset = o->start_offset +
973 td->subjob_number * increment;
974 }
975
976 if (align) {
977 /*
978 * if offset_align is provided, use it
979 */
980 if (fio_option_is_set(o, start_offset_align)) {
981 align_bs = o->start_offset_align;
982 } else {
983 /* else take the minimum block size */
984 align_bs = td_min_bs(td);
985 }
986
987 /*
988 * block align the offset at the next available boundary at
989 * ceiling(offset / align_bs) * align_bs
990 */
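		/*
		 * e.g. (hypothetical numbers) offset = 10000, align_bs = 4096:
		 * (10000 / 4096 + 1) * 4096 = 12288, the next aligned boundary.
		 */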
991 offset = (offset / align_bs + (offset % align_bs != 0)) * align_bs;
992 }
993
994 return offset;
995}
996
997/*
998 * Find longest path component that exists and return its length
999 */
1000int longest_existing_path(char *path) {
1001 char buf[PATH_MAX];
1002 bool done;
1003 char *buf_pos;
1004 int offset;
1005#ifdef WIN32
1006 DWORD dwAttr;
1007#else
1008 struct stat sb;
1009#endif
1010
1011 sprintf(buf, "%s", path);
1012 done = false;
1013 while (!done) {
1014 buf_pos = strrchr(buf, FIO_OS_PATH_SEPARATOR);
1015 if (!buf_pos) {
1016 offset = 0;
1017 break;
1018 }
1019
1020 *(buf_pos + 1) = '\0';
1021
1022#ifdef WIN32
1023 dwAttr = GetFileAttributesA(buf);
1024 if (dwAttr != INVALID_FILE_ATTRIBUTES) {
1025 done = true;
1026 }
1027#else
1028 if (stat(buf, &sb) == 0)
1029 done = true;
1030#endif
1031 if (done)
1032 offset = buf_pos - buf;
1033 else
1034 *buf_pos = '\0';
1035 }
1036
1037 return offset;
1038}
1039
1040static bool create_work_dirs(struct thread_data *td, const char *fname)
1041{
1042 char path[PATH_MAX];
1043 char *start, *end;
1044 int offset;
1045
1046 snprintf(path, PATH_MAX, "%s", fname);
1047 start = path;
1048
1049 offset = longest_existing_path(path);
1050 end = start + offset;
1051 while ((end = strchr(end, FIO_OS_PATH_SEPARATOR)) != NULL) {
1052 if (end == start) {
1053 end++;
1054 continue;
1055 }
1056 *end = '\0';
1057 errno = 0;
1058 if (fio_mkdir(path, 0700) && errno != EEXIST) {
1059 log_err("fio: failed to create dir (%s): %s\n",
1060 start, strerror(errno));
1061 return false;
1062 }
1063 *end = FIO_OS_PATH_SEPARATOR;
1064 end++;
1065 }
1066 td->flags |= TD_F_DIRS_CREATED;
1067 return true;
1068}
1069
1070/*
1071 * Open the files and setup files sizes, creating files if necessary.
1072 */
1073int setup_files(struct thread_data *td)
1074{
1075 unsigned long long total_size, extend_size;
1076 struct thread_options *o = &td->o;
1077 struct fio_file *f;
1078 unsigned int i, nr_fs_extra = 0;
1079 int err = 0, need_extend;
1080 int old_state;
1081 const unsigned long long bs = td_min_bs(td);
1082 uint64_t fs = 0;
1083
1084 dprint(FD_FILE, "setup files\n");
1085
1086 old_state = td_bump_runstate(td, TD_SETTING_UP);
1087
1088 for_each_file(td, f, i) {
1089 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1090 strchr(f->file_name, FIO_OS_PATH_SEPARATOR) &&
1091 !(td->flags & TD_F_DIRS_CREATED) &&
1092 !create_work_dirs(td, f->file_name))
1093 goto err_out;
1094 }
1095
1096 /*
1097 * Find out physical size of files or devices for this thread,
1098 * before we determine I/O size and range of our targets.
1099 * If ioengine defines a setup() method, it's responsible for
1100 * opening the files and setting f->real_file_size to indicate
1101 * the valid range for that file.
1102 */
1103 if (td->io_ops->setup)
1104 err = td->io_ops->setup(td);
1105 else
1106 err = get_file_sizes(td);
1107
1108 if (err)
1109 goto err_out;
1110
1111 if (td->o.zone_mode == ZONE_MODE_ZBD) {
1112 err = zbd_init_files(td);
1113 if (err)
1114 goto err_out;
1115 }
1116 zbd_recalc_options_with_zone_granularity(td);
1117
1118 if (o->read_iolog_file)
1119 goto done;
1120
1121 /*
1122 * check sizes. if the files/devices do not exist and the size
1123 * isn't passed to fio, abort.
1124 */
1125 total_size = 0;
1126 for_each_file(td, f, i) {
1127 f->fileno = i;
1128 if (f->real_file_size == -1ULL)
1129 total_size = -1ULL;
1130 else
1131 total_size += f->real_file_size;
1132 }
1133
1134 if (o->fill_device)
1135 td->fill_device_size = get_fs_free_counts(td);
1136
1137 /*
1138 * device/file sizes are zero and no size given, punt
1139 */
1140 if ((!total_size || total_size == -1ULL) && !o->size &&
1141 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
1142 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
1143 log_err("%s: you need to specify size=\n", o->name);
1144 td_verror(td, EINVAL, "total_file_size");
1145 goto err_out;
1146 }
1147
1148 /*
1149 * Calculate per-file size and potential extra size for the
1150 * first files, if needed (i.e. if we don't have a fixed size).
1151 */
1152 if (!o->file_size_low && o->nr_files) {
1153 uint64_t all_fs;
1154
1155 fs = o->size / o->nr_files;
1156 all_fs = fs * o->nr_files;
1157
1158 if (all_fs < o->size)
1159 nr_fs_extra = (o->size - all_fs) / bs;
1160 }
1161
1162 /*
1163 * now file sizes are known, so we can set ->io_size. if size= is
1164 * not given, ->io_size is just equal to ->real_file_size. if size
1165 * is given, ->io_size is size / nr_files.
1166 */
1167 extend_size = total_size = 0;
1168 need_extend = 0;
1169 for_each_file(td, f, i) {
1170 f->file_offset = get_start_offset(td, f);
1171
1172 /*
1173 * Update ->io_size depending on options specified.
1174 * ->file_size_low being 0 means filesize option isn't set.
1175 * Non zero ->file_size_low equals ->file_size_high means
1176 * filesize option is set in a fixed size format.
1177 * Non zero ->file_size_low not equals ->file_size_high means
1178 * filesize option is set in a range format.
1179 */
1180 if (!o->file_size_low) {
1181 /*
1182 * no file size or range given, file size is equal to
1183 * total size divided by number of files. If the size
1184 * doesn't divide nicely with the min blocksize,
1185 * make the first files bigger.
1186 */
1187 f->io_size = fs;
1188 if (nr_fs_extra) {
1189 nr_fs_extra--;
1190 f->io_size += bs;
1191 }
1192
1193 /*
1194 * We normally don't come here for regular files, but
1195 * if the result is 0 for a regular file, set it to the
1196 * real file size. This could be size of the existing
1197 * one if it already exists, but otherwise will be set
1198 * to 0. A new file won't be created because
1199 * ->io_size + ->file_offset equals ->real_file_size.
1200 */
1201 if (!f->io_size) {
1202 if (f->file_offset > f->real_file_size)
1203 goto err_offset;
1204 f->io_size = f->real_file_size - f->file_offset;
1205 if (!f->io_size)
1206 log_info("fio: file %s may be ignored\n",
1207 f->file_name);
1208 }
1209 } else if (f->real_file_size < o->file_size_low ||
1210 f->real_file_size > o->file_size_high) {
1211 if (f->file_offset > o->file_size_low)
1212 goto err_offset;
1213 /*
1214 * file size given. if it's fixed, use that. if it's a
1215 * range, generate a random size in-between.
1216 */
1217 if (o->file_size_low == o->file_size_high)
1218 f->io_size = o->file_size_low - f->file_offset;
1219 else {
1220 f->io_size = get_rand_file_size(td)
1221 - f->file_offset;
1222 }
1223 } else
1224 f->io_size = f->real_file_size - f->file_offset;
1225
1226 if (f->io_size == -1ULL)
1227 total_size = -1ULL;
1228 else {
1229 uint64_t io_size;
1230
1231 if (o->size_percent && o->size_percent != 100) {
1232 uint64_t file_size;
1233
1234 file_size = f->io_size + f->file_offset;
1235 f->io_size = (file_size *
1236 o->size_percent) / 100;
1237 if (f->io_size > (file_size - f->file_offset))
1238 f->io_size = file_size - f->file_offset;
1239
1240 f->io_size -= (f->io_size % td_min_bs(td));
1241 }
1242
1243 io_size = f->io_size;
1244 if (o->io_size_percent && o->io_size_percent != 100) {
1245 io_size *= o->io_size_percent;
1246 io_size /= 100;
1247 }
1248
1249 total_size += io_size;
1250 }
1251
1252 if (f->filetype == FIO_TYPE_FILE &&
1253 (f->io_size + f->file_offset) > f->real_file_size) {
1254 if (!td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1255 !o->create_on_open) {
1256 need_extend++;
1257 extend_size += (f->io_size + f->file_offset);
1258 fio_file_set_extend(f);
1259 } else if (!td_ioengine_flagged(td, FIO_DISKLESSIO) ||
1260 (td_ioengine_flagged(td, FIO_DISKLESSIO) &&
1261 td_ioengine_flagged(td, FIO_FAKEIO)))
1262 f->real_file_size = f->io_size + f->file_offset;
1263 }
1264 }
1265
1266 if (td->o.block_error_hist) {
1267 int len;
1268
1269 assert(td->o.nr_files == 1); /* checked in fixup_options */
1270 f = td->files[0];
1271 len = f->io_size / td->o.bs[DDIR_TRIM];
1272 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1273 log_err("fio: cannot calculate block histogram with "
1274 "%d trim blocks, maximum %d\n",
1275 len, MAX_NR_BLOCK_INFOS);
1276 td_verror(td, EINVAL, "block_error_hist");
1277 goto err_out;
1278 }
1279
1280 td->ts.nr_block_infos = len;
1281 for (i = 0; i < len; i++)
1282 td->ts.block_infos[i] =
1283 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
1284 } else
1285 td->ts.nr_block_infos = 0;
1286
1287 if (!o->size || (total_size && o->size > total_size))
1288 o->size = total_size;
1289
1290 if (o->size < td_min_bs(td)) {
1291 log_err("fio: blocksize is larger than data set range\n");
1292 goto err_out;
1293 }
1294
1295 /*
1296 * See if we need to extend some files, typically needed when our
1297 * target regular files don't exist yet, but our jobs require them
1298 * initially due to read I/Os.
1299 */
1300 if (need_extend) {
1301 temp_stall_ts = 1;
1302 if (output_format & FIO_OUTPUT_NORMAL) {
1303 log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1304 o->name,
1305 need_extend > 1 ? "s" : "",
1306 need_extend,
1307 need_extend > 1 ? "s" : "",
1308 need_extend > 1 ? "total " : "",
1309 extend_size >> 20);
1310 }
1311
1312 for_each_file(td, f, i) {
1313 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1314
1315 if (!fio_file_extend(f))
1316 continue;
1317
1318 assert(f->filetype == FIO_TYPE_FILE);
1319 fio_file_clear_extend(f);
1320 if (!o->fill_device) {
1321 old_len = f->real_file_size;
1322 extend_len = f->io_size + f->file_offset -
1323 old_len;
1324 }
1325 f->real_file_size = (f->io_size + f->file_offset);
1326 err = extend_file(td, f);
1327 if (err)
1328 break;
1329
1330 err = __file_invalidate_cache(td, f, old_len,
1331 extend_len);
1332
1333 /*
1334 * Shut up static checker
1335 */
1336 if (f->fd != -1)
1337 close(f->fd);
1338
1339 f->fd = -1;
1340 if (err)
1341 break;
1342 }
1343 temp_stall_ts = 0;
1344 }
1345
1346 if (err)
1347 goto err_out;
1348
1349 /*
1350 * Prepopulate files with data. A read job may be expected to read some
1351 * "real" data instead of zeroed files (if no writes to the file occurred
1352 * prior to the read job). The engine has to provide a way to do that.
1353 */
1354 if (td->io_ops->prepopulate_file) {
1355 temp_stall_ts = 1;
1356
1357 for_each_file(td, f, i) {
1358 if (output_format & FIO_OUTPUT_NORMAL) {
1359 log_info("%s: Prepopulating IO file (%s)\n",
1360 o->name, f->file_name);
1361 }
1362
1363 err = td->io_ops->prepopulate_file(td, f);
1364 if (err)
1365 break;
1366
1367 err = __file_invalidate_cache(td, f, f->file_offset,
1368 f->io_size);
1369
1370 /*
1371 * Shut up static checker
1372 */
1373 if (f->fd != -1)
1374 close(f->fd);
1375
1376 f->fd = -1;
1377 if (err)
1378 break;
1379 }
1380 temp_stall_ts = 0;
1381 }
1382
1383 if (err)
1384 goto err_out;
1385
1386 /*
1387 * iolog already set the total io size, if we read back
1388 * stored entries.
1389 */
1390 if (!o->read_iolog_file) {
1391 if (o->io_size)
1392 td->total_io_size = o->io_size * o->loops;
1393 else
1394 td->total_io_size = o->size * o->loops;
1395 }
1396
1397done:
1398 if (td->o.zone_mode == ZONE_MODE_ZBD) {
1399 err = zbd_setup_files(td);
1400 if (err)
1401 goto err_out;
1402 }
1403
1404 if (o->create_only)
1405 td->done = 1;
1406
1407 td_restore_runstate(td, old_state);
1408
1409 if (td->o.fdp) {
1410 err = fdp_init(td);
1411 if (err)
1412 goto err_out;
1413 }
1414
1415 return 0;
1416
1417err_offset:
1418 log_err("%s: you need to specify valid offset=\n", o->name);
1419err_out:
1420 td_restore_runstate(td, old_state);
1421 return 1;
1422}
1423
1424bool pre_read_files(struct thread_data *td)
1425{
1426 struct fio_file *f;
1427 unsigned int i;
1428
1429 dprint(FD_FILE, "pre_read files\n");
1430
1431 for_each_file(td, f, i) {
1432 if (!pre_read_file(td, f))
1433 return false;
1434 }
1435
1436 return true;
1437}
1438
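/*
 * Seed the zipf/pareto/gauss state for one file, treating it as nranges
 * blocks of the smallest configured block size. The seed is derived from
 * the file name and thread number, unless rand_repeatable is disabled,
 * in which case the per-job block offset seed is used.
 */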
1439static void __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1440{
1441 unsigned int range_size, seed;
1442 uint64_t nranges;
1443 uint64_t fsize;
1444
1445 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1446 fsize = min(f->real_file_size, f->io_size);
1447
1448 nranges = (fsize + range_size - 1ULL) / range_size;
1449
1450 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1451 if (!td->o.rand_repeatable)
1452 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1453
1454 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1455 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, td->o.random_center.u.f, seed);
1456 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1457 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, td->o.random_center.u.f, seed);
1458 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1459 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, td->o.random_center.u.f, seed);
1460}
1461
1462static bool init_rand_distribution(struct thread_data *td)
1463{
1464 struct fio_file *f;
1465 unsigned int i;
1466 int state;
1467
1468 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM ||
1469 td->o.random_distribution == FIO_RAND_DIST_ZONED ||
1470 td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
1471 return false;
1472
1473 state = td_bump_runstate(td, TD_SETTING_UP);
1474
1475 for_each_file(td, f, i)
1476 __init_rand_distribution(td, f);
1477
1478 td_restore_runstate(td, state);
1479 return true;
1480}
1481
1482/*
1483 * Check if the number of blocks exceeds the randomness capability of
1484 * the selected generator. Tausworthe is 32-bit, the others are fully
1485 * 64-bit capable.
1486 */
1487static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1488 uint64_t blocks)
1489{
1490 if (blocks <= FRAND32_MAX)
1491 return 0;
1492 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1493 return 0;
1494
1495 /*
1496 * If the user hasn't specified a random generator, switch
1497 * to tausworthe64 with informational warning. If the user did
1498 * specify one, just warn.
1499 */
1500 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1501 f->file_name);
1502
1503 if (!fio_option_is_set(&td->o, random_generator)) {
1504 log_info("fio: Switching to tausworthe64. Use the "
1505 "random_generator= option to get rid of this "
1506 "warning.\n");
1507 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1508 return 0;
1509 }
1510
1511 /*
1512 * Just make this information to avoid breaking scripts.
1513 */
1514 log_info("fio: Use the random_generator= option to switch to lfsr or "
1515 "tausworthe64.\n");
1516 return 0;
1517}
1518
1519bool init_random_map(struct thread_data *td)
1520{
1521 unsigned long long blocks;
1522 struct fio_file *f;
1523 unsigned int i;
1524
1525 if (init_rand_distribution(td))
1526 return true;
1527 if (!td_random(td))
1528 return true;
1529
1530 for_each_file(td, f, i) {
1531 uint64_t fsize = min(f->real_file_size, f->io_size);
1532
1533 if (td->o.zone_mode == ZONE_MODE_STRIDED)
1534 fsize = td->o.zone_range;
1535
1536 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1537
1538 if (check_rand_gen_limits(td, f, blocks))
1539 return false;
1540
1541 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1542 uint64_t seed;
1543
1544 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1545
1546 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1547 fio_file_set_lfsr(f);
1548 continue;
1549 } else {
1550 log_err("fio: failed initializing LFSR\n");
1551 return false;
1552 }
1553 } else if (!td->o.norandommap) {
1554 f->io_axmap = axmap_new(blocks);
1555 if (f->io_axmap) {
1556 fio_file_set_axmap(f);
1557 continue;
1558 }
1559 } else if (td->o.norandommap)
1560 continue;
1561
1562 if (!td->o.softrandommap) {
1563 log_err("fio: failed allocating random map. If running"
1564 " a large number of jobs, try the 'norandommap'"
1565 " option or set 'softrandommap'. Or give"
1566 " a larger --alloc-size to fio.\n");
1567 return false;
1568 }
1569
1570 log_info("fio: file %s failed allocating random map. Running "
1571 "job without.\n", f->file_name);
1572 }
1573
1574 return true;
1575}
1576
1577void close_files(struct thread_data *td)
1578{
1579 struct fio_file *f;
1580 unsigned int i;
1581
1582 for_each_file(td, f, i) {
1583 if (fio_file_open(f))
1584 td_io_close_file(td, f);
1585 }
1586}
1587
1588void fio_file_free(struct fio_file *f)
1589{
1590 if (fio_file_axmap(f))
1591 axmap_free(f->io_axmap);
1592 if (f->ruhs_info)
1593 sfree(f->ruhs_info);
1594 if (!fio_file_smalloc(f)) {
1595 free(f->file_name);
1596 free(f);
1597 } else {
1598 sfree(f->file_name);
1599 sfree(f);
1600 }
1601}
1602
1603void close_and_free_files(struct thread_data *td)
1604{
1605 struct fio_file *f;
1606 unsigned int i;
1607
1608 dprint(FD_FILE, "close files\n");
1609
1610 for_each_file(td, f, i) {
1611 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1612 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1613 td_io_unlink_file(td, f);
1614 }
1615
1616 if (fio_file_open(f))
1617 td_io_close_file(td, f);
1618
1619 remove_file_hash(f);
1620
1621 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1622 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1623 td_io_unlink_file(td, f);
1624 }
1625
1626 zbd_close_file(f);
1627 fdp_free_ruhs_info(f);
1628 fio_file_free(f);
1629 }
1630
1631 td->o.filename = NULL;
1632 free(td->files);
1633 free(td->file_locks);
1634 td->files_index = 0;
1635 td->files = NULL;
1636 td->file_locks = NULL;
1637 td->o.file_lock_mode = FILE_LOCK_NONE;
1638 td->o.nr_files = 0;
1639}
1640
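/*
 * Classify the file as a regular file, block device, character device
 * or pipe, based on its name ("-" and the Windows \\.\ namespace are
 * special-cased) and on stat(2) if the path exists.
 */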
1641static void get_file_type(struct fio_file *f)
1642{
1643 struct stat sb;
1644
1645 if (!strcmp(f->file_name, "-"))
1646 f->filetype = FIO_TYPE_PIPE;
1647 else
1648 f->filetype = FIO_TYPE_FILE;
1649
1650#ifdef WIN32
1651 /* \\.\ is the device namespace in Windows, where every file is
1652 * a block device */
1653 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1654 f->filetype = FIO_TYPE_BLOCK;
1655#endif
1656
1657 if (!stat(f->file_name, &sb)) {
1658 if (S_ISBLK(sb.st_mode))
1659 f->filetype = FIO_TYPE_BLOCK;
1660 else if (S_ISCHR(sb.st_mode))
1661 f->filetype = FIO_TYPE_CHAR;
1662 else if (S_ISFIFO(sb.st_mode))
1663 f->filetype = FIO_TYPE_PIPE;
1664 }
1665}
1666
1667static bool __is_already_allocated(const char *fname, bool set)
1668{
1669 struct flist_head *entry;
1670 bool ret;
1671
1672 ret = file_bloom_exists(fname, set);
1673 if (!ret)
1674 return ret;
1675
1676 flist_for_each(entry, &filename_list) {
1677 struct file_name *fn;
1678
1679 fn = flist_entry(entry, struct file_name, list);
1680
1681 if (!strcmp(fn->filename, fname))
1682 return true;
1683 }
1684
1685 return false;
1686}
1687
1688static bool is_already_allocated(const char *fname)
1689{
1690 bool ret;
1691
1692 fio_file_hash_lock();
1693 ret = __is_already_allocated(fname, false);
1694 fio_file_hash_unlock();
1695
1696 return ret;
1697}
1698
1699static void set_already_allocated(const char *fname)
1700{
1701 struct file_name *fn;
1702
1703 fn = malloc(sizeof(struct file_name));
1704 fn->filename = strdup(fname);
1705
1706 fio_file_hash_lock();
1707 if (!__is_already_allocated(fname, true)) {
1708 flist_add_tail(&fn->list, &filename_list);
1709 fn = NULL;
1710 }
1711 fio_file_hash_unlock();
1712
1713 if (fn) {
1714 free(fn->filename);
1715 free(fn);
1716 }
1717}
1718
1719static void free_already_allocated(void)
1720{
1721 struct flist_head *entry, *tmp;
1722 struct file_name *fn;
1723
1724 if (flist_empty(&filename_list))
1725 return;
1726
1727 fio_file_hash_lock();
1728 flist_for_each_safe(entry, tmp, &filename_list) {
1729 fn = flist_entry(entry, struct file_name, list);
1730 free(fn->filename);
1731 flist_del(&fn->list);
1732 free(fn);
1733 }
1734
1735 fio_file_hash_unlock();
1736}
1737
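/*
 * Allocate and reset a fresh fio_file. It comes from the shared smalloc
 * pool unless the engine sets FIO_NOFILEHASH, in which case plain
 * calloc() is used.
 */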
1738static struct fio_file *alloc_new_file(struct thread_data *td)
1739{
1740 struct fio_file *f;
1741
1742 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1743 f = calloc(1, sizeof(*f));
1744 else
1745 f = scalloc(1, sizeof(*f));
1746 if (!f) {
1747 assert(0);
1748 return NULL;
1749 }
1750
1751 f->fd = -1;
1752 f->shadow_fd = -1;
1753 fio_file_reset(td, f);
1754 if (!td_ioengine_flagged(td, FIO_NOFILEHASH))
1755 fio_file_set_smalloc(f);
1756 return f;
1757}
1758
1759bool exists_and_not_regfile(const char *filename)
1760{
1761 struct stat sb;
1762
1763 if (lstat(filename, &sb) == -1)
1764 return false;
1765
1766#ifndef WIN32 /* NOT Windows */
1767 if (S_ISREG(sb.st_mode))
1768 return false;
1769#else
1770 /* \\.\ is the device namespace in Windows, where every file
1771 * is a device node */
1772 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1773 return false;
1774#endif
1775
1776 return true;
1777}
1778
1779int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1780{
1781 int cur_files = td->files_index;
1782 char file_name[PATH_MAX];
1783 struct fio_file *f;
1784 int len = 0;
1785
1786 dprint(FD_FILE, "add file %s\n", fname);
1787
1788 if (td->o.directory)
1789 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1790 td->o.unique_filename);
1791
1792 sprintf(file_name + len, "%s", fname);
1793
1794 /* clean cloned siblings using existing files */
1795 if (numjob && is_already_allocated(file_name) &&
1796 !exists_and_not_regfile(fname))
1797 return 0;
1798
1799 f = alloc_new_file(td);
1800
1801 if (td->files_size <= td->files_index) {
1802 unsigned int new_size = td->o.nr_files + 1;
1803
1804 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1805
1806 td->files = realloc(td->files, new_size * sizeof(f));
1807 if (td->files == NULL) {
1808 log_err("fio: realloc OOM\n");
1809 assert(0);
1810 }
1811 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1812 td->file_locks = realloc(td->file_locks, new_size);
1813 if (!td->file_locks) {
1814 log_err("fio: realloc OOM\n");
1815 assert(0);
1816 }
1817 td->file_locks[cur_files] = FILE_LOCK_NONE;
1818 }
1819 td->files_size = new_size;
1820 }
1821 td->files[cur_files] = f;
1822 f->fileno = cur_files;
1823
1824 /*
1825 * init function, io engine may not be loaded yet
1826 */
1827 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1828 f->real_file_size = -1ULL;
1829
1830 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
1831 f->file_name = strdup(file_name);
1832 else
1833 f->file_name = smalloc_strdup(file_name);
1834
1835 /* can't handle smalloc failure from here */
1836 assert(f->file_name);
1837
1838 get_file_type(f);
1839
1840 switch (td->o.file_lock_mode) {
1841 case FILE_LOCK_NONE:
1842 break;
1843 case FILE_LOCK_READWRITE:
1844 f->rwlock = fio_rwlock_init();
1845 break;
1846 case FILE_LOCK_EXCLUSIVE:
1847 f->lock = fio_sem_init(FIO_SEM_UNLOCKED);
1848 break;
1849 default:
1850 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1851 assert(0);
1852 }
1853
1854 td->files_index++;
1855
1856 if (td->o.numjobs > 1)
1857 set_already_allocated(file_name);
1858
1859 if (inc)
1860 td->o.nr_files++;
1861
1862 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1863 cur_files);
1864
1865 return cur_files;
1866}
1867
1868int add_file_exclusive(struct thread_data *td, const char *fname)
1869{
1870 struct fio_file *f;
1871 unsigned int i;
1872
1873 for_each_file(td, f, i) {
1874 if (!strcmp(f->file_name, fname))
1875 return i;
1876 }
1877
1878 return add_file(td, fname, 0, 1);
1879}
1880
1881void get_file(struct fio_file *f)
1882{
1883 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1884 assert(fio_file_open(f));
1885 f->references++;
1886}
1887
1888int put_file(struct thread_data *td, struct fio_file *f)
1889{
1890 int f_ret = 0, ret = 0;
1891
1892 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1893
1894 if (!fio_file_open(f)) {
1895 assert(f->fd == -1);
1896 return 0;
1897 }
1898
1899 assert(f->references);
1900 if (--f->references)
1901 return 0;
1902
1903 disk_util_dec(f->du);
1904
1905 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1906 unlock_file_all(td, f);
1907
1908 if (should_fsync(td) && td->o.fsync_on_close) {
1909 f_ret = fsync(f->fd);
1910 if (f_ret < 0)
1911 f_ret = errno;
1912 }
1913
1914 if (td->io_ops->close_file)
1915 ret = td->io_ops->close_file(td, f);
1916
1917 if (!ret)
1918 ret = f_ret;
1919
1920 td->nr_open_files--;
1921 fio_file_clear_closing(f);
1922 fio_file_clear_open(f);
1923 assert(f->fd == -1);
1924 return ret;
1925}
1926
1927void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1928{
1929 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1930 return;
1931
1932 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1933 if (ddir == DDIR_READ)
1934 fio_rwlock_read(f->rwlock);
1935 else
1936 fio_rwlock_write(f->rwlock);
1937 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1938 fio_sem_down(f->lock);
1939
1940 td->file_locks[f->fileno] = td->o.file_lock_mode;
1941}
1942
1943void unlock_file(struct thread_data *td, struct fio_file *f)
1944{
1945 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1946 return;
1947
1948 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1949 fio_rwlock_unlock(f->rwlock);
1950 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1951 fio_sem_up(f->lock);
1952
1953 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1954}
1955
1956void unlock_file_all(struct thread_data *td, struct fio_file *f)
1957{
1958 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1959 return;
1960 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1961 unlock_file(td, f);
1962}
1963
1964static bool recurse_dir(struct thread_data *td, const char *dirname)
1965{
1966 struct dirent *dir;
1967 bool ret = false;
1968 DIR *D;
1969
1970 D = opendir(dirname);
1971 if (!D) {
1972 char buf[FIO_VERROR_SIZE];
1973
1974 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1975 td_verror(td, errno, buf);
1976 return true;
1977 }
1978
1979 while ((dir = readdir(D)) != NULL) {
1980 char full_path[PATH_MAX];
1981 struct stat sb;
1982
1983 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1984 continue;
1985
1986 sprintf(full_path, "%s%c%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1987
1988 if (lstat(full_path, &sb) == -1) {
1989 if (errno != ENOENT) {
1990 td_verror(td, errno, "stat");
1991 ret = true;
1992 break;
1993 }
1994 }
1995
1996 if (S_ISREG(sb.st_mode)) {
1997 add_file(td, full_path, 0, 1);
1998 continue;
1999 }
2000 if (!S_ISDIR(sb.st_mode))
2001 continue;
2002
2003 ret = recurse_dir(td, full_path);
2004 if (ret)
2005 break;
2006 }
2007
2008 closedir(D);
2009 return ret;
2010}
2011
2012int add_dir_files(struct thread_data *td, const char *path)
2013{
2014 int ret = recurse_dir(td, path);
2015
2016 if (!ret)
2017 log_info("fio: opendir added %d files\n", td->o.nr_files);
2018
2019 return ret;
2020}
2021
2022void dup_files(struct thread_data *td, struct thread_data *org)
2023{
2024 struct fio_file *f;
2025 unsigned int i;
2026
2027 dprint(FD_FILE, "dup files: %d\n", org->files_index);
2028
2029 if (!org->files)
2030 return;
2031
2032 td->files = calloc(org->files_index, sizeof(f));
2033
2034 if (td->o.file_lock_mode != FILE_LOCK_NONE)
2035 td->file_locks = malloc(org->files_index);
2036
2037 assert(org->files_index >= org->o.nr_files);
2038 for_each_file(org, f, i) {
2039 struct fio_file *__f;
2040
2041 __f = alloc_new_file(td);
2042
2043 if (f->file_name) {
2044 if (td_ioengine_flagged(td, FIO_NOFILEHASH))
2045 __f->file_name = strdup(f->file_name);
2046 else
2047 __f->file_name = smalloc_strdup(f->file_name);
2048
2049 /* can't handle smalloc failure from here */
2050 assert(__f->file_name);
2051 __f->filetype = f->filetype;
2052 }
2053
2054 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
2055 __f->lock = f->lock;
2056 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
2057 __f->rwlock = f->rwlock;
2058
2059 td->files[i] = __f;
2060 }
2061}
2062
2063/*
2064 * Returns the index that matches the filename, or -1 if not there
2065 */
2066int get_fileno(struct thread_data *td, const char *fname)
2067{
2068 struct fio_file *f;
2069 unsigned int i;
2070
2071 for_each_file(td, f, i)
2072 if (!strcmp(f->file_name, fname))
2073 return i;
2074
2075 return -1;
2076}
2077
2078/*
2079 * For log usage, where we add/open/close files automatically
2080 */
2081void free_release_files(struct thread_data *td)
2082{
2083 close_files(td);
2084 td->o.nr_files = 0;
2085 td->o.open_files = 0;
2086 td->files_index = 0;
2087}
2088
2089void fio_file_reset(struct thread_data *td, struct fio_file *f)
2090{
2091 int i;
2092
2093 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2094 f->last_pos[i] = f->file_offset;
2095 f->last_start[i] = -1ULL;
2096 }
2097
2098 if (fio_file_axmap(f))
2099 axmap_reset(f->io_axmap);
2100 else if (fio_file_lfsr(f))
2101 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
2102
2103 zbd_file_reset(td, f);
2104}
2105
2106bool fio_files_done(struct thread_data *td)
2107{
2108 struct fio_file *f;
2109 unsigned int i;
2110
2111 for_each_file(td, f, i)
2112 if (!fio_file_done(f))
2113 return false;
2114
2115 return true;
2116}
2117
2118/* free memory used in initialization phase only */
2119void filesetup_mem_free(void)
2120{
2121 free_already_allocated();
2122}
2123
2124/*
2125 * This function is for platforms which support direct I/O but not O_DIRECT.
2126 */
2127int fio_set_directio(struct thread_data *td, struct fio_file *f)
2128{
2129#ifdef FIO_OS_DIRECTIO
2130 int ret = fio_set_odirect(f);
2131
2132 if (ret) {
2133 td_verror(td, ret, "fio_set_directio");
2134#if defined(__sun__)
2135 if (ret == ENOTTY) { /* ENOTTY suggests RAW device or ZFS */
2136 log_err("fio: doing directIO to RAW devices or ZFS not supported\n");
2137 } else {
2138 log_err("fio: the file system does not seem to support direct IO\n");
2139 }
2140#else
2141 log_err("fio: the file system does not seem to support direct IO\n");
2142#endif
2143 return -1;
2144 }
2145
2146 return 0;
2147#else
2148 log_err("fio: direct IO is not supported on this host operating system\n");
2149 return -1;
2150#endif
2151}