change fio_set_odirect() prototype not to use int fd
[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18#include "lib/memalign.h"
19
20#ifdef CONFIG_LINUX_FALLOCATE
21#include <linux/falloc.h>
22#endif
23
24static int root_warn;
25
26static FLIST_HEAD(filename_list);
27
28/*
29 * List entry for filename_list
30 */
31struct file_name {
32 struct flist_head list;
33 char *filename;
34};
35
36static inline void clear_error(struct thread_data *td)
37{
38 td->error = 0;
39 td->verror[0] = '\0';
40}
41
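/*
 * Pre-allocate the whole file with the OS native fallocate helper.
 * Returns 0 on success and -1 on failure; ENOSYS means the platform
 * does not implement it.
 */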
42static inline int native_fallocate(struct thread_data *td, struct fio_file *f)
43{
44 bool success;
45
46 success = fio_fallocate(f, 0, f->real_file_size);
47 dprint(FD_FILE, "native fallocate of file %s size %llu was "
48 "%ssuccessful\n", f->file_name,
49 (unsigned long long) f->real_file_size,
50 !success ? "un": "");
51
52 if (success)
53 return 0;
54
55 if (errno == ENOSYS)
56 dprint(FD_FILE, "native fallocate is not implemented\n");
57
58 return -1;
59}
60
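/*
 * Pre-allocate the file according to the fallocate_mode option,
 * unless fill_device is in use.
 */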
61static void fallocate_file(struct thread_data *td, struct fio_file *f)
62{
63 int r;
64
65 if (td->o.fill_device)
66 return;
67
68 switch (td->o.fallocate_mode) {
69 case FIO_FALLOCATE_NATIVE:
70 r = native_fallocate(td, f);
71 if (r != 0 && errno != ENOSYS)
72 log_err("fio: native_fallocate call failed: %s\n",
73 strerror(errno));
74 break;
75 case FIO_FALLOCATE_NONE:
76 break;
77#ifdef CONFIG_POSIX_FALLOCATE
78 case FIO_FALLOCATE_POSIX:
79 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
80 f->file_name,
81 (unsigned long long) f->real_file_size);
82
83 r = posix_fallocate(f->fd, 0, f->real_file_size);
84 if (r > 0)
85 log_err("fio: posix_fallocate fails: %s\n", strerror(r));
86 break;
87#endif /* CONFIG_POSIX_FALLOCATE */
88#ifdef CONFIG_LINUX_FALLOCATE
89 case FIO_FALLOCATE_KEEP_SIZE:
90 dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) "
91 "file %s size %llu\n", f->file_name,
92 (unsigned long long) f->real_file_size);
93
94 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size);
95 if (r != 0)
96 td_verror(td, errno, "fallocate");
97
98 break;
99#endif /* CONFIG_LINUX_FALLOCATE */
100 default:
101 log_err("fio: unknown fallocate mode: %d\n", td->o.fallocate_mode);
102 assert(0);
103 }
104}
105
106/*
107 * Leaves f->fd open on success, caller must close
108 */
109static int extend_file(struct thread_data *td, struct fio_file *f)
110{
111 int new_layout = 0, unlink_file = 0, flags;
112 unsigned long long left;
113 unsigned int bs;
114 char *b = NULL;
115
116 if (read_only) {
117 log_err("fio: refusing extend of file due to read-only\n");
118 return 0;
119 }
120
121 /*
122 * check if we need to lay the file out completely again. fio
123 * does that for operations involving reads, or for writes
124 * where overwrite is set
125 */
126 if (td_read(td) ||
127 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
128 (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
129 new_layout = 1;
130 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
131 unlink_file = 1;
132
133 if (unlink_file || new_layout) {
134 int ret;
135
136 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
137
138 ret = td_io_unlink_file(td, f);
139 if (ret != 0 && ret != ENOENT) {
140 td_verror(td, errno, "unlink");
141 return 1;
142 }
143 }
144
145 flags = O_WRONLY;
146 if (td->o.allow_create)
147 flags |= O_CREAT;
148 if (new_layout)
149 flags |= O_TRUNC;
150 if (td->o.odirect)
151 flags |= OS_O_DIRECT;
152
153#ifdef WIN32
154 flags |= _O_BINARY;
155#endif
156
157 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
158 f->fd = open(f->file_name, flags, 0644);
159 if (f->fd < 0) {
160 int err = errno;
161
162 if (err == ENOENT && !td->o.allow_create)
163 log_err("fio: file creation disallowed by "
164 "allow_file_create=0\n");
165 else {
166 if (err == EINVAL && (flags & OS_O_DIRECT))
167 log_err("fio: looks like your filesystem "
168 "does not support "
169 "direct=1/buffered=0\n");
170
171 td_verror(td, err, "open");
172 }
173 return 1;
174 }
175
176 fallocate_file(td, f);
177
178 /*
179 * If our jobs don't require regular files initially, we're done.
180 */
181 if (!new_layout)
182 goto done;
183
184 /*
185 * The size will be -1ULL when fill_device is used, so don't truncate
186 * or fallocate this file, just write it
187 */
188 if (!td->o.fill_device) {
189 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
190 (unsigned long long) f->real_file_size);
191 if (ftruncate(f->fd, f->real_file_size) == -1) {
192 if (errno != EFBIG) {
193 td_verror(td, errno, "ftruncate");
194 goto err;
195 }
196 }
197 }
198
199 if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))
200 goto err;
201
202 left = f->real_file_size;
203 bs = td->o.max_bs[DDIR_WRITE];
204 if (bs > left)
205 bs = left;
206
207 b = fio_memalign(page_size, bs);
208 if (!b) {
209 td_verror(td, errno, "fio_memalign");
210 goto err;
211 }
212
213 while (left && !td->terminate) {
214 ssize_t r;
215
216 if (bs > left)
217 bs = left;
218
219 fill_io_buffer(td, b, bs, bs);
220
221 r = write(f->fd, b, bs);
222
223 if (r > 0) {
224 left -= r;
225 continue;
226 } else {
227 if (r < 0) {
228 int __e = errno;
229
230 if (__e == ENOSPC) {
231 if (td->o.fill_device)
232 break;
233 log_info("fio: ENOSPC on laying out "
234 "file, stopping\n");
235 break;
236 }
237 td_verror(td, errno, "write");
238 } else
239 td_verror(td, EIO, "write");
240
241 break;
242 }
243 }
244
245 if (td->terminate) {
246 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
247 td_io_unlink_file(td, f);
248 } else if (td->o.create_fsync) {
249 if (fsync(f->fd) < 0) {
250 td_verror(td, errno, "fsync");
251 goto err;
252 }
253 }
254 if (td->o.fill_device && !td_write(td)) {
255 fio_file_clear_size_known(f);
256 if (td_io_get_file_size(td, f))
257 goto err;
258 if (f->io_size > f->real_file_size)
259 f->io_size = f->real_file_size;
260 }
261
262 fio_memfree(b, bs);
263done:
264 return 0;
265err:
266 close(f->fd);
267 f->fd = -1;
268 if (b)
269 fio_memfree(b, bs);
270 return 1;
271}
272
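/*
 * Read the file once, sequentially, so its contents are cached before
 * the job starts. Skipped for pipe/diskless engines and char devices.
 */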
273static int pre_read_file(struct thread_data *td, struct fio_file *f)
274{
275 int ret = 0, r, did_open = 0, old_runstate;
276 unsigned long long left;
277 unsigned int bs;
278 char *b;
279
280 if (td_ioengine_flagged(td, FIO_PIPEIO) ||
281 td_ioengine_flagged(td, FIO_NOIO))
282 return 0;
283
284 if (f->filetype == FIO_TYPE_CHAR)
285 return 0;
286
287 if (!fio_file_open(f)) {
288 if (td->io_ops->open_file(td, f)) {
289 log_err("fio: cannot pre-read, failed to open file\n");
290 return 1;
291 }
292 did_open = 1;
293 }
294
295 old_runstate = td_bump_runstate(td, TD_PRE_READING);
296
297 left = f->io_size;
298 bs = td->o.max_bs[DDIR_READ];
299 if (bs > left)
300 bs = left;
301
302 b = malloc(bs);
303 if (!b) {
304 td_verror(td, errno, "malloc");
305 ret = 1;
306 goto error;
307 }
308 memset(b, 0, bs);
309
310 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
311 td_verror(td, errno, "lseek");
312 log_err("fio: failed to lseek pre-read file\n");
313 ret = 1;
314 goto error;
315 }
316
317 while (left && !td->terminate) {
318 if (bs > left)
319 bs = left;
320
321 r = read(f->fd, b, bs);
322
323 if (r == (int) bs) {
324 left -= bs;
325 continue;
326 } else {
327 td_verror(td, EIO, "pre_read");
328 break;
329 }
330 }
331
332error:
333 td_restore_runstate(td, old_runstate);
334
335 if (did_open)
336 td->io_ops->close_file(td, f);
337
338 free(b);
339 return ret;
340}
341
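/*
 * Pick a random size between file_size_low and file_size_high, rounded
 * down to a multiple of the minimum block size.
 */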
342unsigned long long get_rand_file_size(struct thread_data *td)
343{
344 unsigned long long ret, sized;
345 uint64_t frand_max;
346 unsigned long r;
347
348 frand_max = rand_max(&td->file_size_state);
349 r = __rand(&td->file_size_state);
350 sized = td->o.file_size_high - td->o.file_size_low;
351 ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
352 ret += td->o.file_size_low;
353 ret -= (ret % td->o.rw_min_bs);
354 return ret;
355}
356
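/* Get the size of a regular file via stat(2) */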
357static int file_size(struct thread_data *td, struct fio_file *f)
358{
359 struct stat st;
360
361 if (stat(f->file_name, &st) == -1) {
362 td_verror(td, errno, "stat");
363 return 1;
364 }
365
366 f->real_file_size = st.st_size;
367 return 0;
368}
369
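/* Get the size of a block device, opening it through the I/O engine */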
370static int bdev_size(struct thread_data *td, struct fio_file *f)
371{
372 unsigned long long bytes = 0;
373 int r;
374
375 if (td->io_ops->open_file(td, f)) {
376 log_err("fio: failed opening blockdev %s for size check\n",
377 f->file_name);
378 return 1;
379 }
380
381 r = blockdev_size(f, &bytes);
382 if (r) {
383 td_verror(td, r, "blockdev_size");
384 goto err;
385 }
386
387 if (!bytes) {
388 log_err("%s: zero sized block device?\n", f->file_name);
389 goto err;
390 }
391
392 f->real_file_size = bytes;
393 td->io_ops->close_file(td, f);
394 return 0;
395err:
396 td->io_ops->close_file(td, f);
397 return 1;
398}
399
400static int char_size(struct thread_data *td, struct fio_file *f)
401{
402#ifdef FIO_HAVE_CHARDEV_SIZE
403 unsigned long long bytes = 0;
404 int r;
405
406 if (td->io_ops->open_file(td, f)) {
407 log_err("fio: failed opening chardev %s for size check\n",
408 f->file_name);
409 return 1;
410 }
411
412 r = chardev_size(f, &bytes);
413 if (r) {
414 td_verror(td, r, "chardev_size");
415 goto err;
416 }
417
418 if (!bytes) {
419 log_err("%s: zero sized char device?\n", f->file_name);
420 goto err;
421 }
422
423 f->real_file_size = bytes;
424 td->io_ops->close_file(td, f);
425 return 0;
426err:
427 td->io_ops->close_file(td, f);
428 return 1;
429#else
430 f->real_file_size = -1ULL;
431 return 0;
432#endif
433}
434
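/*
 * Fill in ->real_file_size for a regular file, block device or char
 * device, and sanity check it against the file offset.
 */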
435static int get_file_size(struct thread_data *td, struct fio_file *f)
436{
437 int ret = 0;
438
439 if (fio_file_size_known(f))
440 return 0;
441
442 if (f->filetype == FIO_TYPE_FILE)
443 ret = file_size(td, f);
444 else if (f->filetype == FIO_TYPE_BLOCK)
445 ret = bdev_size(td, f);
446 else if (f->filetype == FIO_TYPE_CHAR)
447 ret = char_size(td, f);
448 else
449 f->real_file_size = -1ULL;
450
451 /*
452 * Leave ->real_file_size as-is (possibly 0), since for regular files
453 * a zero size may just mean the file has yet to be laid out.
454 */
455 if (ret)
456 return ret;
457
458 /*
459 * If ->real_file_size is -1, the "offset extends end" check below
460 * would always trigger, which makes no sense for an unknown size,
461 * so report the failure and return 1 here instead.
462 */
463 if (f->real_file_size == -1ULL) {
464 log_info("%s: failed to get file size of %s\n", td->o.name,
465 f->file_name);
466 return 1;
467 }
468
469 if (td->o.start_offset && f->file_offset == 0)
470 dprint(FD_FILE, "offset of file %s not initialized yet\n",
471 f->file_name);
472 /*
473 * ->file_offset normally hasn't been initialized yet, so this
474 * is basically always false.
475 */
476 if (f->file_offset > f->real_file_size) {
477 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
478 (unsigned long long) f->file_offset,
479 (unsigned long long) f->real_file_size);
480 return 1;
481 }
482
483 fio_file_set_size_known(f);
484 return 0;
485}
486
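/*
 * Drop cached data for the given range of the file, via the engine
 * invalidate hook, posix_fadvise() or the block device ioctl. Failure
 * to invalidate is never treated as fatal.
 */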
487static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
488 unsigned long long off,
489 unsigned long long len)
490{
491 int errval = 0, ret = 0;
492
493#ifdef CONFIG_ESX
494 return 0;
495#endif
496
497 if (len == -1ULL)
498 len = f->io_size;
499 if (off == -1ULL)
500 off = f->file_offset;
501
502 if (len == -1ULL || off == -1ULL)
503 return 0;
504
505 if (td->io_ops->invalidate) {
506 dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
507 f->file_name);
508 ret = td->io_ops->invalidate(td, f);
509 if (ret < 0)
510 errval = -ret;
511 } else if (f->filetype == FIO_TYPE_FILE) {
512 dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
513 f->file_name, off, len);
514 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
515 if (ret)
516 errval = ret;
517 } else if (f->filetype == FIO_TYPE_BLOCK) {
518 int retry_count = 0;
519
520 dprint(FD_IO, "drop page cache %s\n", f->file_name);
521 ret = blockdev_invalidate_cache(f);
522 while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
523 /*
524 * Linux multipath devices reject ioctl while
525 * the maps are being updated. That window can
526 * last tens of milliseconds; we'll try up to
527 * a quarter of a second.
528 */
529 usleep(10000);
530 ret = blockdev_invalidate_cache(f);
531 }
532 if (ret < 0 && errno == EACCES && geteuid()) {
533 if (!root_warn) {
534 log_err("fio: only root may flush block "
535 "devices. Cache flush bypassed!\n");
536 root_warn = 1;
537 }
538 ret = 0;
539 }
540 if (ret < 0)
541 errval = errno;
542 } else if (f->filetype == FIO_TYPE_CHAR ||
543 f->filetype == FIO_TYPE_PIPE) {
544 dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
545 ret = 0;
546 }
547
548 /*
549 * A failed cache flush isn't a fatal condition, and it is expected
550 * on some platforms where we don't have a proper way to flush
551 * e.g. block device caches. So just warn and
552 * continue on our way.
553 */
554 if (errval)
555 log_info("fio: cache invalidation of %s failed: %s\n",
556 f->file_name, strerror(errval));
557
558 return 0;
559
560}
561
562int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
563{
564 if (!fio_file_open(f))
565 return 0;
566
567 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
568}
569
570int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
571{
572 int ret = 0;
573
574 dprint(FD_FILE, "fd close %s\n", f->file_name);
575
576 remove_file_hash(f);
577
578 if (close(f->fd) < 0)
579 ret = errno;
580
581 f->fd = -1;
582
583 if (f->shadow_fd != -1) {
584 close(f->shadow_fd);
585 f->shadow_fd = -1;
586 }
587
588 f->engine_pos = 0;
589 return ret;
590}
591
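/*
 * Open the file, reusing the lock of an identical file found in the
 * file hash. Returns whether the file came from the hash.
 */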
592int file_lookup_open(struct fio_file *f, int flags)
593{
594 struct fio_file *__f;
595 int from_hash;
596
597 __f = lookup_file_hash(f->file_name);
598 if (__f) {
599 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
600 f->lock = __f->lock;
601 from_hash = 1;
602 } else {
603 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
604 from_hash = 0;
605 }
606
607#ifdef WIN32
608 flags |= _O_BINARY;
609#endif
610
611 f->fd = open(f->file_name, flags, 0600);
612 return from_hash;
613}
614
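/* Close stashed shadow descriptors, returning how many were closed */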
615static int file_close_shadow_fds(struct thread_data *td)
616{
617 struct fio_file *f;
618 int num_closed = 0;
619 unsigned int i;
620
621 for_each_file(td, f, i) {
622 if (f->shadow_fd == -1)
623 continue;
624
625 close(f->shadow_fd);
626 f->shadow_fd = -1;
627 num_closed++;
628 }
629
630 return num_closed;
631}
632
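/*
 * Open helper used by many I/O engines: builds the open(2) flags from
 * the job options, handles stdin/stdout, and retries without O_NOATIME
 * or after closing shadow fds if the first attempt fails.
 */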
633int generic_open_file(struct thread_data *td, struct fio_file *f)
634{
635 int is_std = 0;
636 int flags = 0;
637 int from_hash = 0;
638
639 dprint(FD_FILE, "fd open %s\n", f->file_name);
640
641 if (!strcmp(f->file_name, "-")) {
642 if (td_rw(td)) {
643 log_err("fio: can't read/write to stdin/out\n");
644 return 1;
645 }
646 is_std = 1;
647
648 /*
649 * move output logging to stderr, if we are writing to stdout
650 */
651 if (td_write(td))
652 f_out = stderr;
653 }
654
655 if (td_trim(td))
656 goto skip_flags;
657 if (td->o.odirect)
658 flags |= OS_O_DIRECT;
659 if (td->o.oatomic) {
660 if (!FIO_O_ATOMIC) {
661 td_verror(td, EINVAL, "OS does not support atomic IO");
662 return 1;
663 }
664 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
665 }
666 if (td->o.sync_io)
667 flags |= O_SYNC;
668 if (td->o.create_on_open && td->o.allow_create)
669 flags |= O_CREAT;
670skip_flags:
671 if (f->filetype != FIO_TYPE_FILE)
672 flags |= FIO_O_NOATIME;
673
674open_again:
675 if (td_write(td)) {
676 if (!read_only)
677 flags |= O_RDWR;
678
679 if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
680 flags |= O_CREAT;
681
682 if (is_std)
683 f->fd = dup(STDOUT_FILENO);
684 else
685 from_hash = file_lookup_open(f, flags);
686 } else if (td_read(td)) {
687 if (f->filetype == FIO_TYPE_CHAR && !read_only)
688 flags |= O_RDWR;
689 else
690 flags |= O_RDONLY;
691
692 if (is_std)
693 f->fd = dup(STDIN_FILENO);
694 else
695 from_hash = file_lookup_open(f, flags);
696 } else if (td_trim(td)) {
697 assert(!td_rw(td)); /* should have matched above */
698 flags |= O_RDWR;
699 from_hash = file_lookup_open(f, flags);
700 }
701
702 if (f->fd == -1) {
703 char buf[FIO_VERROR_SIZE];
704 int __e = errno;
705
706 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
707 flags &= ~FIO_O_NOATIME;
708 goto open_again;
709 }
710 if (__e == EMFILE && file_close_shadow_fds(td))
711 goto open_again;
712
713 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
714
715 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
716 log_err("fio: looks like your file system does not " \
717 "support direct=1/buffered=0\n");
718 }
719
720 td_verror(td, __e, buf);
721 return 1;
722 }
723
724 if (!from_hash && f->fd != -1) {
725 if (add_file_hash(f)) {
726 int fio_unused ret;
727
728 /*
729 * Stash away descriptor for later close. This is to
730 * work-around a "feature" on Linux, where a close of
731 * an fd that has been opened for write will trigger
732 * udev to call blkid to check partitions, fs id, etc.
733 * That pollutes the device cache, which can slow down
734 * unbuffered accesses.
735 */
736 if (f->shadow_fd == -1)
737 f->shadow_fd = f->fd;
738 else {
739 /*
740 * OK to ignore, we haven't done anything
741 * with it
742 */
743 ret = generic_close_file(td, f);
744 }
745 goto open_again;
746 }
747 }
748
749 return 0;
750}
751
752/*
753 * This function is the default .get_file_size implementation for the
754 * majority of I/O engines; it simply wraps get_file_size().
755 */
756int generic_get_file_size(struct thread_data *td, struct fio_file *f)
757{
758 return get_file_size(td, f);
759}
760
761/*
762 * open/close all files, so that ->real_file_size gets set
763 */
764static int get_file_sizes(struct thread_data *td)
765{
766 struct fio_file *f;
767 unsigned int i;
768 int err = 0;
769
770 for_each_file(td, f, i) {
771 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
772 f->file_name);
773
774 if (td_io_get_file_size(td, f)) {
775 if (td->error != ENOENT) {
776 log_err("%s\n", td->verror);
777 err = 1;
778 break;
779 }
780 clear_error(td);
781 }
782
783 /*
784 * There are corner cases where we end up with -1 for
785 * ->real_file_size due to unsupported file type, etc.
786 * We then just set it to the size option value divided by the number
787 * of files, similar to the way file ->io_size is set.
788 * stat(2) failure doesn't set ->real_file_size to -1.
789 */
790 if (f->real_file_size == -1ULL && td->o.size)
791 f->real_file_size = td->o.size / td->o.nr_files;
792 }
793
794 return err;
795}
796
797struct fio_mount {
798 struct flist_head list;
799 const char *base;
800 char __base[256];
801 unsigned int key;
802};
803
804/*
805 * Get the number of free bytes on each unique mount backing the files.
806 */
807static unsigned long long get_fs_free_counts(struct thread_data *td)
808{
809 struct flist_head *n, *tmp;
810 unsigned long long ret = 0;
811 struct fio_mount *fm;
812 FLIST_HEAD(list);
813 struct fio_file *f;
814 unsigned int i;
815
816 for_each_file(td, f, i) {
817 struct stat sb;
818 char buf[256];
819
820 if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
821 if (f->real_file_size != -1ULL)
822 ret += f->real_file_size;
823 continue;
824 } else if (f->filetype != FIO_TYPE_FILE)
825 continue;
826
827 buf[255] = '\0';
828 strncpy(buf, f->file_name, 255);
829
830 if (stat(buf, &sb) < 0) {
831 if (errno != ENOENT)
832 break;
833 strcpy(buf, ".");
834 if (stat(buf, &sb) < 0)
835 break;
836 }
837
838 fm = NULL;
839 flist_for_each(n, &list) {
840 fm = flist_entry(n, struct fio_mount, list);
841 if (fm->key == sb.st_dev)
842 break;
843
844 fm = NULL;
845 }
846
847 if (fm)
848 continue;
849
850 fm = calloc(1, sizeof(*fm));
851 strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
852 fm->base = basename(fm->__base);
853 fm->key = sb.st_dev;
854 flist_add(&fm->list, &list);
855 }
856
857 flist_for_each_safe(n, tmp, &list) {
858 unsigned long long sz;
859
860 fm = flist_entry(n, struct fio_mount, list);
861 flist_del(&fm->list);
862
863 sz = get_fs_free_size(fm->base);
864 if (sz && sz != -1ULL)
865 ret += sz;
866
867 free(fm);
868 }
869
870 return ret;
871}
872
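/*
 * Compute the start offset for this file, honoring start_offset,
 * start_offset_percent, blockalign and the per-subjob offset_increment.
 */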
873uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
874{
875 struct thread_options *o = &td->o;
876 unsigned long long align_bs;
877 unsigned long long offset;
878
879 if (o->file_append && f->filetype == FIO_TYPE_FILE)
880 return f->real_file_size;
881
882 if (o->start_offset_percent > 0) {
883 /*
884 * if blockalign is provided, find the min across read, write,
885 * and trim
886 */
887 if (fio_option_is_set(o, ba)) {
888 align_bs = (unsigned long long) min(o->ba[DDIR_READ], o->ba[DDIR_WRITE]);
889 align_bs = min((unsigned long long) o->ba[DDIR_TRIM], align_bs);
890 } else {
891 /* else take the minimum block size */
892 align_bs = td_min_bs(td);
893 }
894
895 /* calculate the raw offset */
896 offset = (f->real_file_size * o->start_offset_percent / 100) +
897 (td->subjob_number * o->offset_increment);
898
899 /*
900 * block align the offset at the next available boundary at
901 * ceiling(offset / align_bs) * align_bs
902 */
903 offset = (offset / align_bs + (offset % align_bs != 0)) * align_bs;
904
905 } else {
906 /* start_offset_percent not set */
907 offset = o->start_offset +
908 td->subjob_number * o->offset_increment;
909 }
910
911 return offset;
912}
913
914/*
915 * Open the files and set up file sizes, creating files if necessary.
916 */
917int setup_files(struct thread_data *td)
918{
919 unsigned long long total_size, extend_size;
920 struct thread_options *o = &td->o;
921 struct fio_file *f;
922 unsigned int i, nr_fs_extra = 0;
923 int err = 0, need_extend;
924 int old_state;
925 const unsigned int bs = td_min_bs(td);
926 uint64_t fs = 0;
927
928 dprint(FD_FILE, "setup files\n");
929
930 old_state = td_bump_runstate(td, TD_SETTING_UP);
931
932 if (o->read_iolog_file)
933 goto done;
934
935 /*
936 * Find out physical size of files or devices for this thread,
937 * before we determine I/O size and range of our targets.
938 * If ioengine defines a setup() method, it's responsible for
939 * opening the files and setting f->real_file_size to indicate
940 * the valid range for that file.
941 */
942 if (td->io_ops->setup)
943 err = td->io_ops->setup(td);
944 else
945 err = get_file_sizes(td);
946
947 if (err)
948 goto err_out;
949
950 /*
951 * check sizes. if the files/devices do not exist and the size
952 * isn't passed to fio, abort.
953 */
954 total_size = 0;
955 for_each_file(td, f, i) {
956 f->fileno = i;
957 if (f->real_file_size == -1ULL)
958 total_size = -1ULL;
959 else
960 total_size += f->real_file_size;
961 }
962
963 if (o->fill_device)
964 td->fill_device_size = get_fs_free_counts(td);
965
966 /*
967 * device/file sizes are zero and no size given, punt
968 */
969 if ((!total_size || total_size == -1ULL) && !o->size &&
970 !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
971 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
972 log_err("%s: you need to specify size=\n", o->name);
973 td_verror(td, EINVAL, "total_file_size");
974 goto err_out;
975 }
976
977 /*
978 * Calculate per-file size and potential extra size for the
979 * first files, if needed (i.e. if we don't have a fixed size).
980 */
981 if (!o->file_size_low && o->nr_files) {
982 uint64_t all_fs;
983
984 fs = o->size / o->nr_files;
985 all_fs = fs * o->nr_files;
986
987 if (all_fs < o->size)
988 nr_fs_extra = (o->size - all_fs) / bs;
989 }
990
991 /*
992 * now file sizes are known, so we can set ->io_size. if size= is
993 * not given, ->io_size is just equal to ->real_file_size. if size
994 * is given, ->io_size is size / nr_files.
995 */
996 extend_size = total_size = 0;
997 need_extend = 0;
998 for_each_file(td, f, i) {
999 f->file_offset = get_start_offset(td, f);
1000
1001 /*
1002 * Update ->io_size depending on options specified.
1003 * ->file_size_low being 0 means filesize option isn't set.
1004 * Non zero ->file_size_low equals ->file_size_high means
1005 * filesize option is set in a fixed size format.
1006 * Non zero ->file_size_low not equals ->file_size_high means
1007 * filesize option is set in a range format.
1008 */
1009 if (!o->file_size_low) {
1010 /*
1011 * no file size or range given, file size is equal to
1012 * total size divided by number of files. If the size
1013 * doesn't divide nicely with the min blocksize,
1014 * make the first files bigger.
1015 */
1016 f->io_size = fs;
1017 if (nr_fs_extra) {
1018 nr_fs_extra--;
1019 f->io_size += bs;
1020 }
1021
1022 /*
1023 * We normally don't come here for regular files, but
1024 * if the result is 0 for a regular file, set it to the
1025 * real file size. This could be size of the existing
1026 * one if it already exists, but otherwise will be set
1027 * to 0. A new file won't be created because
1028 * ->io_size + ->file_offset equals ->real_file_size.
1029 */
1030 if (!f->io_size) {
1031 if (f->file_offset > f->real_file_size)
1032 goto err_offset;
1033 f->io_size = f->real_file_size - f->file_offset;
1034 if (!f->io_size)
1035 log_info("fio: file %s may be ignored\n",
1036 f->file_name);
1037 }
1038 } else if (f->real_file_size < o->file_size_low ||
1039 f->real_file_size > o->file_size_high) {
1040 if (f->file_offset > o->file_size_low)
1041 goto err_offset;
1042 /*
1043 * file size given. if it's fixed, use that. if it's a
1044 * range, generate a random size in-between.
1045 */
1046 if (o->file_size_low == o->file_size_high)
1047 f->io_size = o->file_size_low - f->file_offset;
1048 else {
1049 f->io_size = get_rand_file_size(td)
1050 - f->file_offset;
1051 }
1052 } else
1053 f->io_size = f->real_file_size - f->file_offset;
1054
1055 if (f->io_size == -1ULL)
1056 total_size = -1ULL;
1057 else {
1058 if (o->size_percent) {
1059 uint64_t file_size;
1060
1061 file_size = f->io_size + f->file_offset;
1062 f->io_size = (file_size *
1063 o->size_percent) / 100;
1064 if (f->io_size > (file_size - f->file_offset))
1065 f->io_size = file_size - f->file_offset;
1066
1067 f->io_size -= (f->io_size % td_min_bs(td));
1068 }
1069 total_size += f->io_size;
1070 }
1071
1072 if (f->filetype == FIO_TYPE_FILE &&
1073 (f->io_size + f->file_offset) > f->real_file_size &&
1074 !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
1075 if (!o->create_on_open) {
1076 need_extend++;
1077 extend_size += (f->io_size + f->file_offset);
1078 fio_file_set_extend(f);
1079 } else
1080 f->real_file_size = f->io_size + f->file_offset;
1081 }
1082 }
1083
1084 if (td->o.block_error_hist) {
1085 int len;
1086
1087 assert(td->o.nr_files == 1); /* checked in fixup_options */
1088 f = td->files[0];
1089 len = f->io_size / td->o.bs[DDIR_TRIM];
1090 if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1091 log_err("fio: cannot calculate block histogram with "
1092 "%d trim blocks, maximum %d\n",
1093 len, MAX_NR_BLOCK_INFOS);
1094 td_verror(td, EINVAL, "block_error_hist");
1095 goto err_out;
1096 }
1097
1098 td->ts.nr_block_infos = len;
1099 for (i = 0; i < len; i++)
1100 td->ts.block_infos[i] =
1101 BLOCK_INFO(0, BLOCK_STATE_UNINIT);
1102 } else
1103 td->ts.nr_block_infos = 0;
1104
1105 if (!o->size || (total_size && o->size > total_size))
1106 o->size = total_size;
1107
1108 if (o->size < td_min_bs(td)) {
1109 log_err("fio: blocksize too large for data set\n");
1110 goto err_out;
1111 }
1112
1113 /*
1114 * See if we need to extend some files, typically needed when our
1115 * target regular files don't exist yet, but our jobs require them
1116 * initially due to read I/Os.
1117 */
1118 if (need_extend) {
1119 temp_stall_ts = 1;
1120 if (output_format & FIO_OUTPUT_NORMAL) {
1121 log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1122 o->name,
1123 need_extend > 1 ? "s" : "",
1124 need_extend,
1125 need_extend > 1 ? "s" : "",
1126 need_extend > 1 ? "total " : "",
1127 extend_size >> 20);
1128 }
1129
1130 for_each_file(td, f, i) {
1131 unsigned long long old_len = -1ULL, extend_len = -1ULL;
1132
1133 if (!fio_file_extend(f))
1134 continue;
1135
1136 assert(f->filetype == FIO_TYPE_FILE);
1137 fio_file_clear_extend(f);
1138 if (!o->fill_device) {
1139 old_len = f->real_file_size;
1140 extend_len = f->io_size + f->file_offset -
1141 old_len;
1142 }
1143 f->real_file_size = (f->io_size + f->file_offset);
1144 err = extend_file(td, f);
1145 if (err)
1146 break;
1147
1148 err = __file_invalidate_cache(td, f, old_len,
1149 extend_len);
1150
1151 /*
1152 * Shut up static checker
1153 */
1154 if (f->fd != -1)
1155 close(f->fd);
1156
1157 f->fd = -1;
1158 if (err)
1159 break;
1160 }
1161 temp_stall_ts = 0;
1162 }
1163
1164 if (err)
1165 goto err_out;
1166
1167 if (!o->zone_size)
1168 o->zone_size = o->size;
1169
1170 /*
1171 * iolog already set the total io size, if we read back
1172 * stored entries.
1173 */
1174 if (!o->read_iolog_file) {
1175 if (o->io_size)
1176 td->total_io_size = o->io_size * o->loops;
1177 else
1178 td->total_io_size = o->size * o->loops;
1179 }
1180
1181done:
1182 if (o->create_only)
1183 td->done = 1;
1184
1185 td_restore_runstate(td, old_state);
1186 return 0;
1187err_offset:
1188 log_err("%s: you need to specify valid offset=\n", o->name);
1189err_out:
1190 td_restore_runstate(td, old_state);
1191 return 1;
1192}
1193
1194int pre_read_files(struct thread_data *td)
1195{
1196 struct fio_file *f;
1197 unsigned int i;
1198
1199 dprint(FD_FILE, "pre_read files\n");
1200
1201 for_each_file(td, f, i) {
1202 if (pre_read_file(td, f))
1203 return -1;
1204 }
1205
1206 return 0;
1207}
1208
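/*
 * Initialize the zipf/pareto/gauss state for one file, seeded from the
 * file name (or from the generic seed if rand_repeatable is disabled).
 */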
1209static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1210{
1211 unsigned int range_size, seed;
1212 unsigned long nranges;
1213 uint64_t fsize;
1214
1215 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1216 fsize = min(f->real_file_size, f->io_size);
1217
1218 nranges = (fsize + range_size - 1) / range_size;
1219
1220 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1221 if (!td->o.rand_repeatable)
1222 seed = td->rand_seeds[4];
1223
1224 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1225 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1226 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1227 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1228 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1229 gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1230
1231 return 1;
1232}
1233
1234static int init_rand_distribution(struct thread_data *td)
1235{
1236 struct fio_file *f;
1237 unsigned int i;
1238 int state;
1239
1240 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1241 return 0;
1242
1243 state = td_bump_runstate(td, TD_SETTING_UP);
1244
1245 for_each_file(td, f, i)
1246 __init_rand_distribution(td, f);
1247
1248 td_restore_runstate(td, state);
1249
1250 return 1;
1251}
1252
1253/*
1254 * Check if the number of blocks exceeds the randomness capability of
1255 * the selected generator. Tausworthe is 32-bit, the others are fully
1256 * 64-bit capable.
1257 */
1258static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1259 uint64_t blocks)
1260{
1261 if (blocks <= FRAND32_MAX)
1262 return 0;
1263 if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1264 return 0;
1265
1266 /*
1267 * If the user hasn't specified a random generator, switch
1268 * to tausworthe64 with informational warning. If the user did
1269 * specify one, just warn.
1270 */
1271 log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1272 f->file_name);
1273
1274 if (!fio_option_is_set(&td->o, random_generator)) {
1275 log_info("fio: Switching to tausworthe64. Use the "
1276 "random_generator= option to get rid of this "
1277 "warning.\n");
1278 td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1279 return 0;
1280 }
1281
1282 /*
1283 * Just keep this as an informational message to avoid breaking scripts.
1284 */
1285 log_info("fio: Use the random_generator= option to switch to lfsr or "
1286 "tausworthe64.\n");
1287 return 0;
1288}
1289
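/*
 * Set up per-file random block tracking: an LFSR or an axmap, unless
 * norandommap is set or a non-uniform random distribution is used.
 */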
1290int init_random_map(struct thread_data *td)
1291{
1292 unsigned long long blocks;
1293 struct fio_file *f;
1294 unsigned int i;
1295
1296 if (init_rand_distribution(td))
1297 return 0;
1298 if (!td_random(td))
1299 return 0;
1300
1301 for_each_file(td, f, i) {
1302 uint64_t fsize = min(f->real_file_size, f->io_size);
1303
1304 blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1305
1306 if (check_rand_gen_limits(td, f, blocks))
1307 return 1;
1308
1309 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1310 unsigned long seed;
1311
1312 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1313
1314 if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1315 fio_file_set_lfsr(f);
1316 continue;
1317 }
1318 } else if (!td->o.norandommap) {
1319 f->io_axmap = axmap_new(blocks);
1320 if (f->io_axmap) {
1321 fio_file_set_axmap(f);
1322 continue;
1323 }
1324 } else if (td->o.norandommap)
1325 continue;
1326
1327 if (!td->o.softrandommap) {
1328 log_err("fio: failed allocating random map. If running"
1329 " a large number of jobs, try the 'norandommap'"
1330 " option or set 'softrandommap'. Or give"
1331 " a larger --alloc-size to fio.\n");
1332 return 1;
1333 }
1334
1335 log_info("fio: file %s failed allocating random map. Running "
1336 "job without.\n", f->file_name);
1337 }
1338
1339 return 0;
1340}
1341
1342void close_files(struct thread_data *td)
1343{
1344 struct fio_file *f;
1345 unsigned int i;
1346
1347 for_each_file(td, f, i) {
1348 if (fio_file_open(f))
1349 td_io_close_file(td, f);
1350 }
1351}
1352
1353void close_and_free_files(struct thread_data *td)
1354{
1355 struct fio_file *f;
1356 unsigned int i;
1357
1358 dprint(FD_FILE, "close files\n");
1359
1360 for_each_file(td, f, i) {
1361 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1362 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1363 td_io_unlink_file(td, f);
1364 }
1365
1366 if (fio_file_open(f))
1367 td_io_close_file(td, f);
1368
1369 remove_file_hash(f);
1370
1371 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1372 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1373 td_io_unlink_file(td, f);
1374 }
1375
1376 sfree(f->file_name);
1377 f->file_name = NULL;
1378 if (fio_file_axmap(f)) {
1379 axmap_free(f->io_axmap);
1380 f->io_axmap = NULL;
1381 }
1382 sfree(f);
1383 }
1384
1385 td->o.filename = NULL;
1386 free(td->files);
1387 free(td->file_locks);
1388 td->files_index = 0;
1389 td->files = NULL;
1390 td->file_locks = NULL;
1391 td->o.file_lock_mode = FILE_LOCK_NONE;
1392 td->o.nr_files = 0;
1393}
1394
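/* Classify the file as a regular file, block/char device or pipe */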
1395static void get_file_type(struct fio_file *f)
1396{
1397 struct stat sb;
1398
1399 if (!strcmp(f->file_name, "-"))
1400 f->filetype = FIO_TYPE_PIPE;
1401 else
1402 f->filetype = FIO_TYPE_FILE;
1403
1404#ifdef WIN32
1405 /* \\.\ is the device namespace in Windows, where every file is
1406 * a block device */
1407 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1408 f->filetype = FIO_TYPE_BLOCK;
1409#endif
1410
1411 if (!stat(f->file_name, &sb)) {
1412 if (S_ISBLK(sb.st_mode))
1413 f->filetype = FIO_TYPE_BLOCK;
1414 else if (S_ISCHR(sb.st_mode))
1415 f->filetype = FIO_TYPE_CHAR;
1416 else if (S_ISFIFO(sb.st_mode))
1417 f->filetype = FIO_TYPE_PIPE;
1418 }
1419}
1420
1421static bool __is_already_allocated(const char *fname, bool set)
1422{
1423 struct flist_head *entry;
1424 bool ret;
1425
1426 ret = file_bloom_exists(fname, set);
1427 if (!ret)
1428 return ret;
1429
1430 flist_for_each(entry, &filename_list) {
1431 struct file_name *fn;
1432
1433 fn = flist_entry(entry, struct file_name, list);
1434
1435 if (!strcmp(fn->filename, fname))
1436 return true;
1437 }
1438
1439 return false;
1440}
1441
1442static bool is_already_allocated(const char *fname)
1443{
1444 bool ret;
1445
1446 fio_file_hash_lock();
1447 ret = __is_already_allocated(fname, false);
1448 fio_file_hash_unlock();
1449
1450 return ret;
1451}
1452
1453static void set_already_allocated(const char *fname)
1454{
1455 struct file_name *fn;
1456
1457 fn = malloc(sizeof(struct file_name));
1458 fn->filename = strdup(fname);
1459
1460 fio_file_hash_lock();
1461 if (!__is_already_allocated(fname, true)) {
1462 flist_add_tail(&fn->list, &filename_list);
1463 fn = NULL;
1464 }
1465 fio_file_hash_unlock();
1466
1467 if (fn) {
1468 free(fn->filename);
1469 free(fn);
1470 }
1471}
1472
1473static void free_already_allocated(void)
1474{
1475 struct flist_head *entry, *tmp;
1476 struct file_name *fn;
1477
1478 if (flist_empty(&filename_list))
1479 return;
1480
1481 fio_file_hash_lock();
1482 flist_for_each_safe(entry, tmp, &filename_list) {
1483 fn = flist_entry(entry, struct file_name, list);
1484 free(fn->filename);
1485 flist_del(&fn->list);
1486 free(fn);
1487 }
1488
1489 fio_file_hash_unlock();
1490}
1491
1492static struct fio_file *alloc_new_file(struct thread_data *td)
1493{
1494 struct fio_file *f;
1495
1496 f = smalloc(sizeof(*f));
1497 if (!f) {
1498 assert(0);
1499 return NULL;
1500 }
1501
1502 f->fd = -1;
1503 f->shadow_fd = -1;
1504 fio_file_reset(td, f);
1505 return f;
1506}
1507
1508bool exists_and_not_regfile(const char *filename)
1509{
1510 struct stat sb;
1511
1512 if (lstat(filename, &sb) == -1)
1513 return false;
1514
1515#ifndef WIN32 /* NOT Windows */
1516 if (S_ISREG(sb.st_mode))
1517 return false;
1518#else
1519 /* \\.\ is the device namespace in Windows, where every file
1520 * is a device node */
1521 if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1522 return false;
1523#endif
1524
1525 return true;
1526}
1527
1528int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1529{
1530 int cur_files = td->files_index;
1531 char file_name[PATH_MAX];
1532 struct fio_file *f;
1533 int len = 0;
1534
1535 dprint(FD_FILE, "add file %s\n", fname);
1536
1537 if (td->o.directory)
1538 len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1539 td->o.unique_filename);
1540
1541 sprintf(file_name + len, "%s", fname);
1542
1543 /* skip re-adding files that a cloned sibling job has already allocated */
1544 if (numjob && is_already_allocated(file_name) &&
1545 !exists_and_not_regfile(fname))
1546 return 0;
1547
1548 f = alloc_new_file(td);
1549
1550 if (td->files_size <= td->files_index) {
1551 unsigned int new_size = td->o.nr_files + 1;
1552
1553 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1554
1555 td->files = realloc(td->files, new_size * sizeof(f));
1556 if (td->files == NULL) {
1557 log_err("fio: realloc OOM\n");
1558 assert(0);
1559 }
1560 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1561 td->file_locks = realloc(td->file_locks, new_size);
1562 if (!td->file_locks) {
1563 log_err("fio: realloc OOM\n");
1564 assert(0);
1565 }
1566 td->file_locks[cur_files] = FILE_LOCK_NONE;
1567 }
1568 td->files_size = new_size;
1569 }
1570 td->files[cur_files] = f;
1571 f->fileno = cur_files;
1572
1573 /*
1574 * init function, io engine may not be loaded yet
1575 */
1576 if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1577 f->real_file_size = -1ULL;
1578
1579 f->file_name = smalloc_strdup(file_name);
1580 if (!f->file_name)
1581 assert(0);
1582
1583 get_file_type(f);
1584
1585 switch (td->o.file_lock_mode) {
1586 case FILE_LOCK_NONE:
1587 break;
1588 case FILE_LOCK_READWRITE:
1589 f->rwlock = fio_rwlock_init();
1590 break;
1591 case FILE_LOCK_EXCLUSIVE:
1592 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1593 break;
1594 default:
1595 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1596 assert(0);
1597 }
1598
1599 td->files_index++;
1600 if (f->filetype == FIO_TYPE_FILE)
1601 td->nr_normal_files++;
1602
1603 set_already_allocated(file_name);
1604
1605 if (inc)
1606 td->o.nr_files++;
1607
1608 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1609 cur_files);
1610
1611 return cur_files;
1612}
1613
1614int add_file_exclusive(struct thread_data *td, const char *fname)
1615{
1616 struct fio_file *f;
1617 unsigned int i;
1618
1619 for_each_file(td, f, i) {
1620 if (!strcmp(f->file_name, fname))
1621 return i;
1622 }
1623
1624 return add_file(td, fname, 0, 1);
1625}
1626
1627void get_file(struct fio_file *f)
1628{
1629 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1630 assert(fio_file_open(f));
1631 f->references++;
1632}
1633
1634int put_file(struct thread_data *td, struct fio_file *f)
1635{
1636 int f_ret = 0, ret = 0;
1637
1638 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1639
1640 if (!fio_file_open(f)) {
1641 assert(f->fd == -1);
1642 return 0;
1643 }
1644
1645 assert(f->references);
1646 if (--f->references)
1647 return 0;
1648
1649 if (should_fsync(td) && td->o.fsync_on_close) {
1650 f_ret = fsync(f->fd);
1651 if (f_ret < 0)
1652 f_ret = errno;
1653 }
1654
1655 if (td->io_ops->close_file)
1656 ret = td->io_ops->close_file(td, f);
1657
1658 if (!ret)
1659 ret = f_ret;
1660
1661 td->nr_open_files--;
1662 fio_file_clear_open(f);
1663 assert(f->fd == -1);
1664 return ret;
1665}
1666
1667void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1668{
1669 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1670 return;
1671
1672 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1673 if (ddir == DDIR_READ)
1674 fio_rwlock_read(f->rwlock);
1675 else
1676 fio_rwlock_write(f->rwlock);
1677 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1678 fio_mutex_down(f->lock);
1679
1680 td->file_locks[f->fileno] = td->o.file_lock_mode;
1681}
1682
1683void unlock_file(struct thread_data *td, struct fio_file *f)
1684{
1685 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1686 return;
1687
1688 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1689 fio_rwlock_unlock(f->rwlock);
1690 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1691 fio_mutex_up(f->lock);
1692
1693 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1694}
1695
1696void unlock_file_all(struct thread_data *td, struct fio_file *f)
1697{
1698 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1699 return;
1700 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1701 unlock_file(td, f);
1702}
1703
1704static int recurse_dir(struct thread_data *td, const char *dirname)
1705{
1706 struct dirent *dir;
1707 int ret = 0;
1708 DIR *D;
1709
1710 D = opendir(dirname);
1711 if (!D) {
1712 char buf[FIO_VERROR_SIZE];
1713
1714 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1715 td_verror(td, errno, buf);
1716 return 1;
1717 }
1718
1719 while ((dir = readdir(D)) != NULL) {
1720 char full_path[PATH_MAX];
1721 struct stat sb;
1722
1723 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1724 continue;
1725
1726 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1727
1728 if (lstat(full_path, &sb) == -1) {
1729 if (errno != ENOENT) {
1730 td_verror(td, errno, "stat");
1731 ret = 1;
1732 break;
1733 }
/* entry went away after readdir(); skip it instead of using an uninitialized stat buf */
continue;
1734 }
1735
1736 if (S_ISREG(sb.st_mode)) {
1737 add_file(td, full_path, 0, 1);
1738 continue;
1739 }
1740 if (!S_ISDIR(sb.st_mode))
1741 continue;
1742
1743 ret = recurse_dir(td, full_path);
1744 if (ret)
1745 break;
1746 }
1747
1748 closedir(D);
1749 return ret;
1750}
1751
1752int add_dir_files(struct thread_data *td, const char *path)
1753{
1754 int ret = recurse_dir(td, path);
1755
1756 if (!ret)
1757 log_info("fio: opendir added %d files\n", td->o.nr_files);
1758
1759 return ret;
1760}
1761
1762void dup_files(struct thread_data *td, struct thread_data *org)
1763{
1764 struct fio_file *f;
1765 unsigned int i;
1766
1767 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1768
1769 if (!org->files)
1770 return;
1771
1772 td->files = malloc(org->files_index * sizeof(f));
1773
1774 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1775 td->file_locks = malloc(org->files_index);
1776
1777 for_each_file(org, f, i) {
1778 struct fio_file *__f;
1779
1780 __f = alloc_new_file(td);
1781
1782 if (f->file_name) {
1783 __f->file_name = smalloc_strdup(f->file_name);
1784 if (!__f->file_name)
1785 assert(0);
1786
1787 __f->filetype = f->filetype;
1788 }
1789
1790 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1791 __f->lock = f->lock;
1792 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1793 __f->rwlock = f->rwlock;
1794
1795 td->files[i] = __f;
1796 }
1797}
1798
1799/*
1800 * Returns the index that matches the filename, or -1 if not there
1801 */
1802int get_fileno(struct thread_data *td, const char *fname)
1803{
1804 struct fio_file *f;
1805 unsigned int i;
1806
1807 for_each_file(td, f, i)
1808 if (!strcmp(f->file_name, fname))
1809 return i;
1810
1811 return -1;
1812}
1813
1814/*
1815 * For log usage, where we add/open/close files automatically
1816 */
1817void free_release_files(struct thread_data *td)
1818{
1819 close_files(td);
1820 td->o.nr_files = 0;
1821 td->o.open_files = 0;
1822 td->files_index = 0;
1823 td->nr_normal_files = 0;
1824}
1825
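/* Reset per-file I/O positions and any random block map state */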
1826void fio_file_reset(struct thread_data *td, struct fio_file *f)
1827{
1828 int i;
1829
1830 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1831 f->last_pos[i] = f->file_offset;
1832 f->last_start[i] = -1ULL;
1833 }
1834
1835 if (fio_file_axmap(f))
1836 axmap_reset(f->io_axmap);
1837 else if (fio_file_lfsr(f))
1838 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1839}
1840
1841bool fio_files_done(struct thread_data *td)
1842{
1843 struct fio_file *f;
1844 unsigned int i;
1845
1846 for_each_file(td, f, i)
1847 if (!fio_file_done(f))
1848 return false;
1849
1850 return true;
1851}
1852
1853/* free memory used in initialization phase only */
1854void filesetup_mem_free(void)
1855{
1856 free_already_allocated();
1857}
1858
1859/*
1860 * This function is for platforms which support direct I/O but not O_DIRECT.
1861 */
1862int fio_set_directio(struct thread_data *td, struct fio_file *f)
1863{
1864#ifdef FIO_OS_DIRECTIO
1865 int ret = fio_set_odirect(f);
1866
1867 if (ret) {
1868 td_verror(td, ret, "fio_set_directio");
1869#if defined(__sun__)
1870 if (ret == ENOTTY) { /* ENOTTY suggests RAW device or ZFS */
1871 log_err("fio: doing directIO to RAW devices or ZFS not supported\n");
1872 } else {
1873 log_err("fio: the file system does not seem to support direct IO\n");
1874 }
1875#else
1876 log_err("fio: the file system does not seem to support direct IO\n");
1877#endif
1878 return -1;
1879 }
1880
1881 return 0;
1882#else
1883 return -1;
1884#endif
1885}