[fio.git] / filesetup.c
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "os/os.h"
15#include "hash.h"
16#include "lib/axmap.h"
17
18#ifdef CONFIG_LINUX_FALLOCATE
19#include <linux/falloc.h>
20#endif
21
22static int root_warn;
23
24static inline void clear_error(struct thread_data *td)
25{
26 td->error = 0;
27 td->verror[0] = '\0';
28}
29
30/*
31 * Leaves f->fd open on success, caller must close
32 */
33static int extend_file(struct thread_data *td, struct fio_file *f)
34{
35 int r, new_layout = 0, unlink_file = 0, flags;
36 unsigned long long left;
37 unsigned int bs;
38 char *b;
39
40 if (read_only) {
41 log_err("fio: refusing extend of file due to read-only\n");
42 return 0;
43 }
44
45 /*
46 * check if we need to lay the file out completely again. fio
47 * does that for operations involving reads, or for writes
48 * where overwrite is set
49 */
50 if (td_read(td) || (td_write(td) && td->o.overwrite) ||
51 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
52 new_layout = 1;
53 if (td_write(td) && !td->o.overwrite)
54 unlink_file = 1;
55
56 if (unlink_file || new_layout) {
57 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
58 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
59 td_verror(td, errno, "unlink");
60 return 1;
61 }
62 }
63
64 flags = O_WRONLY | O_CREAT;
65 if (new_layout)
66 flags |= O_TRUNC;
67
68 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
69 f->fd = open(f->file_name, flags, 0644);
70 if (f->fd < 0) {
71 td_verror(td, errno, "open");
72 return 1;
73 }
74
75#ifdef CONFIG_POSIX_FALLOCATE
76 if (!td->o.fill_device) {
77 switch (td->o.fallocate_mode) {
78 case FIO_FALLOCATE_NONE:
79 break;
80 case FIO_FALLOCATE_POSIX:
81 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
82 f->file_name,
83 (unsigned long long) f->real_file_size);
84
85 r = posix_fallocate(f->fd, 0, f->real_file_size);
86 if (r > 0) {
87 log_err("fio: posix_fallocate fails: %s\n",
88 strerror(r));
89 }
90 break;
91#ifdef CONFIG_LINUX_FALLOCATE
92 case FIO_FALLOCATE_KEEP_SIZE:
93 dprint(FD_FILE,
94 "fallocate(FALLOC_FL_KEEP_SIZE) "
95 "file %s size %llu\n", f->file_name,
96 (unsigned long long) f->real_file_size);
97
98 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
99 f->real_file_size);
100 if (r != 0) {
101 td_verror(td, errno, "fallocate");
102 }
103 break;
104#endif /* CONFIG_LINUX_FALLOCATE */
105 default:
106 log_err("fio: unknown fallocate mode: %d\n",
107 td->o.fallocate_mode);
108 assert(0);
109 }
110 }
111#endif /* CONFIG_POSIX_FALLOCATE */
112
113 if (!new_layout)
114 goto done;
115
116 /*
117 * The size will be -1ULL when fill_device is used, so don't truncate
118 * or fallocate this file, just write it
119 */
120 if (!td->o.fill_device) {
121 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
122 (unsigned long long) f->real_file_size);
123 if (ftruncate(f->fd, f->real_file_size) == -1) {
124 td_verror(td, errno, "ftruncate");
125 goto err;
126 }
127 }
128
129 b = malloc(td->o.max_bs[DDIR_WRITE]);
130 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
131
132 left = f->real_file_size;
133 while (left && !td->terminate) {
134 bs = td->o.max_bs[DDIR_WRITE];
135 if (bs > left)
136 bs = left;
137
138 r = write(f->fd, b, bs);
139
140 if (r > 0) {
141 left -= r;
142 continue;
143 } else {
144 if (r < 0) {
145 int __e = errno;
146
147 if (__e == ENOSPC) {
148 if (td->o.fill_device)
149 break;
150 log_info("fio: ENOSPC on laying out "
151 "file, stopping\n");
152 break;
153 }
154 td_verror(td, errno, "write");
155 } else
156 td_verror(td, EIO, "write");
157
158 break;
159 }
160 }
161
162 if (td->terminate) {
163 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
164 unlink(f->file_name);
165 } else if (td->o.create_fsync) {
166 if (fsync(f->fd) < 0) {
167 td_verror(td, errno, "fsync");
168 goto err;
169 }
170 }
171 if (td->o.fill_device && !td_write(td)) {
172 fio_file_clear_size_known(f);
173 if (td_io_get_file_size(td, f))
174 goto err;
175 if (f->io_size > f->real_file_size)
176 f->io_size = f->real_file_size;
177 }
178
179 free(b);
180done:
181 return 0;
182err:
183 close(f->fd);
184 f->fd = -1;
185 return 1;
186}
187
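/*
 * Read through the whole file once so its contents are resident in the
 * page cache before the job starts issuing IO. Not applicable to
 * pipe-style ioengines, which cannot be pre-read.
 */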
188static int pre_read_file(struct thread_data *td, struct fio_file *f)
189{
190 int r, did_open = 0, old_runstate;
191 unsigned long long left;
192 unsigned int bs;
193 char *b;
194
195 if (td->io_ops->flags & FIO_PIPEIO)
196 return 0;
197
198 if (!fio_file_open(f)) {
199 if (td->io_ops->open_file(td, f)) {
200 log_err("fio: cannot pre-read, failed to open file\n");
201 return 1;
202 }
203 did_open = 1;
204 }
205
206 old_runstate = td->runstate;
207 td_set_runstate(td, TD_PRE_READING);
208
209 bs = td->o.max_bs[DDIR_READ];
210 b = malloc(bs);
211 memset(b, 0, bs);
212
213 lseek(f->fd, f->file_offset, SEEK_SET);
214 left = f->io_size;
215
216 while (left && !td->terminate) {
217 if (bs > left)
218 bs = left;
219
220 r = read(f->fd, b, bs);
221
222 if (r == (int) bs) {
223 left -= bs;
224 continue;
225 } else {
226 td_verror(td, EIO, "pre_read");
227 break;
228 }
229 }
230
231 td_set_runstate(td, old_runstate);
232
233 if (did_open)
234 td->io_ops->close_file(td, f);
235 free(b);
236 return 0;
237}
238
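/*
 * Pick a random size between file_size_low and file_size_high, rounded
 * down to a multiple of the minimum block size.
 */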
239static unsigned long long get_rand_file_size(struct thread_data *td)
240{
241 unsigned long long ret, sized;
242 unsigned long r;
243
244 if (td->o.use_os_rand) {
245 r = os_random_long(&td->file_size_state);
246 sized = td->o.file_size_high - td->o.file_size_low;
247 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
248 } else {
249 r = __rand(&td->__file_size_state);
250 sized = td->o.file_size_high - td->o.file_size_low;
251 ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
252 }
253
254 ret += td->o.file_size_low;
255 ret -= (ret % td->o.rw_min_bs);
256 return ret;
257}
258
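/*
 * Regular file: the size comes straight from stat(2).
 */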
259static int file_size(struct thread_data *td, struct fio_file *f)
260{
261 struct stat st;
262
263 if (stat(f->file_name, &st) == -1) {
264 td_verror(td, errno, "stat");
265 return 1;
266 }
267
268 f->real_file_size = st.st_size;
269 return 0;
270}
271
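/*
 * Block device: open it and query the device size in bytes.
 */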
272static int bdev_size(struct thread_data *td, struct fio_file *f)
273{
274 unsigned long long bytes = 0;
275 int r;
276
277 if (td->io_ops->open_file(td, f)) {
278 log_err("fio: failed opening blockdev %s for size check\n",
279 f->file_name);
280 return 1;
281 }
282
283 r = blockdev_size(f, &bytes);
284 if (r) {
285 td_verror(td, r, "blockdev_size");
286 goto err;
287 }
288
289 if (!bytes) {
290 log_err("%s: zero sized block device?\n", f->file_name);
291 goto err;
292 }
293
294 f->real_file_size = bytes;
295 td->io_ops->close_file(td, f);
296 return 0;
297err:
298 td->io_ops->close_file(td, f);
299 return 1;
300}
301
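/*
 * Character device: query the size where the platform supports it,
 * otherwise mark the size as unknown (-1ULL).
 */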
302static int char_size(struct thread_data *td, struct fio_file *f)
303{
304#ifdef FIO_HAVE_CHARDEV_SIZE
305 unsigned long long bytes = 0;
306 int r;
307
308 if (td->io_ops->open_file(td, f)) {
309 log_err("fio: failed opening chardev %s for size check\n",
310 f->file_name);
311 return 1;
312 }
313
314 r = chardev_size(f, &bytes);
315 if (r) {
316 td_verror(td, r, "chardev_size");
317 goto err;
318 }
319
320 if (!bytes) {
321 log_err("%s: zero sized char device?\n", f->file_name);
322 goto err;
323 }
324
325 f->real_file_size = bytes;
326 td->io_ops->close_file(td, f);
327 return 0;
328err:
329 td->io_ops->close_file(td, f);
330 return 1;
331#else
332 f->real_file_size = -1ULL;
333 return 0;
334#endif
335}
336
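/*
 * Fill in f->real_file_size based on the file type, and verify that
 * the configured file offset does not point past the end.
 */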
337static int get_file_size(struct thread_data *td, struct fio_file *f)
338{
339 int ret = 0;
340
341 if (fio_file_size_known(f))
342 return 0;
343
344 if (f->filetype == FIO_TYPE_FILE)
345 ret = file_size(td, f);
346 else if (f->filetype == FIO_TYPE_BD)
347 ret = bdev_size(td, f);
348 else if (f->filetype == FIO_TYPE_CHAR)
349 ret = char_size(td, f);
350 else
351 f->real_file_size = -1;
352
353 if (ret)
354 return ret;
355
356 if (f->file_offset > f->real_file_size) {
357 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
358 f->file_offset, f->real_file_size);
359 return 1;
360 }
361
362 fio_file_set_size_known(f);
363 return 0;
364}
365
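/*
 * Drop cached data for the given range: posix_madvise() for mmap'ed
 * files, posix_fadvise() for regular files, and a block device cache
 * flush (root only) for block devices.
 */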
366static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
367 unsigned long long off,
368 unsigned long long len)
369{
370 int ret = 0;
371
372 if (len == -1ULL)
373 len = f->io_size;
374 if (off == -1ULL)
375 off = f->file_offset;
376
377 if (len == -1ULL || off == -1ULL)
378 return 0;
379
380 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
381 len);
382
383 /*
384 * FIXME: add blockdev flushing too
385 */
386 if (f->mmap_ptr) {
387 ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
388#ifdef FIO_MADV_FREE
389 (void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
390#endif
391 } else if (f->filetype == FIO_TYPE_FILE) {
392 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
393 } else if (f->filetype == FIO_TYPE_BD) {
394 ret = blockdev_invalidate_cache(f);
395 if (ret < 0 && errno == EACCES && geteuid()) {
396 if (!root_warn) {
397 log_err("fio: only root may flush block "
398 "devices. Cache flush bypassed!\n");
399 root_warn = 1;
400 }
401 ret = 0;
402 }
403 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
404 ret = 0;
405
406 if (ret < 0) {
407 td_verror(td, errno, "invalidate_cache");
408 return 1;
409 } else if (ret > 0) {
410 td_verror(td, ret, "invalidate_cache");
411 return 1;
412 }
413
414 return ret;
415
416}
417
418int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
419{
420 if (!fio_file_open(f))
421 return 0;
422
423 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
424}
425
426int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
427{
428 int ret = 0;
429
430 dprint(FD_FILE, "fd close %s\n", f->file_name);
431
432 remove_file_hash(f);
433
434 if (close(f->fd) < 0)
435 ret = errno;
436
437 f->fd = -1;
438
439 if (f->shadow_fd != -1) {
440 close(f->shadow_fd);
441 f->shadow_fd = -1;
442 }
443
444 return ret;
445}
446
447int file_lookup_open(struct fio_file *f, int flags)
448{
449 struct fio_file *__f;
450 int from_hash;
451
452 __f = lookup_file_hash(f->file_name);
453 if (__f) {
454 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
455 /*
456 * racy, need the __f->lock locked
457 */
458 f->lock = __f->lock;
459 f->lock_owner = __f->lock_owner;
460 f->lock_batch = __f->lock_batch;
461 f->lock_ddir = __f->lock_ddir;
462 from_hash = 1;
463 } else {
464 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
465 from_hash = 0;
466 }
467
468 f->fd = open(f->file_name, flags, 0600);
469 return from_hash;
470}
471
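/*
 * Close any shadow fds held by this thread's files (see
 * generic_open_file()), e.g. when open() starts failing with EMFILE.
 */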
472static int file_close_shadow_fds(struct thread_data *td)
473{
474 struct fio_file *f;
475 int num_closed = 0;
476 unsigned int i;
477
478 for_each_file(td, f, i) {
479 if (f->shadow_fd == -1)
480 continue;
481
482 close(f->shadow_fd);
483 f->shadow_fd = -1;
484 num_closed++;
485 }
486
487 return num_closed;
488}
489
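/*
 * Default open helper for ioengines: build the open(2) flags from the
 * job options, handle stdin/stdout, and retry without FIO_O_NOATIME or
 * after closing shadow fds if the first attempt fails.
 */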
490int generic_open_file(struct thread_data *td, struct fio_file *f)
491{
492 int is_std = 0;
493 int flags = 0;
494 int from_hash = 0;
495
496 dprint(FD_FILE, "fd open %s\n", f->file_name);
497
498 if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
499 log_err("fio: trim only applies to block device\n");
500 return 1;
501 }
502
503 if (!strcmp(f->file_name, "-")) {
504 if (td_rw(td)) {
505 log_err("fio: can't read/write to stdin/out\n");
506 return 1;
507 }
508 is_std = 1;
509
510 /*
511 * move output logging to stderr, if we are writing to stdout
512 */
513 if (td_write(td))
514 f_out = stderr;
515 }
516
517 if (td_trim(td))
518 goto skip_flags;
519 if (td->o.odirect)
520 flags |= OS_O_DIRECT;
521 if (td->o.sync_io)
522 flags |= O_SYNC;
523 if (td->o.create_on_open)
524 flags |= O_CREAT;
525skip_flags:
526 if (f->filetype != FIO_TYPE_FILE)
527 flags |= FIO_O_NOATIME;
528
529open_again:
530 if (td_write(td)) {
531 if (!read_only)
532 flags |= O_RDWR;
533
534 if (f->filetype == FIO_TYPE_FILE)
535 flags |= O_CREAT;
536
537 if (is_std)
538 f->fd = dup(STDOUT_FILENO);
539 else
540 from_hash = file_lookup_open(f, flags);
541 } else if (td_read(td)) {
542 if (f->filetype == FIO_TYPE_CHAR && !read_only)
543 flags |= O_RDWR;
544 else
545 flags |= O_RDONLY;
546
547 if (is_std)
548 f->fd = dup(STDIN_FILENO);
549 else
550 from_hash = file_lookup_open(f, flags);
551 } else { /* td_trim */
552 flags |= O_RDWR;
553 from_hash = file_lookup_open(f, flags);
554 }
555
556 if (f->fd == -1) {
557 char buf[FIO_VERROR_SIZE];
558 int __e = errno;
559
560 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
561 flags &= ~FIO_O_NOATIME;
562 goto open_again;
563 }
564 if (__e == EMFILE && file_close_shadow_fds(td))
565 goto open_again;
566
567 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
568
569 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
570 log_err("fio: looks like your file system does not " \
571 "support direct=1/buffered=0\n");
572 }
573
574 td_verror(td, __e, buf);
575 }
576
577 if (!from_hash && f->fd != -1) {
578 if (add_file_hash(f)) {
579 int fio_unused ret;
580
581 /*
582 * Stash away descriptor for later close. This is to
583 * work-around a "feature" on Linux, where a close of
584 * an fd that has been opened for write will trigger
585 * udev to call blkid to check partitions, fs id, etc.
586 * That pollutes the device cache, which can slow down
587 * unbuffered accesses.
588 */
589 if (f->shadow_fd == -1)
590 f->shadow_fd = f->fd;
591 else {
592 /*
593 * OK to ignore, we haven't done anything
594 * with it
595 */
596 ret = generic_close_file(td, f);
597 }
598 goto open_again;
599 }
600 }
601
602 return 0;
603}
604
605int generic_get_file_size(struct thread_data *td, struct fio_file *f)
606{
607 return get_file_size(td, f);
608}
609
610/*
611 * open/close all files, so that ->real_file_size gets set
612 */
613static int get_file_sizes(struct thread_data *td)
614{
615 struct fio_file *f;
616 unsigned int i;
617 int err = 0;
618
619 for_each_file(td, f, i) {
620 dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
621 f->file_name);
622
623 if (td_io_get_file_size(td, f)) {
624 if (td->error != ENOENT) {
625 log_err("%s\n", td->verror);
626 err = 1;
627 }
628 clear_error(td);
629 }
630
631 if (f->real_file_size == -1ULL && td->o.size)
632 f->real_file_size = td->o.size / td->o.nr_files;
633 }
634
635 return err;
636}
637
638struct fio_mount {
639 struct flist_head list;
640 const char *base;
641 char __base[256];
642 unsigned int key;
643};
644
645/*
646 * Get the number of free bytes on each unique mount the files live on.
647 */
648static unsigned long long get_fs_free_counts(struct thread_data *td)
649{
650 struct flist_head *n, *tmp;
651 unsigned long long ret = 0;
652 struct fio_mount *fm;
653 FLIST_HEAD(list);
654 struct fio_file *f;
655 unsigned int i;
656
657 for_each_file(td, f, i) {
658 struct stat sb;
659 char buf[256];
660
661 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
662 if (f->real_file_size != -1ULL)
663 ret += f->real_file_size;
664 continue;
665 } else if (f->filetype != FIO_TYPE_FILE)
666 continue;
667
668 strcpy(buf, f->file_name);
669
670 if (stat(buf, &sb) < 0) {
671 if (errno != ENOENT)
672 break;
673 strcpy(buf, ".");
674 if (stat(buf, &sb) < 0)
675 break;
676 }
677
678 fm = NULL;
679 flist_for_each(n, &list) {
680 fm = flist_entry(n, struct fio_mount, list);
681 if (fm->key == sb.st_dev)
682 break;
683
684 fm = NULL;
685 }
686
687 if (fm)
688 continue;
689
690 fm = malloc(sizeof(*fm));
691 strcpy(fm->__base, buf);
692 fm->base = basename(fm->__base);
693 fm->key = sb.st_dev;
694 flist_add(&fm->list, &list);
695 }
696
697 flist_for_each_safe(n, tmp, &list) {
698 unsigned long long sz;
699
700 fm = flist_entry(n, struct fio_mount, list);
701 flist_del(&fm->list);
702
703 sz = get_fs_size(fm->base);
704 if (sz && sz != -1ULL)
705 ret += sz;
706
707 free(fm);
708 }
709
710 return ret;
711}
712
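/*
 * Per-thread start offset: the base offset plus offset_increment for
 * every thread beyond the first.
 */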
713uint64_t get_start_offset(struct thread_data *td)
714{
715 return td->o.start_offset +
716 (td->thread_number - 1) * td->o.offset_increment;
717}
718
719/*
720 * Open the files and set up file sizes, creating files if necessary.
721 */
722int setup_files(struct thread_data *td)
723{
724 unsigned long long total_size, extend_size;
725 struct fio_file *f;
726 unsigned int i;
727 int err = 0, need_extend;
728
729 dprint(FD_FILE, "setup files\n");
730
731 if (td->o.read_iolog_file)
732 goto done;
733
734 /*
735 * if ioengine defines a setup() method, it's responsible for
736 * opening the files and setting f->real_file_size to indicate
737 * the valid range for that file.
738 */
739 if (td->io_ops->setup)
740 err = td->io_ops->setup(td);
741 else
742 err = get_file_sizes(td);
743
744 if (err)
745 return err;
746
747 /*
748 * check sizes. if the files/devices do not exist and the size
749 * isn't passed to fio, abort.
750 */
751 total_size = 0;
752 for_each_file(td, f, i) {
753 if (f->real_file_size == -1ULL)
754 total_size = -1ULL;
755 else
756 total_size += f->real_file_size;
757 }
758
759 if (td->o.fill_device)
760 td->fill_device_size = get_fs_free_counts(td);
761
762 /*
763 * device/file sizes are zero and no size given, punt
764 */
765 if ((!total_size || total_size == -1ULL) && !td->o.size &&
766 !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
767 log_err("%s: you need to specify size=\n", td->o.name);
768 td_verror(td, EINVAL, "total_file_size");
769 return 1;
770 }
771
772 /*
773 * now file sizes are known, so we can set ->io_size. if size= is
774 * not given, ->io_size is just equal to ->real_file_size. if size
775 * is given, ->io_size is size / nr_files.
776 */
777 extend_size = total_size = 0;
778 need_extend = 0;
779 for_each_file(td, f, i) {
780 f->file_offset = get_start_offset(td);
781
782 if (!td->o.file_size_low) {
783 /*
784 * no file size range given, file size is equal to
785 * total size divided by number of files. if that is
786 * zero, set it to the real file size.
787 */
788 f->io_size = td->o.size / td->o.nr_files;
789 if (!f->io_size)
790 f->io_size = f->real_file_size - f->file_offset;
791 } else if (f->real_file_size < td->o.file_size_low ||
792 f->real_file_size > td->o.file_size_high) {
793 if (f->file_offset > td->o.file_size_low)
794 goto err_offset;
795 /*
796 * file size given. if it's fixed, use that. if it's a
797 * range, generate a random size in-between.
798 */
799 if (td->o.file_size_low == td->o.file_size_high) {
800 f->io_size = td->o.file_size_low
801 - f->file_offset;
802 } else {
803 f->io_size = get_rand_file_size(td)
804 - f->file_offset;
805 }
806 } else
807 f->io_size = f->real_file_size - f->file_offset;
808
809 if (f->io_size == -1ULL)
810 total_size = -1ULL;
811 else {
812 if (td->o.size_percent)
813 f->io_size = (f->io_size * td->o.size_percent) / 100;
814 total_size += f->io_size;
815 }
816
817 if (f->filetype == FIO_TYPE_FILE &&
818 (f->io_size + f->file_offset) > f->real_file_size &&
819 !(td->io_ops->flags & FIO_DISKLESSIO)) {
820 if (!td->o.create_on_open) {
821 need_extend++;
822 extend_size += (f->io_size + f->file_offset);
823 } else
824 f->real_file_size = f->io_size + f->file_offset;
825 fio_file_set_extend(f);
826 }
827 }
828
829 if (!td->o.size || td->o.size > total_size)
830 td->o.size = total_size;
831
832 /*
833 * See if we need to extend some files
834 */
835 if (need_extend) {
836 temp_stall_ts = 1;
837 if (output_format == FIO_OUTPUT_NORMAL)
838 log_info("%s: Laying out IO file(s) (%u file(s) /"
839 " %lluMB)\n", td->o.name, need_extend,
840 extend_size >> 20);
841
842 for_each_file(td, f, i) {
843 unsigned long long old_len = -1ULL, extend_len = -1ULL;
844
845 if (!fio_file_extend(f))
846 continue;
847
848 assert(f->filetype == FIO_TYPE_FILE);
849 fio_file_clear_extend(f);
850 if (!td->o.fill_device) {
851 old_len = f->real_file_size;
852 extend_len = f->io_size + f->file_offset -
853 old_len;
854 }
855 f->real_file_size = (f->io_size + f->file_offset);
856 err = extend_file(td, f);
857 if (err)
858 break;
859
860 err = __file_invalidate_cache(td, f, old_len,
861 extend_len);
862 close(f->fd);
863 f->fd = -1;
864 if (err)
865 break;
866 }
867 temp_stall_ts = 0;
868 }
869
870 if (err)
871 return err;
872
873 if (!td->o.zone_size)
874 td->o.zone_size = td->o.size;
875
876 /*
877 * iolog already set the total io size, if we read back
878 * stored entries.
879 */
880 if (!td->o.read_iolog_file)
881 td->total_io_size = td->o.size * td->o.loops;
882
883done:
884 if (td->o.create_only)
885 td->done = 1;
886
887 return 0;
888err_offset:
889 log_err("%s: you need to specify valid offset=\n", td->o.name);
890 return 1;
891}
892
893int pre_read_files(struct thread_data *td)
894{
895 struct fio_file *f;
896 unsigned int i;
897
898 dprint(FD_FILE, "pre_read files\n");
899
900 for_each_file(td, f, i) {
901 pre_read_file(td, f);
902 }
903
904 return 1;
905}
906
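/*
 * Initialize the zipf/pareto state for one file, sizing the number of
 * ranges from the smallest configured block size.
 */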
907static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
908{
909 unsigned int range_size, seed;
910 unsigned long nranges;
911 uint64_t file_size;
912
913 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
914 file_size = min(f->real_file_size, f->io_size);
915
916 nranges = (file_size + range_size - 1) / range_size;
917
918 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
919 if (!td->o.rand_repeatable)
920 seed = td->rand_seeds[4];
921
922 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
923 zipf_init(&f->zipf, nranges, td->o.zipf_theta, seed);
924 else
925 pareto_init(&f->zipf, nranges, td->o.pareto_h, seed);
926
927 return 1;
928}
929
930static int init_rand_distribution(struct thread_data *td)
931{
932 struct fio_file *f;
933 unsigned int i;
934 int state;
935
936 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
937 return 0;
938
939 state = td->runstate;
940 td_set_runstate(td, TD_SETTING_UP);
941 for_each_file(td, f, i)
942 __init_rand_distribution(td, f);
943 td_set_runstate(td, state);
944
945 return 1;
946}
947
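/*
 * Set up tracking of which blocks have been touched: LFSR state for
 * the lfsr generator, otherwise an axmap unless norandommap is set.
 */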
948int init_random_map(struct thread_data *td)
949{
950 unsigned long long blocks;
951 struct fio_file *f;
952 unsigned int i;
953
954 if (init_rand_distribution(td))
955 return 0;
956 if (!td_random(td))
957 return 0;
958
959 for_each_file(td, f, i) {
960 uint64_t file_size = min(f->real_file_size, f->io_size);
961
962 blocks = file_size / (unsigned long long) td->o.rw_min_bs;
963
964 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
965 unsigned long seed;
966
967 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
968
969 if (!lfsr_init(&f->lfsr, blocks, seed, seed & 0xF))
970 continue;
971 } else if (!td->o.norandommap) {
972 f->io_axmap = axmap_new(blocks);
973 if (f->io_axmap)
974 continue;
975 } else if (td->o.norandommap)
976 continue;
977
978 if (!td->o.softrandommap) {
979 log_err("fio: failed allocating random map. If running"
980 " a large number of jobs, try the 'norandommap'"
981 " option or set 'softrandommap'. Or give"
982 " a larger --alloc-size to fio.\n");
983 return 1;
984 }
985
986 log_info("fio: file %s failed allocating random map. Running "
987 "job without.\n", f->file_name);
988 }
989
990 return 0;
991}
992
993void close_files(struct thread_data *td)
994{
995 struct fio_file *f;
996 unsigned int i;
997
998 for_each_file(td, f, i) {
999 if (fio_file_open(f))
1000 td_io_close_file(td, f);
1001 }
1002}
1003
1004void close_and_free_files(struct thread_data *td)
1005{
1006 struct fio_file *f;
1007 unsigned int i;
1008
1009 dprint(FD_FILE, "close files\n");
1010
1011 for_each_file(td, f, i) {
1012 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1013 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1014 unlink(f->file_name);
1015 }
1016
1017 if (fio_file_open(f))
1018 td_io_close_file(td, f);
1019
1020 remove_file_hash(f);
1021
1022 sfree(f->file_name);
1023 f->file_name = NULL;
1024 axmap_free(f->io_axmap);
1025 f->io_axmap = NULL;
1026 sfree(f);
1027 }
1028
1029 td->o.filename = NULL;
1030 free(td->files);
1031 td->files_index = 0;
1032 td->files = NULL;
1033 td->o.nr_files = 0;
1034}
1035
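/*
 * Classify a file as a regular file, block device, character device or
 * pipe, based on its name and stat() information.
 */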
1036static void get_file_type(struct fio_file *f)
1037{
1038 struct stat sb;
1039
1040 if (!strcmp(f->file_name, "-"))
1041 f->filetype = FIO_TYPE_PIPE;
1042 else
1043 f->filetype = FIO_TYPE_FILE;
1044
1045 /* \\.\ is the device namespace in Windows, where every file is
1046 * a block device */
1047 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1048 f->filetype = FIO_TYPE_BD;
1049
1050 if (!stat(f->file_name, &sb)) {
1051 if (S_ISBLK(sb.st_mode))
1052 f->filetype = FIO_TYPE_BD;
1053 else if (S_ISCHR(sb.st_mode))
1054 f->filetype = FIO_TYPE_CHAR;
1055 else if (S_ISFIFO(sb.st_mode))
1056 f->filetype = FIO_TYPE_PIPE;
1057 }
1058}
1059
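/*
 * Allocate a new fio_file, build its name (prefixed with the job
 * directory, if given), grow the td->files array and set up file
 * locking as configured.
 */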
1060int add_file(struct thread_data *td, const char *fname)
1061{
1062 int cur_files = td->files_index;
1063 char file_name[PATH_MAX];
1064 struct fio_file *f;
1065 int len = 0;
1066
1067 dprint(FD_FILE, "add file %s\n", fname);
1068
1069 f = smalloc(sizeof(*f));
1070 if (!f) {
1071 log_err("fio: smalloc OOM\n");
1072 assert(0);
1073 }
1074
1075 f->fd = -1;
1076 f->shadow_fd = -1;
1077 fio_file_reset(td, f);
1078
1079 if (td->files_size <= td->files_index) {
1080 unsigned int new_size = td->o.nr_files + 1;
1081
1082 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1083
1084 td->files = realloc(td->files, new_size * sizeof(f));
1085 if (td->files == NULL) {
1086 log_err("fio: realloc OOM\n");
1087 assert(0);
1088 }
1089 td->files_size = new_size;
1090 }
1091 td->files[cur_files] = f;
1092 f->fileno = cur_files;
1093
1094 /*
1095 * init function, io engine may not be loaded yet
1096 */
1097 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1098 f->real_file_size = -1ULL;
1099
1100 if (td->o.directory)
1101 len = sprintf(file_name, "%s/", td->o.directory);
1102
1103 sprintf(file_name + len, "%s", fname);
1104 f->file_name = smalloc_strdup(file_name);
1105 if (!f->file_name) {
1106 log_err("fio: smalloc OOM\n");
1107 assert(0);
1108 }
1109
1110 get_file_type(f);
1111
1112 switch (td->o.file_lock_mode) {
1113 case FILE_LOCK_NONE:
1114 break;
1115 case FILE_LOCK_READWRITE:
1116 f->lock = fio_mutex_rw_init();
1117 break;
1118 case FILE_LOCK_EXCLUSIVE:
1119 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1120 break;
1121 default:
1122 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1123 assert(0);
1124 }
1125
1126 td->files_index++;
1127 if (f->filetype == FIO_TYPE_FILE)
1128 td->nr_normal_files++;
1129
1130 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1131 cur_files);
1132
1133 return cur_files;
1134}
1135
1136int add_file_exclusive(struct thread_data *td, const char *fname)
1137{
1138 struct fio_file *f;
1139 unsigned int i;
1140
1141 for_each_file(td, f, i) {
1142 if (!strcmp(f->file_name, fname))
1143 return i;
1144 }
1145
1146 return add_file(td, fname);
1147}
1148
1149void get_file(struct fio_file *f)
1150{
1151 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1152 assert(fio_file_open(f));
1153 f->references++;
1154}
1155
1156int put_file(struct thread_data *td, struct fio_file *f)
1157{
1158 int f_ret = 0, ret = 0;
1159
1160 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1161
1162 if (!fio_file_open(f)) {
1163 assert(f->fd == -1);
1164 return 0;
1165 }
1166
1167 assert(f->references);
1168 if (--f->references)
1169 return 0;
1170
1171 if (should_fsync(td) && td->o.fsync_on_close)
1172 f_ret = fsync(f->fd);
1173
1174 if (td->io_ops->close_file)
1175 ret = td->io_ops->close_file(td, f);
1176
1177 if (!ret)
1178 ret = f_ret;
1179
1180 td->nr_open_files--;
1181 fio_file_clear_open(f);
1182 assert(f->fd == -1);
1183 return ret;
1184}
1185
1186void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1187{
1188 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1189 return;
1190
1191 if (f->lock_owner == td && f->lock_batch--)
1192 return;
1193
1194 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1195 if (ddir == DDIR_READ)
1196 fio_mutex_down_read(f->lock);
1197 else
1198 fio_mutex_down_write(f->lock);
1199 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1200 fio_mutex_down(f->lock);
1201
1202 f->lock_owner = td;
1203 f->lock_batch = td->o.lockfile_batch;
1204 f->lock_ddir = ddir;
1205}
1206
1207void unlock_file(struct thread_data *td, struct fio_file *f)
1208{
1209 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1210 return;
1211 if (f->lock_batch)
1212 return;
1213
1214 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1215 const int is_read = f->lock_ddir == DDIR_READ;
1216 int val = fio_mutex_getval(f->lock);
1217
1218 if ((is_read && val == 1) || (!is_read && val == -1))
1219 f->lock_owner = NULL;
1220
1221 if (is_read)
1222 fio_mutex_up_read(f->lock);
1223 else
1224 fio_mutex_up_write(f->lock);
1225 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
1226 int val = fio_mutex_getval(f->lock);
1227
1228 if (val == 0)
1229 f->lock_owner = NULL;
1230
1231 fio_mutex_up(f->lock);
1232 }
1233}
1234
1235void unlock_file_all(struct thread_data *td, struct fio_file *f)
1236{
1237 if (f->lock_owner != td)
1238 return;
1239
1240 f->lock_batch = 0;
1241 unlock_file(td, f);
1242}
1243
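/*
 * Add every regular file below the given directory to the job,
 * descending into subdirectories.
 */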
1244static int recurse_dir(struct thread_data *td, const char *dirname)
1245{
1246 struct dirent *dir;
1247 int ret = 0;
1248 DIR *D;
1249
1250 D = opendir(dirname);
1251 if (!D) {
1252 char buf[FIO_VERROR_SIZE];
1253
1254 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1255 td_verror(td, errno, buf);
1256 return 1;
1257 }
1258
1259 while ((dir = readdir(D)) != NULL) {
1260 char full_path[PATH_MAX];
1261 struct stat sb;
1262
1263 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1264 continue;
1265
1266 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1267
1268 if (lstat(full_path, &sb) == -1) {
1269 if (errno != ENOENT) {
1270 td_verror(td, errno, "lstat");
1271 return 1;
1272 }
 /* entry went away between readdir() and lstat(), skip it */
 continue;
1273 }
1274
1275 if (S_ISREG(sb.st_mode)) {
1276 add_file(td, full_path);
1277 td->o.nr_files++;
1278 continue;
1279 }
1280 if (!S_ISDIR(sb.st_mode))
1281 continue;
1282
1283 ret = recurse_dir(td, full_path);
1284 if (ret)
1285 break;
1286 }
1287
1288 closedir(D);
1289 return ret;
1290}
1291
1292int add_dir_files(struct thread_data *td, const char *path)
1293{
1294 int ret = recurse_dir(td, path);
1295
1296 if (!ret)
1297 log_info("fio: opendir added %d files\n", td->o.nr_files);
1298
1299 return ret;
1300}
1301
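/*
 * Duplicate another thread's file list, allocating fresh fio_file
 * entries with the same names and file types.
 */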
1302void dup_files(struct thread_data *td, struct thread_data *org)
1303{
1304 struct fio_file *f;
1305 unsigned int i;
1306
1307 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1308
1309 if (!org->files)
1310 return;
1311
1312 td->files = malloc(org->files_index * sizeof(f));
1313
1314 for_each_file(org, f, i) {
1315 struct fio_file *__f;
1316
1317 __f = smalloc(sizeof(*__f));
1318 if (!__f) {
1319 log_err("fio: smalloc OOM\n");
1320 assert(0);
1321 }
1322 __f->fd = -1;
1323 fio_file_reset(td, __f);
1324
1325 if (f->file_name) {
1326 __f->file_name = smalloc_strdup(f->file_name);
1327 if (!__f->file_name) {
1328 log_err("fio: smalloc OOM\n");
1329 assert(0);
1330 }
1331
1332 __f->filetype = f->filetype;
1333 }
1334
1335 td->files[i] = __f;
1336 }
1337}
1338
1339/*
1340 * Returns the index that matches the filename, or -1 if not there
1341 */
1342int get_fileno(struct thread_data *td, const char *fname)
1343{
1344 struct fio_file *f;
1345 unsigned int i;
1346
1347 for_each_file(td, f, i)
1348 if (!strcmp(f->file_name, fname))
1349 return i;
1350
1351 return -1;
1352}
1353
1354/*
1355 * For log usage, where we add/open/close files automatically
1356 */
1357void free_release_files(struct thread_data *td)
1358{
1359 close_files(td);
1360 td->files_index = 0;
1361 td->nr_normal_files = 0;
1362}
1363
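/*
 * Reset per-file IO state: last position, last start, and the random
 * block tracking (axmap or LFSR).
 */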
1364void fio_file_reset(struct thread_data *td, struct fio_file *f)
1365{
1366 f->last_pos = f->file_offset;
1367 f->last_start = -1ULL;
1368 if (f->io_axmap)
1369 axmap_reset(f->io_axmap);
1370 if (td->o.random_generator == FIO_RAND_GEN_LFSR)
1371 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1372}