file: fix potential buffer overrun in get_fs_free_counts()
[fio.git] / filesetup.c
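Judging from the code visible in get_fs_free_counts() below, the overrun guard is likely the bounded, explicitly nul-terminated copy of f->file_name into the fixed 256-byte scratch buffer (buf[255] = '\0'; strncpy(buf, f->file_name, 255);), which in turn keeps the later strcpy() into the same-sized fm->__base in bounds. A minimal sketch of that pattern, using a hypothetical helper name (copy_name) rather than the commit's actual diff:

#include <string.h>

/*
 * Illustrative sketch only (not part of filesetup.c): copy an arbitrarily
 * long name into a fixed-size buffer without overrunning it and without
 * leaving it unterminated, mirroring the buf[256] handling seen in
 * get_fs_free_counts() in the listing below.
 */
static void copy_name(char *dst, size_t dst_len, const char *src)
{
	dst[dst_len - 1] = '\0';		/* guarantee termination up front */
	strncpy(dst, src, dst_len - 1);		/* never write past the buffer */
}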
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29 td->error = 0;
30 td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38 int r, new_layout = 0, unlink_file = 0, flags;
39 unsigned long long left;
40 unsigned int bs;
41 char *b = NULL;
42
43 if (read_only) {
44 log_err("fio: refusing extend of file due to read-only\n");
45 return 0;
46 }
47
48 /*
49 * check if we need to lay the file out complete again. fio
50 * does that for operations involving reads, or for writes
51 * where overwrite is set
52 */
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
56 new_layout = 1;
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58 unlink_file = 1;
59
60 if (unlink_file || new_layout) {
61 dprint(FD_FILE, "layout unlink %s\n", f->file_name);
62 if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
63 td_verror(td, errno, "unlink");
64 return 1;
65 }
66 }
67
68 flags = O_WRONLY | O_CREAT;
69 if (new_layout)
70 flags |= O_TRUNC;
71
72 dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
73 f->fd = open(f->file_name, flags, 0644);
74 if (f->fd < 0) {
75 td_verror(td, errno, "open");
76 return 1;
77 }
78
79#ifdef CONFIG_POSIX_FALLOCATE
80 if (!td->o.fill_device) {
81 switch (td->o.fallocate_mode) {
82 case FIO_FALLOCATE_NONE:
83 break;
84 case FIO_FALLOCATE_POSIX:
85 dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
86 f->file_name,
87 (unsigned long long) f->real_file_size);
88
89 r = posix_fallocate(f->fd, 0, f->real_file_size);
90 if (r > 0) {
91 log_err("fio: posix_fallocate fails: %s\n",
92 strerror(r));
93 }
94 break;
95#ifdef CONFIG_LINUX_FALLOCATE
96 case FIO_FALLOCATE_KEEP_SIZE:
97 dprint(FD_FILE,
98 "fallocate(FALLOC_FL_KEEP_SIZE) "
99 "file %s size %llu\n", f->file_name,
100 (unsigned long long) f->real_file_size);
101
102 r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
103 f->real_file_size);
104 if (r != 0)
105 td_verror(td, errno, "fallocate");
106
107 break;
108#endif /* CONFIG_LINUX_FALLOCATE */
109 default:
110 log_err("fio: unknown fallocate mode: %d\n",
111 td->o.fallocate_mode);
112 assert(0);
113 }
114 }
115#endif /* CONFIG_POSIX_FALLOCATE */
116
117 if (!new_layout)
118 goto done;
119
120 /*
121 * The size will be -1ULL when fill_device is used, so don't truncate
122 * or fallocate this file, just write it
123 */
124 if (!td->o.fill_device) {
125 dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
126 (unsigned long long) f->real_file_size);
127 if (ftruncate(f->fd, f->real_file_size) == -1) {
128 if (errno != EFBIG) {
129 td_verror(td, errno, "ftruncate");
130 goto err;
131 }
132 }
133 }
134
135 b = malloc(td->o.max_bs[DDIR_WRITE]);
136
137 left = f->real_file_size;
138 while (left && !td->terminate) {
139 bs = td->o.max_bs[DDIR_WRITE];
140 if (bs > left)
141 bs = left;
142
143 fill_io_buffer(td, b, bs, bs);
144
145 r = write(f->fd, b, bs);
146
147 if (r > 0) {
148 left -= r;
149 continue;
150 } else {
151 if (r < 0) {
152 int __e = errno;
153
154 if (__e == ENOSPC) {
155 if (td->o.fill_device)
156 break;
157 log_info("fio: ENOSPC on laying out "
158 "file, stopping\n");
159 break;
160 }
161 td_verror(td, errno, "write");
162 } else
163 td_verror(td, EIO, "write");
164
165 break;
166 }
167 }
168
169 if (td->terminate) {
170 dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
171 unlink(f->file_name);
172 } else if (td->o.create_fsync) {
173 if (fsync(f->fd) < 0) {
174 td_verror(td, errno, "fsync");
175 goto err;
176 }
177 }
178 if (td->o.fill_device && !td_write(td)) {
179 fio_file_clear_size_known(f);
180 if (td_io_get_file_size(td, f))
181 goto err;
182 if (f->io_size > f->real_file_size)
183 f->io_size = f->real_file_size;
184 }
185
186 free(b);
187done:
188 return 0;
189err:
190 close(f->fd);
191 f->fd = -1;
192 if (b)
193 free(b);
194 return 1;
195}
196
197static int pre_read_file(struct thread_data *td, struct fio_file *f)
198{
199 int ret = 0, r, did_open = 0, old_runstate;
200 unsigned long long left;
201 unsigned int bs;
202 char *b;
203
204 if (td->io_ops->flags & FIO_PIPEIO)
205 return 0;
206
207 if (!fio_file_open(f)) {
208 if (td->io_ops->open_file(td, f)) {
209 log_err("fio: cannot pre-read, failed to open file\n");
210 return 1;
211 }
212 did_open = 1;
213 }
214
215 old_runstate = td_bump_runstate(td, TD_PRE_READING);
216
217 bs = td->o.max_bs[DDIR_READ];
218 b = malloc(bs);
219 memset(b, 0, bs);
220
221 if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
222 td_verror(td, errno, "lseek");
223 log_err("fio: failed to lseek pre-read file\n");
224 ret = 1;
225 goto error;
226 }
227
228 left = f->io_size;
229
230 while (left && !td->terminate) {
231 if (bs > left)
232 bs = left;
233
234 r = read(f->fd, b, bs);
235
236 if (r == (int) bs) {
237 left -= bs;
238 continue;
239 } else {
240 td_verror(td, EIO, "pre_read");
241 break;
242 }
243 }
244
245error:
246 td_restore_runstate(td, old_runstate);
247
248 if (did_open)
249 td->io_ops->close_file(td, f);
250
251 free(b);
252 return ret;
253}
254
255static unsigned long long get_rand_file_size(struct thread_data *td)
256{
257 unsigned long long ret, sized;
258 unsigned long r;
259
260 if (td->o.use_os_rand) {
261 r = os_random_long(&td->file_size_state);
262 sized = td->o.file_size_high - td->o.file_size_low;
263 ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
264 } else {
265 r = __rand(&td->__file_size_state);
266 sized = td->o.file_size_high - td->o.file_size_low;
267 ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
268 }
269
270 ret += td->o.file_size_low;
271 ret -= (ret % td->o.rw_min_bs);
272 return ret;
273}
274
275static int file_size(struct thread_data *td, struct fio_file *f)
276{
277 struct stat st;
278
279 if (stat(f->file_name, &st) == -1) {
280 td_verror(td, errno, "fstat");
281 return 1;
282 }
283
284 f->real_file_size = st.st_size;
285 return 0;
286}
287
288static int bdev_size(struct thread_data *td, struct fio_file *f)
289{
290 unsigned long long bytes = 0;
291 int r;
292
293 if (td->io_ops->open_file(td, f)) {
294 log_err("fio: failed opening blockdev %s for size check\n",
295 f->file_name);
296 return 1;
297 }
298
299 r = blockdev_size(f, &bytes);
300 if (r) {
301 td_verror(td, r, "blockdev_size");
302 goto err;
303 }
304
305 if (!bytes) {
306 log_err("%s: zero sized block device?\n", f->file_name);
307 goto err;
308 }
309
310 f->real_file_size = bytes;
311 td->io_ops->close_file(td, f);
312 return 0;
313err:
314 td->io_ops->close_file(td, f);
315 return 1;
316}
317
318static int char_size(struct thread_data *td, struct fio_file *f)
319{
320#ifdef FIO_HAVE_CHARDEV_SIZE
321 unsigned long long bytes = 0;
322 int r;
323
324 if (td->io_ops->open_file(td, f)) {
325 log_err("fio: failed opening blockdev %s for size check\n",
326 f->file_name);
327 return 1;
328 }
329
330 r = chardev_size(f, &bytes);
331 if (r) {
332 td_verror(td, r, "chardev_size");
333 goto err;
334 }
335
336 if (!bytes) {
337 log_err("%s: zero sized char device?\n", f->file_name);
338 goto err;
339 }
340
341 f->real_file_size = bytes;
342 td->io_ops->close_file(td, f);
343 return 0;
344err:
345 td->io_ops->close_file(td, f);
346 return 1;
347#else
348 f->real_file_size = -1ULL;
349 return 0;
350#endif
351}
352
353static int get_file_size(struct thread_data *td, struct fio_file *f)
354{
355 int ret = 0;
356
357 if (fio_file_size_known(f))
358 return 0;
359
360 if (f->filetype == FIO_TYPE_FILE)
361 ret = file_size(td, f);
362 else if (f->filetype == FIO_TYPE_BD)
363 ret = bdev_size(td, f);
364 else if (f->filetype == FIO_TYPE_CHAR)
365 ret = char_size(td, f);
366 else
367 f->real_file_size = -1;
368
369 if (ret)
370 return ret;
371
372 if (f->file_offset > f->real_file_size) {
373 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
374 (unsigned long long) f->file_offset,
375 (unsigned long long) f->real_file_size);
376 return 1;
377 }
378
379 fio_file_set_size_known(f);
380 return 0;
381}
382
383static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
384 unsigned long long off,
385 unsigned long long len)
386{
387 int ret = 0;
388
389 if (len == -1ULL)
390 len = f->io_size;
391 if (off == -1ULL)
392 off = f->file_offset;
393
394 if (len == -1ULL || off == -1ULL)
395 return 0;
396
397 dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
398 len);
399
400 if (f->mmap_ptr) {
401 ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
402#ifdef FIO_MADV_FREE
403 if (f->filetype == FIO_TYPE_BD)
404 (void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
405#endif
406 } else if (f->filetype == FIO_TYPE_FILE) {
407 ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
408 } else if (f->filetype == FIO_TYPE_BD) {
409 ret = blockdev_invalidate_cache(f);
410 if (ret < 0 && errno == EACCES && geteuid()) {
411 if (!root_warn) {
412 log_err("fio: only root may flush block "
413 "devices. Cache flush bypassed!\n");
414 root_warn = 1;
415 }
416 ret = 0;
417 }
418 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
419 ret = 0;
420
421 /*
422 * Cache flushing isn't a fatal condition, and we know it will
423 * happen on some platforms where we don't have the proper
424 * function to flush eg block device caches. So just warn and
425 * continue on our way.
426 */
427 if (ret) {
428 log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errno));
429 ret = 0;
430 }
431
432 return 0;
433
434}
435
436int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
437{
438 if (!fio_file_open(f))
439 return 0;
440
441 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
442}
443
444int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
445{
446 int ret = 0;
447
448 dprint(FD_FILE, "fd close %s\n", f->file_name);
449
450 remove_file_hash(f);
451
452 if (close(f->fd) < 0)
453 ret = errno;
454
455 f->fd = -1;
456
457 if (f->shadow_fd != -1) {
458 close(f->shadow_fd);
459 f->shadow_fd = -1;
460 }
461
462 f->engine_data = 0;
463 return ret;
464}
465
466int file_lookup_open(struct fio_file *f, int flags)
467{
468 struct fio_file *__f;
469 int from_hash;
470
471 __f = lookup_file_hash(f->file_name);
472 if (__f) {
473 dprint(FD_FILE, "found file in hash %s\n", f->file_name);
474 /*
475 * racy, need the __f->lock locked
476 */
477 f->lock = __f->lock;
478 from_hash = 1;
479 } else {
480 dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
481 from_hash = 0;
482 }
483
484 f->fd = open(f->file_name, flags, 0600);
485 return from_hash;
486}
487
488static int file_close_shadow_fds(struct thread_data *td)
489{
490 struct fio_file *f;
491 int num_closed = 0;
492 unsigned int i;
493
494 for_each_file(td, f, i) {
495 if (f->shadow_fd == -1)
496 continue;
497
498 close(f->shadow_fd);
499 f->shadow_fd = -1;
500 num_closed++;
501 }
502
503 return num_closed;
504}
505
506int generic_open_file(struct thread_data *td, struct fio_file *f)
507{
508 int is_std = 0;
509 int flags = 0;
510 int from_hash = 0;
511
512 dprint(FD_FILE, "fd open %s\n", f->file_name);
513
514 if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
515 log_err("fio: trim only applies to block device\n");
516 return 1;
517 }
518
519 if (!strcmp(f->file_name, "-")) {
520 if (td_rw(td)) {
521 log_err("fio: can't read/write to stdin/out\n");
522 return 1;
523 }
524 is_std = 1;
525
526 /*
527 * move output logging to stderr, if we are writing to stdout
528 */
529 if (td_write(td))
530 f_out = stderr;
531 }
532
533 if (td_trim(td))
534 goto skip_flags;
535 if (td->o.odirect)
536 flags |= OS_O_DIRECT;
537 if (td->o.oatomic) {
538 if (!FIO_O_ATOMIC) {
539 td_verror(td, EINVAL, "OS does not support atomic IO");
540 return 1;
541 }
542 flags |= OS_O_DIRECT | FIO_O_ATOMIC;
543 }
544 if (td->o.sync_io)
545 flags |= O_SYNC;
546 if (td->o.create_on_open)
547 flags |= O_CREAT;
548skip_flags:
549 if (f->filetype != FIO_TYPE_FILE)
550 flags |= FIO_O_NOATIME;
551
552open_again:
553 if (td_write(td)) {
554 if (!read_only)
555 flags |= O_RDWR;
556
557 if (f->filetype == FIO_TYPE_FILE)
558 flags |= O_CREAT;
559
560 if (is_std)
561 f->fd = dup(STDOUT_FILENO);
562 else
563 from_hash = file_lookup_open(f, flags);
564 } else if (td_read(td)) {
565 if (f->filetype == FIO_TYPE_CHAR && !read_only)
566 flags |= O_RDWR;
567 else
568 flags |= O_RDONLY;
569
570 if (is_std)
571 f->fd = dup(STDIN_FILENO);
572 else
573 from_hash = file_lookup_open(f, flags);
574 } else { //td trim
575 flags |= O_RDWR;
576 from_hash = file_lookup_open(f, flags);
577 }
578
579 if (f->fd == -1) {
580 char buf[FIO_VERROR_SIZE];
581 int __e = errno;
582
583 if (__e == EPERM && (flags & FIO_O_NOATIME)) {
584 flags &= ~FIO_O_NOATIME;
585 goto open_again;
586 }
587 if (__e == EMFILE && file_close_shadow_fds(td))
588 goto open_again;
589
590 snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
591
592 if (__e == EINVAL && (flags & OS_O_DIRECT)) {
593 log_err("fio: looks like your file system does not " \
594 "support direct=1/buffered=0\n");
595 }
596
597 td_verror(td, __e, buf);
598 }
599
600 if (!from_hash && f->fd != -1) {
601 if (add_file_hash(f)) {
602 int fio_unused ret;
603
604 /*
605 * Stash away descriptor for later close. This is to
606 * work-around a "feature" on Linux, where a close of
607 * an fd that has been opened for write will trigger
608 * udev to call blkid to check partitions, fs id, etc.
609 * That pollutes the device cache, which can slow down
610 * unbuffered accesses.
611 */
612 if (f->shadow_fd == -1)
613 f->shadow_fd = f->fd;
614 else {
615 /*
616 * OK to ignore, we haven't done anything
617 * with it
618 */
619 ret = generic_close_file(td, f);
620 }
621 goto open_again;
622 }
623 }
624
625 return 0;
626}
627
628int generic_get_file_size(struct thread_data *td, struct fio_file *f)
629{
630 return get_file_size(td, f);
631}
632
633/*
634 * open/close all files, so that ->real_file_size gets set
635 */
636static int get_file_sizes(struct thread_data *td)
637{
638 struct fio_file *f;
639 unsigned int i;
640 int err = 0;
641
642 for_each_file(td, f, i) {
643 dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
644 f->file_name);
645
646 if (td_io_get_file_size(td, f)) {
647 if (td->error != ENOENT) {
648 log_err("%s\n", td->verror);
649 err = 1;
650 }
651 clear_error(td);
652 }
653
654 if (f->real_file_size == -1ULL && td->o.size)
655 f->real_file_size = td->o.size / td->o.nr_files;
656 }
657
658 return err;
659}
660
661struct fio_mount {
662 struct flist_head list;
663 const char *base;
664 char __base[256];
665 unsigned int key;
666};
667
668/*
669 * Get free number of bytes for each file on each unique mount.
670 */
671static unsigned long long get_fs_free_counts(struct thread_data *td)
672{
673 struct flist_head *n, *tmp;
674 unsigned long long ret = 0;
675 struct fio_mount *fm;
676 FLIST_HEAD(list);
677 struct fio_file *f;
678 unsigned int i;
679
680 for_each_file(td, f, i) {
681 struct stat sb;
682 char buf[256];
683
684 if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
685 if (f->real_file_size != -1ULL)
686 ret += f->real_file_size;
687 continue;
688 } else if (f->filetype != FIO_TYPE_FILE)
689 continue;
690
691 buf[255] = '\0';
692 strncpy(buf, f->file_name, 255);
693
694 if (stat(buf, &sb) < 0) {
695 if (errno != ENOENT)
696 break;
697 strcpy(buf, ".");
698 if (stat(buf, &sb) < 0)
699 break;
700 }
701
702 fm = NULL;
703 flist_for_each(n, &list) {
704 fm = flist_entry(n, struct fio_mount, list);
705 if (fm->key == sb.st_dev)
706 break;
707
708 fm = NULL;
709 }
710
711 if (fm)
712 continue;
713
714 fm = malloc(sizeof(*fm));
715 strcpy(fm->__base, buf);
716 fm->base = basename(fm->__base);
717 fm->key = sb.st_dev;
718 flist_add(&fm->list, &list);
719 }
720
721 flist_for_each_safe(n, tmp, &list) {
722 unsigned long long sz;
723
724 fm = flist_entry(n, struct fio_mount, list);
725 flist_del(&fm->list);
726
727 sz = get_fs_size(fm->base);
728 if (sz && sz != -1ULL)
729 ret += sz;
730
731 free(fm);
732 }
733
734 return ret;
735}
736
737uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
738{
739 struct thread_options *o = &td->o;
740
741 if (o->file_append && f->filetype == FIO_TYPE_FILE)
742 return f->real_file_size;
743
744 return td->o.start_offset +
745 (td->thread_number - 1) * td->o.offset_increment;
746}
747
748/*
749 * Open the files and setup files sizes, creating files if necessary.
750 */
751int setup_files(struct thread_data *td)
752{
753 unsigned long long total_size, extend_size;
754 struct thread_options *o = &td->o;
755 struct fio_file *f;
756 unsigned int i, nr_fs_extra = 0;
757 int err = 0, need_extend;
758 int old_state;
759 const unsigned int bs = td_min_bs(td);
760 uint64_t fs = 0;
761
762 dprint(FD_FILE, "setup files\n");
763
764 old_state = td_bump_runstate(td, TD_SETTING_UP);
765
766 if (o->read_iolog_file)
767 goto done;
768
769 /*
770 * if ioengine defines a setup() method, it's responsible for
771 * opening the files and setting f->real_file_size to indicate
772 * the valid range for that file.
773 */
774 if (td->io_ops->setup)
775 err = td->io_ops->setup(td);
776 else
777 err = get_file_sizes(td);
778
779 if (err)
780 goto err_out;
781
782 /*
783 * check sizes. if the files/devices do not exist and the size
784 * isn't passed to fio, abort.
785 */
786 total_size = 0;
787 for_each_file(td, f, i) {
788 if (f->real_file_size == -1ULL)
789 total_size = -1ULL;
790 else
791 total_size += f->real_file_size;
792 }
793
794 if (o->fill_device)
795 td->fill_device_size = get_fs_free_counts(td);
796
797 /*
798 * device/file sizes are zero and no size given, punt
799 */
800 if ((!total_size || total_size == -1ULL) && !o->size &&
801 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
802 !(o->nr_files && (o->file_size_low || o->file_size_high))) {
803 log_err("%s: you need to specify size=\n", o->name);
804 td_verror(td, EINVAL, "total_file_size");
805 goto err_out;
806 }
807
808 /*
809 * Calculate per-file size and potential extra size for the
810 * first files, if needed.
811 */
812 if (!o->file_size_low && o->nr_files) {
813 uint64_t all_fs;
814
815 fs = o->size / o->nr_files;
816 all_fs = fs * o->nr_files;
817
818 if (all_fs < o->size)
819 nr_fs_extra = (o->size - all_fs) / bs;
820 }
821
822 /*
823 * now file sizes are known, so we can set ->io_size. if size= is
824 * not given, ->io_size is just equal to ->real_file_size. if size
825 * is given, ->io_size is size / nr_files.
826 */
827 extend_size = total_size = 0;
828 need_extend = 0;
829 for_each_file(td, f, i) {
830 f->file_offset = get_start_offset(td, f);
831
832 if (!o->file_size_low) {
833 /*
834 * no file size range given, file size is equal to
835 * total size divided by number of files. If that is
836 * zero, set it to the real file size. If the size
837 * doesn't divide nicely with the min blocksize,
838 * make the first files bigger.
839 */
840 f->io_size = fs;
841 if (nr_fs_extra) {
842 nr_fs_extra--;
843 f->io_size += bs;
844 }
845
846 if (!f->io_size)
847 f->io_size = f->real_file_size - f->file_offset;
848 } else if (f->real_file_size < o->file_size_low ||
849 f->real_file_size > o->file_size_high) {
850 if (f->file_offset > o->file_size_low)
851 goto err_offset;
852 /*
853 * file size given. if it's fixed, use that. if it's a
854 * range, generate a random size in-between.
855 */
856 if (o->file_size_low == o->file_size_high)
857 f->io_size = o->file_size_low - f->file_offset;
858 else {
859 f->io_size = get_rand_file_size(td)
860 - f->file_offset;
861 }
862 } else
863 f->io_size = f->real_file_size - f->file_offset;
864
865 if (f->io_size == -1ULL)
866 total_size = -1ULL;
867 else {
868 if (o->size_percent)
869 f->io_size = (f->io_size * o->size_percent) / 100;
870 total_size += f->io_size;
871 }
872
873 if (f->filetype == FIO_TYPE_FILE &&
874 (f->io_size + f->file_offset) > f->real_file_size &&
875 !(td->io_ops->flags & FIO_DISKLESSIO)) {
876 if (!o->create_on_open) {
877 need_extend++;
878 extend_size += (f->io_size + f->file_offset);
879 } else
880 f->real_file_size = f->io_size + f->file_offset;
881 fio_file_set_extend(f);
882 }
883 }
884
885 if (!o->size || o->size > total_size)
886 o->size = total_size;
887
888 /*
889 * See if we need to extend some files
890 */
891 if (need_extend) {
892 temp_stall_ts = 1;
893 if (output_format == FIO_OUTPUT_NORMAL)
894 log_info("%s: Laying out IO file(s) (%u file(s) /"
895 " %lluMB)\n", o->name, need_extend,
896 extend_size >> 20);
897
898 for_each_file(td, f, i) {
899 unsigned long long old_len = -1ULL, extend_len = -1ULL;
900
901 if (!fio_file_extend(f))
902 continue;
903
904 assert(f->filetype == FIO_TYPE_FILE);
905 fio_file_clear_extend(f);
906 if (!o->fill_device) {
907 old_len = f->real_file_size;
908 extend_len = f->io_size + f->file_offset -
909 old_len;
910 }
911 f->real_file_size = (f->io_size + f->file_offset);
912 err = extend_file(td, f);
913 if (err)
914 break;
915
916 err = __file_invalidate_cache(td, f, old_len,
917 extend_len);
918 close(f->fd);
919 f->fd = -1;
920 if (err)
921 break;
922 }
923 temp_stall_ts = 0;
924 }
925
926 if (err)
927 goto err_out;
928
929 if (!o->zone_size)
930 o->zone_size = o->size;
931
932 /*
933 * iolog already set the total io size, if we read back
934 * stored entries.
935 */
936 if (!o->read_iolog_file)
937 td->total_io_size = o->size * o->loops;
938
939done:
940 if (o->create_only)
941 td->done = 1;
942
943 td_restore_runstate(td, old_state);
944 return 0;
945err_offset:
946 log_err("%s: you need to specify valid offset=\n", o->name);
947err_out:
948 td_restore_runstate(td, old_state);
949 return 1;
950}
951
952int pre_read_files(struct thread_data *td)
953{
954 struct fio_file *f;
955 unsigned int i;
956
957 dprint(FD_FILE, "pre_read files\n");
958
959 for_each_file(td, f, i) {
960 pre_read_file(td, f);
961 }
962
963 return 1;
964}
965
966static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
967{
968 unsigned int range_size, seed;
969 unsigned long nranges;
970 uint64_t file_size;
971
972 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
973 file_size = min(f->real_file_size, f->io_size);
974
975 nranges = (file_size + range_size - 1) / range_size;
976
977 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
978 if (!td->o.rand_repeatable)
979 seed = td->rand_seeds[4];
980
981 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
982 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
983 else
984 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
985
986 return 1;
987}
988
989static int init_rand_distribution(struct thread_data *td)
990{
991 struct fio_file *f;
992 unsigned int i;
993 int state;
994
995 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
996 return 0;
997
998 state = td_bump_runstate(td, TD_SETTING_UP);
999
1000 for_each_file(td, f, i)
1001 __init_rand_distribution(td, f);
1002
1003 td_restore_runstate(td, state);
1004
1005 return 1;
1006}
1007
1008int init_random_map(struct thread_data *td)
1009{
1010 unsigned long long blocks;
1011 struct fio_file *f;
1012 unsigned int i;
1013
1014 if (init_rand_distribution(td))
1015 return 0;
1016 if (!td_random(td))
1017 return 0;
1018
1019 for_each_file(td, f, i) {
1020 uint64_t file_size = min(f->real_file_size, f->io_size);
1021
1022 blocks = file_size / (unsigned long long) td->o.rw_min_bs;
1023
1024 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1025 unsigned long seed;
1026
1027 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1028
1029 if (!lfsr_init(&f->lfsr, blocks, seed, 0))
1030 continue;
1031 } else if (!td->o.norandommap) {
1032 f->io_axmap = axmap_new(blocks);
1033 if (f->io_axmap)
1034 continue;
1035 } else if (td->o.norandommap)
1036 continue;
1037
1038 if (!td->o.softrandommap) {
1039 log_err("fio: failed allocating random map. If running"
1040 " a large number of jobs, try the 'norandommap'"
1041 " option or set 'softrandommap'. Or give"
1042 " a larger --alloc-size to fio.\n");
1043 return 1;
1044 }
1045
1046 log_info("fio: file %s failed allocating random map. Running "
1047 "job without.\n", f->file_name);
1048 }
1049
1050 return 0;
1051}
1052
1053void close_files(struct thread_data *td)
1054{
1055 struct fio_file *f;
1056 unsigned int i;
1057
1058 for_each_file(td, f, i) {
1059 if (fio_file_open(f))
1060 td_io_close_file(td, f);
1061 }
1062}
1063
1064void close_and_free_files(struct thread_data *td)
1065{
1066 struct fio_file *f;
1067 unsigned int i;
1068
1069 dprint(FD_FILE, "close files\n");
1070
1071 for_each_file(td, f, i) {
1072 if (fio_file_open(f))
1073 td_io_close_file(td, f);
1074
1075 remove_file_hash(f);
1076
1077 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1078 dprint(FD_FILE, "free unlink %s\n", f->file_name);
1079 unlink(f->file_name);
1080 }
1081
1082 sfree(f->file_name);
1083 f->file_name = NULL;
1084 axmap_free(f->io_axmap);
1085 f->io_axmap = NULL;
1086 sfree(f);
1087 }
1088
1089 td->o.filename = NULL;
1090 free(td->files);
1091 free(td->file_locks);
1092 td->files_index = 0;
1093 td->files = NULL;
1094 td->file_locks = NULL;
1095 td->o.file_lock_mode = FILE_LOCK_NONE;
1096 td->o.nr_files = 0;
1097}
1098
1099static void get_file_type(struct fio_file *f)
1100{
1101 struct stat sb;
1102
1103 if (!strcmp(f->file_name, "-"))
1104 f->filetype = FIO_TYPE_PIPE;
1105 else
1106 f->filetype = FIO_TYPE_FILE;
1107
1108 /* \\.\ is the device namespace in Windows, where every file is
1109 * a block device */
1110 if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1111 f->filetype = FIO_TYPE_BD;
1112
1113 if (!stat(f->file_name, &sb)) {
1114 if (S_ISBLK(sb.st_mode))
1115 f->filetype = FIO_TYPE_BD;
1116 else if (S_ISCHR(sb.st_mode))
1117 f->filetype = FIO_TYPE_CHAR;
1118 else if (S_ISFIFO(sb.st_mode))
1119 f->filetype = FIO_TYPE_PIPE;
1120 }
1121}
1122
1123static int __is_already_allocated(const char *fname)
1124{
1125 struct flist_head *entry;
1126 char *filename;
1127
1128 if (flist_empty(&filename_list))
1129 return 0;
1130
1131 flist_for_each(entry, &filename_list) {
1132 filename = flist_entry(entry, struct file_name, list)->filename;
1133
1134 if (strcmp(filename, fname) == 0)
1135 return 1;
1136 }
1137
1138 return 0;
1139}
1140
1141static int is_already_allocated(const char *fname)
1142{
1143 int ret;
1144
1145 fio_file_hash_lock();
1146 ret = __is_already_allocated(fname);
1147 fio_file_hash_unlock();
1148 return ret;
1149}
1150
1151static void set_already_allocated(const char *fname)
1152{
1153 struct file_name *fn;
1154
1155 fn = malloc(sizeof(struct file_name));
1156 fn->filename = strdup(fname);
1157
1158 fio_file_hash_lock();
1159 if (!__is_already_allocated(fname)) {
1160 flist_add_tail(&fn->list, &filename_list);
1161 fn = NULL;
1162 }
1163 fio_file_hash_unlock();
1164
1165 if (fn) {
1166 free(fn->filename);
1167 free(fn);
1168 }
1169}
1170
1171
1172static void free_already_allocated(void)
1173{
1174 struct flist_head *entry, *tmp;
1175 struct file_name *fn;
1176
1177 if (flist_empty(&filename_list))
1178 return;
1179
1180 fio_file_hash_lock();
1181 flist_for_each_safe(entry, tmp, &filename_list) {
1182 fn = flist_entry(entry, struct file_name, list);
1183 free(fn->filename);
1184 flist_del(&fn->list);
1185 free(fn);
1186 }
1187
1188 fio_file_hash_unlock();
1189}
1190
1191static struct fio_file *alloc_new_file(struct thread_data *td)
1192{
1193 struct fio_file *f;
1194
1195 f = smalloc(sizeof(*f));
1196 if (!f) {
1197 log_err("fio: smalloc OOM\n");
1198 assert(0);
1199 return NULL;
1200 }
1201
1202 f->fd = -1;
1203 f->shadow_fd = -1;
1204 fio_file_reset(td, f);
1205 return f;
1206}
1207
1208int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1209{
1210 int cur_files = td->files_index;
1211 char file_name[PATH_MAX];
1212 struct fio_file *f;
1213 int len = 0;
1214
1215 dprint(FD_FILE, "add file %s\n", fname);
1216
1217 if (td->o.directory)
1218 len = set_name_idx(file_name, td->o.directory, numjob);
1219
1220 sprintf(file_name + len, "%s", fname);
1221
1222 /* clean cloned siblings using existing files */
1223 if (numjob && is_already_allocated(file_name))
1224 return 0;
1225
1226 f = alloc_new_file(td);
1227
1228 if (td->files_size <= td->files_index) {
1229 unsigned int new_size = td->o.nr_files + 1;
1230
1231 dprint(FD_FILE, "resize file array to %d files\n", new_size);
1232
1233 td->files = realloc(td->files, new_size * sizeof(f));
1234 if (td->files == NULL) {
1235 log_err("fio: realloc OOM\n");
1236 assert(0);
1237 }
1238 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1239 td->file_locks = realloc(td->file_locks, new_size);
1240 if (!td->file_locks) {
1241 log_err("fio: realloc OOM\n");
1242 assert(0);
1243 }
1244 td->file_locks[cur_files] = FILE_LOCK_NONE;
1245 }
1246 td->files_size = new_size;
1247 }
1248 td->files[cur_files] = f;
1249 f->fileno = cur_files;
1250
1251 /*
1252 * init function, io engine may not be loaded yet
1253 */
1254 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1255 f->real_file_size = -1ULL;
1256
1257 f->file_name = smalloc_strdup(file_name);
1258 if (!f->file_name) {
1259 log_err("fio: smalloc OOM\n");
1260 assert(0);
1261 }
1262
1263 get_file_type(f);
1264
1265 switch (td->o.file_lock_mode) {
1266 case FILE_LOCK_NONE:
1267 break;
1268 case FILE_LOCK_READWRITE:
1269 f->rwlock = fio_rwlock_init();
1270 break;
1271 case FILE_LOCK_EXCLUSIVE:
1272 f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1273 break;
1274 default:
1275 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1276 assert(0);
1277 }
1278
1279 td->files_index++;
1280 if (f->filetype == FIO_TYPE_FILE)
1281 td->nr_normal_files++;
1282
1283 set_already_allocated(file_name);
1284
1285 if (inc)
1286 td->o.nr_files++;
1287
1288 dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1289 cur_files);
1290
1291 return cur_files;
1292}
1293
1294int add_file_exclusive(struct thread_data *td, const char *fname)
1295{
1296 struct fio_file *f;
1297 unsigned int i;
1298
1299 for_each_file(td, f, i) {
1300 if (!strcmp(f->file_name, fname))
1301 return i;
1302 }
1303
1304 return add_file(td, fname, 0, 1);
1305}
1306
1307void get_file(struct fio_file *f)
1308{
1309 dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1310 assert(fio_file_open(f));
1311 f->references++;
1312}
1313
1314int put_file(struct thread_data *td, struct fio_file *f)
1315{
1316 int f_ret = 0, ret = 0;
1317
1318 dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1319
1320 if (!fio_file_open(f)) {
1321 assert(f->fd == -1);
1322 return 0;
1323 }
1324
1325 assert(f->references);
1326 if (--f->references)
1327 return 0;
1328
1329 if (should_fsync(td) && td->o.fsync_on_close)
1330 f_ret = fsync(f->fd);
1331
1332 if (td->io_ops->close_file)
1333 ret = td->io_ops->close_file(td, f);
1334
1335 if (!ret)
1336 ret = f_ret;
1337
1338 td->nr_open_files--;
1339 fio_file_clear_open(f);
1340 assert(f->fd == -1);
1341 return ret;
1342}
1343
1344void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1345{
1346 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1347 return;
1348
1349 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1350 if (ddir == DDIR_READ)
1351 fio_rwlock_read(f->rwlock);
1352 else
1353 fio_rwlock_write(f->rwlock);
1354 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1355 fio_mutex_down(f->lock);
1356
1357 td->file_locks[f->fileno] = td->o.file_lock_mode;
1358}
1359
1360void unlock_file(struct thread_data *td, struct fio_file *f)
1361{
1362 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1363 return;
1364
1365 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1366 fio_rwlock_unlock(f->rwlock);
1367 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1368 fio_mutex_up(f->lock);
1369
1370 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1371}
1372
1373void unlock_file_all(struct thread_data *td, struct fio_file *f)
1374{
1375 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1376 return;
1377 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1378 unlock_file(td, f);
1379}
1380
1381static int recurse_dir(struct thread_data *td, const char *dirname)
1382{
1383 struct dirent *dir;
1384 int ret = 0;
1385 DIR *D;
1386
1387 D = opendir(dirname);
1388 if (!D) {
1389 char buf[FIO_VERROR_SIZE];
1390
1391 snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1392 td_verror(td, errno, buf);
1393 return 1;
1394 }
1395
1396 while ((dir = readdir(D)) != NULL) {
1397 char full_path[PATH_MAX];
1398 struct stat sb;
1399
1400 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1401 continue;
1402
1403 sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1404
1405 if (lstat(full_path, &sb) == -1) {
1406 if (errno != ENOENT) {
1407 td_verror(td, errno, "stat");
1408 ret = 1;
1409 break;
1410 }
1411 }
1412
1413 if (S_ISREG(sb.st_mode)) {
1414 add_file(td, full_path, 0, 1);
1415 continue;
1416 }
1417 if (!S_ISDIR(sb.st_mode))
1418 continue;
1419
1420 ret = recurse_dir(td, full_path);
1421 if (ret)
1422 break;
1423 }
1424
1425 closedir(D);
1426 return ret;
1427}
1428
1429int add_dir_files(struct thread_data *td, const char *path)
1430{
1431 int ret = recurse_dir(td, path);
1432
1433 if (!ret)
1434 log_info("fio: opendir added %d files\n", td->o.nr_files);
1435
1436 return ret;
1437}
1438
1439void dup_files(struct thread_data *td, struct thread_data *org)
1440{
1441 struct fio_file *f;
1442 unsigned int i;
1443
1444 dprint(FD_FILE, "dup files: %d\n", org->files_index);
1445
1446 if (!org->files)
1447 return;
1448
1449 td->files = malloc(org->files_index * sizeof(f));
1450
1451 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1452 td->file_locks = malloc(org->files_index);
1453
1454 for_each_file(org, f, i) {
1455 struct fio_file *__f;
1456
1457 __f = alloc_new_file(td);
1458
1459 if (f->file_name) {
1460 __f->file_name = smalloc_strdup(f->file_name);
1461 if (!__f->file_name) {
1462 log_err("fio: smalloc OOM\n");
1463 assert(0);
1464 }
1465
1466 __f->filetype = f->filetype;
1467 }
1468
1469 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1470 __f->lock = f->lock;
1471 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1472 __f->rwlock = f->rwlock;
1473
1474 td->files[i] = __f;
1475 }
1476}
1477
1478/*
1479 * Returns the index that matches the filename, or -1 if not there
1480 */
1481int get_fileno(struct thread_data *td, const char *fname)
1482{
1483 struct fio_file *f;
1484 unsigned int i;
1485
1486 for_each_file(td, f, i)
1487 if (!strcmp(f->file_name, fname))
1488 return i;
1489
1490 return -1;
1491}
1492
1493/*
1494 * For log usage, where we add/open/close files automatically
1495 */
1496void free_release_files(struct thread_data *td)
1497{
1498 close_files(td);
1499 td->files_index = 0;
1500 td->nr_normal_files = 0;
1501}
1502
1503void fio_file_reset(struct thread_data *td, struct fio_file *f)
1504{
1505 f->last_pos = f->file_offset;
1506 f->last_start = -1ULL;
1507 if (f->io_axmap)
1508 axmap_reset(f->io_axmap);
1509 if (td->o.random_generator == FIO_RAND_GEN_LFSR)
1510 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1511}
1512
1513int fio_files_done(struct thread_data *td)
1514{
1515 struct fio_file *f;
1516 unsigned int i;
1517
1518 for_each_file(td, f, i)
1519 if (!fio_file_done(f))
1520 return 0;
1521
1522 return 1;
1523}
1524
1525/* free memory used in initialization phase only */
1526void filesetup_mem_free(void)
1527{
1528 free_already_allocated();
1529}