/*
 * extend_file(): lay out a file on disk so it reaches f->real_file_size,
 * by (re)creating it and writing zero-filled blocks of max_bs[DDIR_WRITE]
 * until the whole size is covered.  Returns 0 on success, non-zero on
 * error with td_verror() set.
 *
 * NOTE(review): this view of the function is truncated — several interior
 * lines (flag assignments, error returns, the write-loop tail, fsync and
 * close paths) are missing, so comments below cover only visible lines.
 */
13 static int extend_file(struct thread_data *td, struct fio_file *f)
15 int r, new_layout = 0, unlink_file = 0, flags;
16 unsigned long long left;
21 * check if we need to lay the file out complete again. fio
22 * does that for operations involving reads, or for writes
23 * where overwrite is set
/* reads, or writes with overwrite=1, force a full re-layout (flag set on a hidden line) */
25 if (td_read(td) || (td_write(td) && td->o.overwrite))
/* non-overwriting writes: presumably just unlink the stale file — body not visible, confirm */
27 if (td_write(td) && !td->o.overwrite)
/* drop any pre-existing file before re-creating it from scratch */
30 if ((unlink_file || new_layout) && (f->flags & FIO_FILE_EXISTS)) {
31 if (unlink(f->file_name) < 0) {
32 td_verror(td, errno, "unlink");
37 flags = O_WRONLY | O_CREAT;
41 f->fd = open(f->file_name, flags, 0644);
43 td_verror(td, errno, "open");
/* reserve the full size up front; truncate first, then try a real allocation */
47 if (ftruncate(f->fd, f->real_file_size) == -1) {
48 td_verror(td, errno, "ftruncate");
/*
 * NOTE(review): posix_fallocate() returns a positive error number and
 * does not set errno; a "< 0" check can never trip and errno here is
 * stale.  Should be "if ((r = posix_fallocate(...)) != 0)" with r passed
 * to td_verror() — verify against the full source before changing.
 */
55 if (posix_fallocate(f->fd, 0, f->real_file_size) < 0) {
56 td_verror(td, errno, "posix_fallocate");
/* zeroed scratch buffer, one max-sized write block */
60 b = malloc(td->o.max_bs[DDIR_WRITE]);
61 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
/* fill the file; loop also bails out early if the job is being terminated */
63 left = f->real_file_size;
64 while (left && !td->terminate) {
65 bs = td->o.max_bs[DDIR_WRITE];
69 r = write(f->fd, b, bs);
/* write() failed outright (r < 0): report the real errno */
76 td_verror(td, errno, "write");
/* presumably the short-write branch: synthesize EIO — confirm, condition not visible */
78 td_verror(td, EIO, "write");
/* optionally flush the freshly laid-out data before returning */
86 else if (td->o.create_fsync)
/*
 * get_rand_file_size(): pick a pseudo-random per-file size between
 * file_size_low and file_size_high, rounded down to a multiple of
 * rw_min_bs so it is block-aligned.
 *
 * NOTE(review): the visible formula is low + high * frac, i.e. the random
 * part spans [0, high) and then low is added on top — results can exceed
 * file_size_high by up to file_size_low.  A clamp may exist on a line not
 * shown here; verify before relying on the upper bound.
 */
100 static unsigned long long get_rand_file_size(struct thread_data *td)
102 unsigned long long ret;
105 r = os_random_long(&td->file_size_state);
106 ret = td->o.file_size_low + (unsigned long long) ((double) td->o.file_size_high * (r / (RAND_MAX + 1.0)));
/* align down to the minimum block size */
107 ret -= (ret % td->o.rw_min_bs);
/*
 * file_size(): fetch the size of a regular file via fstat() and store it
 * in f->real_file_size.  Returns non-zero on fstat failure (error path
 * partially hidden in this view).
 */
111 static int file_size(struct thread_data *td, struct fio_file *f)
115 if (fstat(f->fd, &st) == -1) {
116 td_verror(td, errno, "fstat");
120 f->real_file_size = st.st_size;
/*
 * bdev_size(): query a block device's size via the OS helper
 * blockdev_size() and store it in f->real_file_size.  Note the error
 * code r is passed to td_verror() directly — blockdev_size() apparently
 * returns the error number rather than setting errno.
 */
124 static int bdev_size(struct thread_data *td, struct fio_file *f)
126 unsigned long long bytes;
129 r = blockdev_size(f->fd, &bytes);
131 td_verror(td, r, "blockdev_size");
135 f->real_file_size = bytes;
/*
 * get_file_size(): populate f->real_file_size for a file or block device,
 * caching the result via FIO_SIZE_KNOWN so repeat calls are no-ops.
 * Also sanity-checks that the configured file_offset does not lie past
 * the end of the file.
 */
139 static int get_file_size(struct thread_data *td, struct fio_file *f)
/* size already determined on a previous call — nothing to do */
143 if (f->flags & FIO_SIZE_KNOWN)
146 if (f->filetype == FIO_TYPE_FILE)
147 ret = file_size(td, f);
148 else if (f->filetype == FIO_TYPE_BD)
149 ret = bdev_size(td, f);
/* other file types (e.g. char devices): size unknown, mark as -1 */
151 f->real_file_size = -1;
/* configured offset beyond EOF is a configuration error */
156 if (f->file_offset > f->real_file_size) {
157 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
161 f->flags |= FIO_SIZE_KNOWN;
/*
 * file_invalidate_cache(): drop cached pages for a file so subsequent I/O
 * hits the backing store.  Strategy depends on how the file is accessed:
 * madvise(DONTNEED) for mmap'ed files, posix_fadvise(DONTNEED) for plain
 * files, and a block-device cache-invalidate ioctl for block devices.
 * Char devices apparently need no action.
 */
165 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
173 * FIXME: add blockdev flushing too
176 ret = madvise(f->mmap, f->io_size, MADV_DONTNEED);
177 else if (f->filetype == FIO_TYPE_FILE)
178 ret = fadvise(f->fd, f->file_offset, f->io_size, POSIX_FADV_DONTNEED);
179 else if (f->filetype == FIO_TYPE_BD) {
180 ret = blockdev_invalidate_cache(f->fd);
/*
 * flushing a block device needs root; for unprivileged users warn and
 * carry on instead of failing the job
 */
181 if (ret < 0 && errno == EACCES && geteuid()) {
182 log_err("fio: only root may flush block devices. Cache flush bypassed!\n");
185 } else if (f->filetype == FIO_TYPE_CHAR)
/* any remaining failure is reported as a real error */
189 td_verror(td, errno, "invalidate_cache");
/*
 * generic_close_file(): default ioengine close hook.  Body not visible in
 * this view — presumably closes f->fd and resets it; confirm against the
 * full source.  td is unused (fio_unused).
 */
196 void generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
/*
 * generic_open_file(): default ioengine open hook.  Builds open(2) flags
 * from the job options (O_DIRECT, write/read-write mode, etc.), opens the
 * file, and determines its size.  Returns non-zero on failure with
 * td_verror() set.  Several flag-building lines are hidden in this view.
 */
202 int generic_open_file(struct thread_data *td, struct fio_file *f)
/* honor direct=1: bypass the page cache using the OS-specific O_DIRECT */
207 flags |= OS_O_DIRECT;
211 if (td_write(td) || td_rw(td)) {
/* regular files opened for writing get creation semantics (flags set on hidden lines) */
214 if (f->filetype == FIO_TYPE_FILE)
217 f->fd = open(f->file_name, flags, 0600);
/* read-only path below — char devices handled specially (branch body hidden) */
219 if (f->filetype == FIO_TYPE_CHAR)
224 f->fd = open(f->file_name, flags);
/* open failed: format the file name into the error string for context */
228 char buf[FIO_VERROR_SIZE];
231 snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
233 td_verror(td, __e, buf);
/* freshly opened file: make sure real_file_size is known */
236 if (get_file_size(td, f))
/*
 * open_files(): try to open every file of the job up front.  If the
 * process runs into the fd limit (EMFILE), scale open_files down to how
 * many actually opened and continue.  All files are closed again before
 * returning — this pass only validates that they can be opened.
 */
245 int open_files(struct thread_data *td)
251 for_each_file(td, f, i) {
252 err = td_io_open_file(td, f);
/* hit the per-process fd limit: accept fewer simultaneously open files */
254 if (td->error == EMFILE) {
255 log_err("fio: limited open files to: %d\n", td->nr_open_files);
256 td->o.open_files = td->nr_open_files;
/* success condition — got as many open as requested (check body hidden) */
263 if (td->o.open_files == td->nr_open_files)
/* close everything again; this function only probed openability */
270 for_each_file(td, f, i)
271 td_io_close_file(td, f);
277 * open/close all files, so that ->real_file_size gets set
/*
 * get_file_sizes(): briefly open then close each file via the ioengine
 * hooks, which as a side effect populates f->real_file_size.  Files whose
 * size could not be determined fall back to an even share of the job's
 * configured total size.
 */
279 static void get_file_sizes(struct thread_data *td)
284 for_each_file(td, f, i) {
285 if (td->io_ops->open_file(td, f))
288 td->io_ops->close_file(td, f);
/* size unknown (e.g. char device): derive it from size= split across files */
290 if (f->real_file_size == -1ULL && td->o.size)
291 f->real_file_size = td->o.size / td->o.nr_files;
296 * Open the files and setup files sizes, creating files if necessary.
/*
 * setup_files(): the main file-preparation pass run before I/O starts.
 * Determines real sizes, computes per-file io_size from size=/filesize=
 * options, lays out (extends) files that are too small, and derives the
 * job's total size, zone size and total_io_size.  Returns non-zero on a
 * configuration or layout error.
 *
 * NOTE(review): this view is truncated; loop bodies and several error
 * returns are hidden, so comments describe only the visible lines.
 */
298 int setup_files(struct thread_data *td)
300 unsigned long long total_size, extend_size;
303 int err = 0, need_extend;
306 * if ioengine defines a setup() method, it's responsible for
307 * opening the files and setting f->real_file_size to indicate
308 * the valid range for that file.
310 if (td->io_ops->setup)
311 err = td->io_ops->setup(td);
319 * check sizes. if the files/devices do not exist and the size
320 * isn't passed to fio, abort.
323 for_each_file(td, f, i) {
/* size could not be determined for this file (handling hidden) */
324 if (f->real_file_size == -1ULL)
327 total_size += f->real_file_size;
331 * device/file sizes are zero and no size given, punt
333 if ((!total_size || total_size == -1ULL) && !td->o.size) {
334 log_err("%s: you need to specify size=\n", td->o.name);
335 td_verror(td, EINVAL, "total_file_size");
340 * now file sizes are known, so we can set ->io_size. if size= is
341 * not given, ->io_size is just equal to ->real_file_size. if size
342 * is given, ->io_size is size / nr_files.
/* second pass: recompute total_size from the per-file io_size values */
344 extend_size = total_size = 0;
346 for_each_file(td, f, i) {
347 if (!td->o.file_size_low) {
349 * no file size range given, file size is equal to
350 * total size divided by number of files. if that is
351 * zero, set it to the real file size.
353 f->io_size = td->o.size / td->o.nr_files;
355 f->io_size = f->real_file_size;
356 } else if (f->real_file_size < td->o.file_size_low ||
357 f->real_file_size > td->o.file_size_high) {
359 * file size given. if it's fixed, use that. if it's a
360 * range, generate a random size in-between.
362 if (td->o.file_size_low == td->o.file_size_high)
363 f->io_size = td->o.file_size_low;
365 f->io_size = get_rand_file_size(td);
/* real size already falls inside [low, high]: keep it */
367 f->io_size = f->real_file_size;
/* still unknown after all of the above — error path hidden */
369 if (f->io_size == -1ULL)
372 total_size += f->io_size;
/*
 * regular files smaller than their target io_size must be laid
 * out on disk first — mark them, unless the ioengine does not
 * touch real storage (FIO_DISKLESSIO)
 */
374 if (f->filetype == FIO_TYPE_FILE &&
375 f->io_size > f->real_file_size &&
376 !(td->io_ops->flags & FIO_DISKLESSIO)) {
378 extend_size += f->io_size;
379 f->flags |= FIO_FILE_EXTEND;
384 td->o.size = total_size;
387 * See if we need to extend some files
391 log_info("%s: Laying out IO file(s) (%u files / %LuMiB)\n",
392 td->o.name, need_extend, extend_size >> 20);
394 for_each_file(td, f, i) {
395 if (!(f->flags & FIO_FILE_EXTEND))
398 assert(f->filetype == FIO_TYPE_FILE);
399 f->flags &= ~FIO_FILE_EXTEND;
/* grow the file on disk up to its target io_size */
400 f->real_file_size = f->io_size;
401 err = extend_file(td, f);
/* zone_size defaults to the whole job size when zoning is not requested */
411 if (!td->o.zone_size)
412 td->o.zone_size = td->o.size;
414 td->total_io_size = td->o.size * td->o.loops;
/*
 * init_random_map(): allocate the per-file random-I/O coverage bitmap.
 * Each bit tracks one rw_min_bs-sized block; the bitmap is an array of
 * longs holding BLOCKS_PER_MAP bits each.  Skipped entirely when
 * norandommap is set.  Allocation failure is reported with a hint to use
 * norandommap for very large jobs.
 *
 * NOTE(review): num_maps * sizeof(long) is computed in int arithmetic in
 * the visible lines — could overflow for very large files; verify against
 * the full source.
 */
418 int init_random_map(struct thread_data *td)
420 int num_maps, blocks;
424 if (td->o.norandommap)
427 for_each_file(td, f, i) {
/* round block count and map count up so the last partial block/map is covered */
428 blocks = (f->real_file_size + td->o.rw_min_bs - 1) / td->o.rw_min_bs;
429 num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
430 f->file_map = malloc(num_maps * sizeof(long));
/* allocation-failure check presumably on a hidden line before this log */
432 log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
435 f->num_maps = num_maps;
/* start with a clean map: no blocks done yet */
436 memset(f->file_map, 0, num_maps * sizeof(long));
/*
 * close_files(): close every file of the job, first unlinking regular
 * files that were flagged FIO_FILE_UNLINK (i.e. unlink=1 jobs remove
 * their scratch files on close).
 */
442 void close_files(struct thread_data *td)
447 for_each_file(td, f, i) {
448 if ((f->flags & FIO_FILE_UNLINK) &&
449 f->filetype == FIO_TYPE_FILE)
450 unlink(f->file_name);
452 td_io_close_file(td, f);
463 td->o.filename = NULL;
/*
 * get_file_type(): classify a file by lstat()ing its path — block device,
 * char device, or (the default, also used when lstat fails, e.g. the file
 * does not exist yet) a regular file.
 */
469 static void get_file_type(struct fio_file *f)
473 f->filetype = FIO_TYPE_FILE;
475 if (!lstat(f->file_name, &sb)) {
476 if (S_ISBLK(sb.st_mode))
477 f->filetype = FIO_TYPE_BD;
478 else if (S_ISCHR(sb.st_mode))
479 f->filetype = FIO_TYPE_CHAR;
/*
 * add_file(): append a new fio_file entry to td->files, growing the array
 * with realloc.  The stored name is "<directory>/<fname>" when a job
 * directory is set, otherwise just fname.  Also classifies the file type
 * and counts regular files.
 *
 * NOTE(review): the path is built with unbounded sprintf() into a
 * PATH_MAX buffer — a long directory+fname can overflow it; snprintf()
 * would be safer.  Flagged only, since surrounding lines are hidden.
 */
483 void add_file(struct thread_data *td, const char *fname)
485 int cur_files = td->files_index;
486 char file_name[PATH_MAX];
/* grow the array by one slot; new entry zero-initialized below */
490 td->files = realloc(td->files, (cur_files + 1) * sizeof(*f));
492 f = &td->files[cur_files];
493 memset(f, 0, sizeof(*f));
/* prefix with the job directory when one is configured */
497 len = sprintf(file_name, "%s/", td->o.directory);
499 sprintf(file_name + len, "%s", fname);
/* own a heap copy of the final path */
500 f->file_name = strdup(file_name);
505 if (f->filetype == FIO_TYPE_FILE)
506 td->nr_normal_files++;
/*
 * get_file(): take a reference on the file.  Body not visible here —
 * presumably bumps f->references (put_file() below asserts on it);
 * confirm against the full source.
 */
509 void get_file(struct fio_file *f)
/*
 * put_file(): drop a reference on an open file; on the last reference
 * optionally fsync (fsync_on_close) and close it through the ioengine
 * hook, then clear the FIO_FILE_OPEN flag.  No-op if the file is not
 * currently open.
 */
514 void put_file(struct thread_data *td, struct fio_file *f)
516 if (!(f->flags & FIO_FILE_OPEN))
/* a put without a matching get is a bug */
519 assert(f->references);
523 if (should_fsync(td) && td->o.fsync_on_close)
/* ioengines may supply their own close; fall back path is hidden */
526 if (td->io_ops->close_file)
527 td->io_ops->close_file(td, f);
530 f->flags &= ~FIO_FILE_OPEN;
/*
 * recurse_dir(): walk a directory tree, calling add_file() for every
 * regular file found and recursing into subdirectories.  "." and ".."
 * are skipped; entries that vanish between readdir() and lstat() (ENOENT)
 * are silently ignored.  Returns non-zero on error with td_verror() set.
 */
533 static int recurse_dir(struct thread_data *td, const char *dirname)
539 D = opendir(dirname);
/* opendir failed: include the directory name in the error report */
541 char buf[FIO_VERROR_SIZE];
543 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
544 td_verror(td, errno, buf);
548 while ((dir = readdir(D)) != NULL) {
549 char full_path[PATH_MAX];
552 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
555 sprintf(full_path, "%s/%s", dirname, dir->d_name);
557 if (lstat(full_path, &sb) == -1) {
/* ENOENT means the entry disappeared mid-walk — not an error */
558 if (errno != ENOENT) {
559 td_verror(td, errno, "stat");
564 if (S_ISREG(sb.st_mode)) {
565 add_file(td, full_path);
/* anything that is neither a regular file nor a directory is skipped */
569 if (!S_ISDIR(sb.st_mode))
/* descend; propagate the first failure up the call chain */
572 if ((ret = recurse_dir(td, full_path)) != 0)
/*
 * add_dir_files(): public entry point for opendir= jobs — recursively add
 * every regular file under 'path', then log how many files were found.
 */
580 int add_dir_files(struct thread_data *td, const char *path)
582 int ret = recurse_dir(td, path);
585 log_info("fio: opendir added %d files\n", td->o.nr_files);
590 void dup_files(struct thread_data *td, struct thread_data *org)
599 bytes = org->files_index * sizeof(*f);
600 td->files = malloc(bytes);
601 memcpy(td->files, org->files, bytes);
603 for_each_file(td, f, i) {
605 f->file_name = strdup(f->file_name);