/*
 * Lay a data file out on disk: if the layout must be redone, remove any
 * stale existing file, then (re)create it write-only, size it with
 * ftruncate()/posix_fallocate(), and fill it by writing zeroed blocks of
 * the max write block size until ->real_file_size bytes are written or
 * td->terminate is set. Errors are recorded via td_verror().
 * (Several lines are elided in this excerpt; comments cover only what is
 * visible here.)
 */
12 static int extend_file(struct thread_data *td, struct fio_file *f)
14 int r, new_layout = 0, unlink_file = 0, flags;
15 unsigned long long left;
20 * check if we need to lay the file out complete again. fio
21 * does that for operations involving reads, or for writes
22 * where overwrite is set
24 if (td_read(td) || (td_write(td) && td->o.overwrite))
26 if (td_write(td) && !td->o.overwrite)
/* a stale existing file must go away before a fresh layout or unlink run */
29 if ((unlink_file || new_layout) && (f->flags & FIO_FILE_EXISTS)) {
30 if (unlink(f->file_name) < 0) {
31 td_verror(td, errno, "unlink");
36 flags = O_WRONLY | O_CREAT;
40 f->fd = open(f->file_name, flags, 0644);
42 td_verror(td, errno, "open");
/* reserve the full target size up front */
46 if (ftruncate(f->fd, f->real_file_size) == -1) {
47 td_verror(td, errno, "ftruncate");
/*
 * NOTE(review): posix_fallocate() returns a positive error NUMBER on
 * failure and does not set errno; it never returns a negative value.
 * This `< 0` check can never fire, and `errno` below is not meaningful
 * for it. It should be `!= 0` with the return value passed to
 * td_verror() -- confirm against the full source.
 */
54 if (posix_fallocate(f->fd, 0, f->real_file_size) < 0) {
55 td_verror(td, errno, "posix_fallocate");
/* scratch buffer of zeroes, one max write block big */
59 b = malloc(td->o.max_bs[DDIR_WRITE]);
60 memset(b, 0, td->o.max_bs[DDIR_WRITE]);
/* write zeroes until the whole file is laid out, or we are told to stop */
62 left = f->real_file_size;
63 while (left && !td->terminate) {
64 bs = td->o.max_bs[DDIR_WRITE];
68 r = write(f->fd, b, bs);
75 td_verror(td, errno, "write");
/* short write with no errno: report as generic I/O error */
77 td_verror(td, EIO, "write");
85 else if (td->o.create_fsync)
99 static unsigned long long get_rand_file_size(struct thread_data *td)
101 unsigned long long ret;
104 r = os_random_long(&td->file_size_state);
105 ret = td->o.file_size_low + (unsigned long long) ((double) td->o.file_size_high * (r / (RAND_MAX + 1.0)));
106 ret -= (ret % td->o.rw_min_bs);
/*
 * Determine the size of a regular file via fstat() and store it in
 * ->real_file_size. Reports failure through td_verror().
 */
110 static int file_size(struct thread_data *td, struct fio_file *f)
114 if (fstat(f->fd, &st) == -1) {
115 td_verror(td, errno, "fstat");
119 f->real_file_size = st.st_size;
/*
 * Determine the size of a block device via the blockdev_size() helper
 * and store it in ->real_file_size. Note the helper returns the error
 * code directly (it is passed to td_verror(), not errno).
 */
123 static int bdev_size(struct thread_data *td, struct fio_file *f)
125 unsigned long long bytes;
128 r = blockdev_size(f->fd, &bytes);
130 td_verror(td, r, "blockdev_size");
134 f->real_file_size = bytes;
/*
 * Fill in ->real_file_size for this file, dispatching on its type
 * (regular file vs block device). Once determined, FIO_SIZE_KNOWN is
 * set so the lookup is done only once. Also validates that the
 * configured file offset does not lie beyond the end of the file.
 */
138 static int get_file_size(struct thread_data *td, struct fio_file *f)
142 if (f->flags & FIO_SIZE_KNOWN)
145 if (f->filetype == FIO_TYPE_FILE)
146 ret = file_size(td, f);
147 else if (f->filetype == FIO_TYPE_BD)
148 ret = bdev_size(td, f);
/* unknown type: -1 (i.e. -1ULL) marks the size as undetermined */
150 f->real_file_size = -1;
155 if (f->file_offset > f->real_file_size) {
156 log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
160 f->flags |= FIO_SIZE_KNOWN;
/*
 * Drop cached pages for this file so subsequent I/O hits the device:
 * madvise(MADV_DONTNEED) for mmap'ed files, posix_fadvise(DONTNEED)
 * for regular files, and a blockdev cache-invalidate ioctl for block
 * devices. Char devices and pipes have nothing to invalidate.
 */
164 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
172 * FIXME: add blockdev flushing too
175 ret = madvise(f->mmap, f->io_size, MADV_DONTNEED);
176 else if (f->filetype == FIO_TYPE_FILE)
177 ret = fadvise(f->fd, f->file_offset, f->io_size, POSIX_FADV_DONTNEED);
178 else if (f->filetype == FIO_TYPE_BD) {
179 ret = blockdev_invalidate_cache(f->fd);
/* non-root users may not flush block devices: warn and continue */
180 if (ret < 0 && errno == EACCES && geteuid()) {
181 log_err("fio: only root may flush block devices. Cache flush bypassed!\n");
184 } else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
188 td_verror(td, errno, "invalidate_cache");
/*
 * Generic close handler for a fio_file; the thread_data argument is
 * unused (fio_unused). Presumably closes f->fd -- body is elided in
 * this excerpt.
 */
195 void generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
/*
 * Generic open handler: build open(2) flags from the job options
 * (O_DIRECT, write/read-write mode), open the file with the mode
 * appropriate to its type, report errors via td_verror() with the
 * file name embedded, and finally resolve its size.
 */
201 int generic_open_file(struct thread_data *td, struct fio_file *f)
206 flags |= OS_O_DIRECT;
210 if (td_write(td) || td_rw(td)) {
213 if (f->filetype == FIO_TYPE_FILE)
216 f->fd = open(f->file_name, flags, 0600)
/*
 * Open all of the job's files. If the process hits its descriptor
 * limit (EMFILE), clamp o.open_files down to however many we managed
 * to open and carry on. On the error path, all files are closed again.
 */
244 int open_files(struct thread_data *td)
250 for_each_file(td, f, i) {
251 err = td_io_open_file(td, f);
253 if (td->error == EMFILE) {
254 log_err("fio: limited open files to: %d\n", td->nr_open_files);
/* fd limit reached: lower the target to what we actually got */
255 td->o.open_files = td->nr_open_files;
262 if (td->o.open_files == td->nr_open_files)
/* error path: undo any opens done so far */
269 for_each_file(td, f, i)
270 td_io_close_file(td, f);
276 * open/close all files, so that ->real_file_size gets set
278 static int get_file_sizes(struct thread_data *td)
284 for_each_file(td, f, i) {
285 if (td->io_ops->open_file(td, f)) {
/* a missing file (ENOENT) is fine -- it may be created later */
286 if (td->error != ENOENT) {
287 log_err("%s\n", td->verror);
292 if (td->io_ops->close_file)
293 td->io_ops->close_file(td, f);
/* size still unknown: fall back to an even share of the job size */
296 if (f->real_file_size == -1ULL && td->o.size)
297 f->real_file_size = td->o.size / td->o.nr_files;
304 * Open the files and setup files sizes, creating files if necessary.
306 int setup_files(struct thread_data *td)
308 unsigned long long total_size, extend_size;
311 int err = 0, need_extend;
314 * if ioengine defines a setup() method, it's responsible for
315 * opening the files and setting f->real_file_size to indicate
316 * the valid range for that file.
318 if (td->io_ops->setup)
319 err = td->io_ops->setup(td);
321 err = get_file_sizes(td);
327 * check sizes. if the files/devices do not exist and the size
328 * isn't passed to fio, abort.
331 for_each_file(td, f, i) {
332 if (f->real_file_size == -1ULL)
335 total_size += f->real_file_size;
339 * device/file sizes are zero and no size given, punt
341 if ((!total_size || total_size == -1ULL) && !td->o.size) {
342 log_err("%s: you need to specify size=\n", td->o.name);
343 td_verror(td, EINVAL, "total_file_size");
348 * now file sizes are known, so we can set ->io_size. if size= is
349 * not given, ->io_size is just equal to ->real_file_size. if size
350 * is given, ->io_size is size / nr_files.
352 extend_size = total_size = 0;
354 for_each_file(td, f, i) {
355 if (!td->o.file_size_low) {
357 * no file size range given, file size is equal to
358 * total size divided by number of files. if that is
359 * zero, set it to the real file size.
361 f->io_size = td->o.size / td->o.nr_files;
363 f->io_size = f->real_file_size;
364 } else if (f->real_file_size < td->o.file_size_low ||
365 f->real_file_size > td->o.file_size_high) {
367 * file size given. if it's fixed, use that. if it's a
368 * range, generate a random size in-between.
370 if (td->o.file_size_low == td->o.file_size_high)
371 f->io_size = td->o.file_size_low;
373 f->io_size = get_rand_file_size(td);
375 f->io_size = f->real_file_size;
/* -1ULL means the io size could not be determined */
377 if (f->io_size == -1ULL)
380 total_size += f->io_size;
/* regular file smaller than its io size must be laid out/extended,
 * unless the engine does diskless I/O and never touches real files */
382 if (f->filetype == FIO_TYPE_FILE &&
383 f->io_size > f->real_file_size &&
384 !(td->io_ops->flags & FIO_DISKLESSIO)) {
386 extend_size += f->io_size;
387 f->flags |= FIO_FILE_EXTEND;
/* the verified per-file sizes become the effective job size */
392 td->o.size = total_size;
395 * See if we need to extend some files
/*
 * NOTE(review): "%Lu" is a glibc extension; "%llu" is the standard
 * conversion for unsigned long long -- confirm portability intent.
 */
399 log_info("%s: Laying out IO file(s) (%u files / %LuMiB)\n",
400 td->o.name, need_extend, extend_size >> 20);
402 for_each_file(td, f, i) {
403 if (!(f->flags & FIO_FILE_EXTEND))
406 assert(f->filetype == FIO_TYPE_FILE);
407 f->flags &= ~FIO_FILE_EXTEND;
408 f->real_file_size = f->io_size;
409 err = extend_file(td, f);
/* default the zone size to the whole job size if none was given */
419 if (!td->o.zone_size)
420 td->o.zone_size = td->o.size;
422 td->total_io_size = td->o.size * td->o.loops;
/*
 * Allocate and zero the per-file random-I/O block bitmap: one bit per
 * rw_min_bs-sized block, packed BLOCKS_PER_MAP blocks per long. Skipped
 * entirely when the norandommap option is set.
 */
426 int init_random_map(struct thread_data *td)
428 int num_maps, blocks;
432 if (td->o.norandommap)
435 for_each_file(td, f, i) {
/* round block count and map count up */
436 blocks = (f->real_file_size + td->o.rw_min_bs - 1) / td->o.rw_min_bs;
437 num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
/*
 * NOTE(review): num_maps is a plain int and the multiplication below
 * can overflow for very large files -- consider size_t / overflow
 * checking. Confirm against expected maximum file sizes.
 */
438 f->file_map = malloc(num_maps * sizeof(long));
440 log_err("fio: failed allocating random map. If running a large number of jobs, try the 'norandommap' option\n");
443 f->num_maps = num_maps;
444 memset(f->file_map, 0, num_maps * sizeof(long));
/*
 * Close every file belonging to this thread, unlinking regular files
 * that were flagged FIO_FILE_UNLINK. Also clears the job's filename
 * option pointer.
 */
450 void close_files(struct thread_data *td)
455 for_each_file(td, f, i) {
/* only regular files marked for unlink are removed from disk */
456 if ((f->flags & FIO_FILE_UNLINK) &&
457 f->filetype == FIO_TYPE_FILE)
458 unlink(f->file_name);
460 td_io_close_file(td, f);
471 td->o.filename = NULL;
/*
 * Classify a file by lstat()ing its path: block device, char device,
 * or FIFO/pipe. Defaults to a regular file, including when lstat()
 * fails (e.g. the file does not exist yet).
 */
477 static void get_file_type(struct fio_file *f)
481 f->filetype = FIO_TYPE_FILE;
483 if (!lstat(f->file_name, &sb)) {
484 if (S_ISBLK(sb.st_mode))
485 f->filetype = FIO_TYPE_BD;
486 else if (S_ISCHR(sb.st_mode))
487 f->filetype = FIO_TYPE_CHAR;
488 else if (S_ISFIFO(sb.st_mode))
489 f->filetype = FIO_TYPE_PIPE;
/*
 * Append a new fio_file for `fname` to the thread's file array,
 * prefixing the configured directory if present, classifying the file
 * type, and counting regular files. The name is strdup()'ed into the
 * new entry.
 */
493 void add_file(struct thread_data *td, const char *fname)
495 int cur_files = td->files_index;
496 char file_name[PATH_MAX];
/*
 * NOTE(review): `p = realloc(p, ...)` loses the original array if
 * realloc fails, and the result is not checked before use below --
 * confirm whether OOM is handled elsewhere or is considered fatal.
 */
500 td->files = realloc(td->files, (cur_files + 1) * sizeof(*f));
502 f = &td->files[cur_files];
503 memset(f, 0, sizeof(*f));
507 * init function, io engine may not be loaded yet
/* diskless engines never have a real on-disk size */
509 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
510 f->real_file_size = -1ULL;
/*
 * NOTE(review): unbounded sprintf into a PATH_MAX buffer; a long
 * directory + fname can overflow. snprintf would be safer -- verify.
 */
513 len = sprintf(file_name, "%s/", td->o.directory);
515 sprintf(file_name + len, "%s", fname);
516 f->file_name = strdup(file_name);
521 if (f->filetype == FIO_TYPE_FILE)
522 td->nr_normal_files++;
/*
 * Take a reference on the file; pairs with put_file(). Presumably
 * increments f->references (which put_file() asserts is non-zero) --
 * body is elided in this excerpt.
 */
525 void get_file(struct fio_file *f)
/*
 * Drop a reference on an open file; when appropriate, fsync it
 * (if fsync_on_close is set) and close it through the io engine's
 * ->close_file hook, clearing the FIO_FILE_OPEN flag.
 */
530 void put_file(struct thread_data *td, struct fio_file *f)
532 if (!(f->flags & FIO_FILE_OPEN))
/* must still hold at least one reference at this point */
535 assert(f->references);
539 if (should_fsync(td) && td->o.fsync_on_close)
542 if (td->io_ops->close_file)
543 td->io_ops->close_file(td, f);
546 f->flags &= ~FIO_FILE_OPEN;
/*
 * Walk a directory tree, add_file()ing every regular file found and
 * recursing into subdirectories. "." and ".." are skipped; entries
 * that vanish between readdir() and lstat() (ENOENT) are tolerated.
 * Returns 0 on success, non-zero on error (reported via td_verror()).
 */
549 static int recurse_dir(struct thread_data *td, const char *dirname)
555 D = opendir(dirname);
557 char buf[FIO_VERROR_SIZE];
559 snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
560 td_verror(td, errno, buf);
564 while ((dir = readdir(D)) != NULL) {
565 char full_path[PATH_MAX];
568 if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
/*
 * NOTE(review): unbounded sprintf into a PATH_MAX buffer; deep
 * recursion with long names could overflow -- snprintf would be safer.
 */
571 sprintf(full_path, "%s/%s", dirname, dir->d_name);
573 if (lstat(full_path, &sb) == -1) {
574 if (errno != ENOENT) {
575 td_verror(td, errno, "stat");
580 if (S_ISREG(sb.st_mode)) {
581 add_file(td, full_path);
/* anything that is neither regular file nor directory is ignored */
585 if (!S_ISDIR(sb.st_mode))
588 if ((ret = recurse_dir(td, full_path)) != 0)
/*
 * Public entry point for the opendir= option: recursively add every
 * regular file under `path`, logging how many files were picked up.
 */
596 int add_dir_files(struct thread_data *td, const char *path)
598 int ret = recurse_dir(td, path);
601 log_info("fio: opendir added %d files\n", td->o.nr_files);
606 void dup_files(struct thread_data *td, struct thread_data *org)
615 bytes = org->files_index * sizeof(*f);
616 td->files = malloc(bytes);
617 memcpy(td->files, org->files, bytes);
619 for_each_file(td, f, i) {
621 f->file_name = strdup(f->file_name);