X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=filesetup.c;h=e456186b950fe5704c32af2c9da2c450dd83b82b;hp=371b40589355848a0b2b63468cece91dcb7815dc;hb=c97f1ad6d2a2fb4fe9f3b15e40158aac21e5699e;hpb=89ac1d48971578ccb0645c292d4a058340aeb909

diff --git a/filesetup.c b/filesetup.c
index 371b4058..e456186b 100644
--- a/filesetup.c
+++ b/filesetup.c
@@ -12,8 +12,10 @@
 #include "smalloc.h"
 #include "filehash.h"
 #include "os/os.h"
+#include "hash.h"
+#include "lib/axmap.h"
 
-#ifdef FIO_HAVE_LINUX_FALLOCATE
+#ifdef CONFIG_LINUX_FALLOCATE
 #include <linux/falloc.h>
 #endif
 
@@ -70,14 +72,15 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 		return 1;
 	}
 
-#ifdef FIO_HAVE_FALLOCATE
+#ifdef CONFIG_POSIX_FALLOCATE
 	if (!td->o.fill_device) {
 		switch (td->o.fallocate_mode) {
 		case FIO_FALLOCATE_NONE:
 			break;
 		case FIO_FALLOCATE_POSIX:
 			dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
-				f->file_name, f->real_file_size);
+				f->file_name,
+				(unsigned long long) f->real_file_size);
 
 			r = posix_fallocate(f->fd, 0, f->real_file_size);
 			if (r > 0) {
@@ -85,12 +88,12 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 						strerror(r));
 			}
 			break;
-#ifdef FIO_HAVE_LINUX_FALLOCATE
+#ifdef CONFIG_LINUX_FALLOCATE
 		case FIO_FALLOCATE_KEEP_SIZE:
 			dprint(FD_FILE,
 				"fallocate(FALLOC_FL_KEEP_SIZE) "
-				"file %s size %llu\n",
-				f->file_name, f->real_file_size);
+				"file %s size %llu\n", f->file_name,
+				(unsigned long long) f->real_file_size);
 
 			r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
 					f->real_file_size);
@@ -98,14 +101,14 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 				td_verror(td, errno, "fallocate");
 			}
 			break;
-#endif /* FIO_HAVE_LINUX_FALLOCATE */
+#endif /* CONFIG_LINUX_FALLOCATE */
 		default:
 			log_err("fio: unknown fallocate mode: %d\n",
 				td->o.fallocate_mode);
 			assert(0);
 		}
 	}
-#endif /* FIO_HAVE_FALLOCATE */
+#endif /* CONFIG_POSIX_FALLOCATE */
 
 	if (!new_layout)
 		goto done;
@@ -116,7 +119,7 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 	 */
 	if (!td->o.fill_device) {
 		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
-							f->real_file_size);
+					(unsigned long long) f->real_file_size);
 		if (ftruncate(f->fd, f->real_file_size) == -1) {
 			td_verror(td, errno, "ftruncate");
 			goto err;
@@ -432,10 +435,16 @@ int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
 		ret = errno;
 
 	f->fd = -1;
+
+	if (f->shadow_fd != -1) {
+		close(f->shadow_fd);
+		f->shadow_fd = -1;
+	}
+
 	return ret;
 }
 
-static int file_lookup_open(struct fio_file *f, int flags)
+int file_lookup_open(struct fio_file *f, int flags)
 {
 	struct fio_file *__f;
 	int from_hash;
@@ -447,9 +456,6 @@ static int file_lookup_open(struct fio_file *f, int flags)
 		 * racy, need the __f->lock locked
 		 */
 		f->lock = __f->lock;
-		f->lock_owner = __f->lock_owner;
-		f->lock_batch = __f->lock_batch;
-		f->lock_ddir = __f->lock_ddir;
 		from_hash = 1;
 	} else {
 		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
@@ -460,6 +466,24 @@ static int file_lookup_open(struct fio_file *f, int flags)
 	return from_hash;
 }
 
+static int file_close_shadow_fds(struct thread_data *td)
+{
+	struct fio_file *f;
+	int num_closed = 0;
+	unsigned int i;
+
+	for_each_file(td, f, i) {
+		if (f->shadow_fd == -1)
+			continue;
+
+		close(f->shadow_fd);
+		f->shadow_fd = -1;
+		num_closed++;
+	}
+
+	return num_closed;
+}
+
 int generic_open_file(struct thread_data *td, struct fio_file *f)
 {
 	int is_std = 0;
@@ -468,6 +492,11 @@ int generic_open_file(struct thread_data *td, struct fio_file *f)
 
 	dprint(FD_FILE, "fd open %s\n", f->file_name);
 
+	if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
+		log_err("fio: trim only applies to block device\n");
+		return 1;
+	}
+
 	if (!strcmp(f->file_name, "-")) {
 		if (td_rw(td)) {
 			log_err("fio: can't read/write to stdin/out\n");
@@ -482,14 +511,17 @@ int generic_open_file(struct thread_data *td, struct fio_file *f)
 			f_out = stderr;
 	}
 
+	if (td_trim(td))
+		goto skip_flags;
 	if (td->o.odirect)
 		flags |= OS_O_DIRECT;
 	if (td->o.sync_io)
 		flags |= O_SYNC;
-	if (f->filetype != FIO_TYPE_FILE)
-		flags |= FIO_O_NOATIME;
 	if (td->o.create_on_open)
 		flags |= O_CREAT;
+skip_flags:
+	if (f->filetype != FIO_TYPE_FILE)
+		flags |= FIO_O_NOATIME;
 
 open_again:
 	if (td_write(td)) {
@@ -503,7 +535,7 @@ open_again:
 			f->fd = dup(STDOUT_FILENO);
 		else
 			from_hash = file_lookup_open(f, flags);
-	} else {
+	} else if (td_read(td)) {
 		if (f->filetype == FIO_TYPE_CHAR && !read_only)
 			flags |= O_RDWR;
 		else
@@ -513,6 +545,9 @@ open_again:
 			f->fd = dup(STDIN_FILENO);
 		else
 			from_hash = file_lookup_open(f, flags);
+	} else { //td trim
+		flags |= O_RDWR;
+		from_hash = file_lookup_open(f, flags);
 	}
 
 	if (f->fd == -1) {
@@ -523,8 +558,10 @@ open_again:
 			flags &= ~FIO_O_NOATIME;
 			goto open_again;
 		}
+		if (__e == EMFILE && file_close_shadow_fds(td))
+			goto open_again;
 
-		snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
+		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
 
 		if (__e == EINVAL && (flags & OS_O_DIRECT)) {
 			log_err("fio: looks like your file system does not " \
@@ -539,9 +576,22 @@ open_again:
 		int fio_unused ret;
 
 		/*
-		 * OK to ignore, we haven't done anything with it
+		 * Stash away descriptor for later close. This is to
+		 * work-around a "feature" on Linux, where a close of
+		 * an fd that has been opened for write will trigger
+		 * udev to call blkid to check partitions, fs id, etc.
+		 * That pollutes the device cache, which can slow down
+		 * unbuffered accesses.
 		 */
-		ret = generic_close_file(td, f);
+		if (f->shadow_fd == -1)
+			f->shadow_fd = f->fd;
+		else {
+			/*
+			 * OK to ignore, we haven't done anything
+			 * with it
+			 */
+			ret = generic_close_file(td, f);
+		}
 		goto open_again;
 	}
 }
@@ -657,6 +707,12 @@ static unsigned long long get_fs_free_counts(struct thread_data *td)
 	return ret;
 }
 
+uint64_t get_start_offset(struct thread_data *td)
+{
+	return td->o.start_offset +
+		(td->thread_number - 1) * td->o.offset_increment;
+}
+
 /*
  * Open the files and setup files sizes, creating files if necessary.
  */
@@ -718,8 +774,7 @@ int setup_files(struct thread_data *td)
 	extend_size = total_size = 0;
 	need_extend = 0;
 	for_each_file(td, f, i) {
-		f->file_offset = td->o.start_offset +
-			(td->thread_number - 1) * td->o.offset_increment;
+		f->file_offset = get_start_offset(td);
 
 		if (!td->o.file_size_low) {
 			/*
@@ -750,8 +805,11 @@ int setup_files(struct thread_data *td)
 
 		if (f->io_size == -1ULL)
 			total_size = -1ULL;
-		else
+		else {
+			if (td->o.size_percent)
+				f->io_size = (f->io_size * td->o.size_percent) / 100;
 			total_size += f->io_size;
+		}
 
 		if (f->filetype == FIO_TYPE_FILE &&
 		    (f->io_size + f->file_offset) > f->real_file_size &&
@@ -765,9 +823,6 @@ int setup_files(struct thread_data *td)
 		}
 	}
 
-	if (td->o.size_percent)
-		total_size = (total_size * td->o.size_percent) / 100;
-
 	if (!td->o.size || td->o.size > total_size)
 		td->o.size = total_size;
 
@@ -776,7 +831,7 @@ int setup_files(struct thread_data *td)
 	 */
 	if (need_extend) {
 		temp_stall_ts = 1;
-		if (!terse_output)
+		if (output_format == FIO_OUTPUT_NORMAL)
 			log_info("%s: Laying out IO file(s) (%u file(s) /"
 				 " %lluMB)\n", td->o.name, need_extend,
 				 extend_size >> 20);
@@ -846,28 +901,76 @@ int pre_read_files(struct thread_data *td)
 	return 1;
 }
 
+static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
+{
+	unsigned int range_size, seed;
+	unsigned long nranges;
+	uint64_t file_size;
+
+	range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
+	file_size = min(f->real_file_size, f->io_size);
+
+	nranges = (file_size + range_size - 1) / range_size;
+
+	seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
+	if (!td->o.rand_repeatable)
+		seed = td->rand_seeds[4];
+
+	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
+		zipf_init(&f->zipf, nranges, td->o.zipf_theta, seed);
+	else
+		pareto_init(&f->zipf, nranges, td->o.pareto_h, seed);
+
+	return 1;
+}
+
+static int init_rand_distribution(struct thread_data *td)
+{
+	struct fio_file *f;
+	unsigned int i;
+	int state;
+
+	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
+		return 0;
+
+	state = td->runstate;
+	td_set_runstate(td, TD_SETTING_UP);
+	for_each_file(td, f, i)
+		__init_rand_distribution(td, f);
+	td_set_runstate(td, state);
+
+	return 1;
+}
+
 int init_random_map(struct thread_data *td)
 {
-	unsigned long long blocks, num_maps;
+	unsigned long long blocks;
 	struct fio_file *f;
 	unsigned int i;
 
-	if (td->o.norandommap || !td_random(td))
+	if (init_rand_distribution(td))
+		return 0;
+	if (!td_random(td))
 		return 0;
 
 	for_each_file(td, f, i) {
-		blocks = (f->real_file_size + td->o.rw_min_bs - 1) /
-				(unsigned long long) td->o.rw_min_bs;
-		num_maps = (blocks + BLOCKS_PER_MAP - 1) /
-				(unsigned long long) BLOCKS_PER_MAP;
-		if (num_maps == (unsigned long) num_maps) {
-			f->file_map = smalloc(num_maps * sizeof(unsigned long));
-			if (f->file_map) {
-				f->num_maps = num_maps;
+		uint64_t file_size = min(f->real_file_size, f->io_size);
+
+		blocks = file_size / (unsigned long long) td->o.rw_min_bs;
+
+		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
+			unsigned long seed;
+
+			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
+
+			if (!lfsr_init(&f->lfsr, blocks, seed, seed & 0xF))
 				continue;
-			}
-		} else
-			f->file_map = NULL;
+		} else if (!td->o.norandommap) {
+			f->io_axmap = axmap_new(blocks);
+			if (f->io_axmap)
+				continue;
+		} else if (td->o.norandommap)
+			continue;
 
 		if (!td->o.softrandommap) {
 			log_err("fio: failed allocating random map. If running"
@@ -879,7 +982,6 @@ int init_random_map(struct thread_data *td)
 
 		log_info("fio: file %s failed allocating random map. Running "
			 "job without.\n", f->file_name);
-		f->num_maps = 0;
 	}
 
 	return 0;
@@ -916,15 +1018,17 @@ void close_and_free_files(struct thread_data *td)
 		sfree(f->file_name);
 		f->file_name = NULL;
 
-		sfree(f->file_map);
-		f->file_map = NULL;
+		axmap_free(f->io_axmap);
+		f->io_axmap = NULL;
 		sfree(f);
 	}
 
 	td->o.filename = NULL;
 	free(td->files);
+	free(td->file_locks);
 	td->files_index = 0;
 	td->files = NULL;
+	td->file_locks = NULL;
 	td->o.nr_files = 0;
 }
 
@@ -968,14 +1072,27 @@ int add_file(struct thread_data *td, const char *fname)
 	}
 
 	f->fd = -1;
-	fio_file_reset(f);
+	f->shadow_fd = -1;
+	fio_file_reset(td, f);
 
 	if (td->files_size <= td->files_index) {
-		int new_size = td->o.nr_files + 1;
+		unsigned int new_size = td->o.nr_files + 1;
 
 		dprint(FD_FILE, "resize file array to %d files\n", new_size);
 
 		td->files = realloc(td->files, new_size * sizeof(f));
+		if (td->files == NULL) {
+			log_err("fio: realloc OOM\n");
+			assert(0);
+		}
+		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
+			td->file_locks = realloc(td->file_locks, new_size);
+			if (!td->file_locks) {
+				log_err("fio: realloc OOM\n");
+				assert(0);
+			}
+			td->file_locks[cur_files] = FILE_LOCK_NONE;
+		}
 		td->files_size = new_size;
 	}
 	td->files[cur_files] = f;
@@ -1003,10 +1120,10 @@ int add_file(struct thread_data *td, const char *fname)
 	case FILE_LOCK_NONE:
 		break;
 	case FILE_LOCK_READWRITE:
-		f->lock = fio_mutex_rw_init();
+		f->rwlock = fio_rwlock_init();
 		break;
 	case FILE_LOCK_EXCLUSIVE:
-		f->lock = fio_mutex_init(1);
+		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
 		break;
 	default:
 		log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
@@ -1078,57 +1195,34 @@ void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
 	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
 		return;
 
-	if (f->lock_owner == td && f->lock_batch--)
-		return;
-
 	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
 		if (ddir == DDIR_READ)
-			fio_mutex_down_read(f->lock);
+			fio_rwlock_read(f->rwlock);
 		else
-			fio_mutex_down_write(f->lock);
+			fio_rwlock_write(f->rwlock);
 	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
 		fio_mutex_down(f->lock);
 
-	f->lock_owner = td;
-	f->lock_batch = td->o.lockfile_batch;
-	f->lock_ddir = ddir;
+	td->file_locks[f->fileno] = td->o.file_lock_mode;
 }
 
 void unlock_file(struct thread_data *td, struct fio_file *f)
 {
 	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
 		return;
-	if (f->lock_batch)
-		return;
-
-	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
-		const int is_read = f->lock_ddir == DDIR_READ;
-		int val = fio_mutex_getval(f->lock);
-
-		if ((is_read && val == 1) || (!is_read && val == -1))
-			f->lock_owner = NULL;
-
-		if (is_read)
-			fio_mutex_up_read(f->lock);
-		else
-			fio_mutex_up_write(f->lock);
-	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
-		int val = fio_mutex_getval(f->lock);
-
-		if (val == 0)
-			f->lock_owner = NULL;
 
+	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
+		fio_rwlock_unlock(f->rwlock);
+	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
 		fio_mutex_up(f->lock);
-	}
+
+	td->file_locks[f->fileno] = FILE_LOCK_NONE;
 }
 
 void unlock_file_all(struct thread_data *td, struct fio_file *f)
 {
-	if (f->lock_owner != td)
-		return;
-
-	f->lock_batch = 0;
-	unlock_file(td, f);
+	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
+		unlock_file(td, f);
 }
 
 static int recurse_dir(struct thread_data *td, const char *dirname)
@@ -1141,7 +1235,7 @@ static int recurse_dir(struct thread_data *td, const char *dirname)
 	if (!D) {
 		char buf[FIO_VERROR_SIZE];
 
-		snprintf(buf, FIO_VERROR_SIZE - 1, "opendir(%s)", dirname);
+		snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
 		td_verror(td, errno, buf);
 		return 1;
 	}
@@ -1201,6 +1295,9 @@ void dup_files(struct thread_data *td, struct thread_data *org)
 
 	td->files = malloc(org->files_index * sizeof(f));
 
+	if (td->o.file_lock_mode != FILE_LOCK_NONE)
+		td->file_locks = malloc(org->files_index);
+
 	for_each_file(org, f, i) {
 		struct fio_file *__f;
 
@@ -1210,7 +1307,7 @@ void dup_files(struct thread_data *td, struct thread_data *org)
 			assert(0);
 		}
 		__f->fd = -1;
-		fio_file_reset(__f);
+		fio_file_reset(td, __f);
 
 		if (f->file_name) {
 			__f->file_name = smalloc_strdup(f->file_name);
@@ -1250,3 +1347,13 @@ void free_release_files(struct thread_data *td)
 	td->files_index = 0;
 	td->nr_normal_files = 0;
 }
+
+void fio_file_reset(struct thread_data *td, struct fio_file *f)
+{
+	f->last_pos = f->file_offset;
+	f->last_start = -1ULL;
+	if (f->io_axmap)
+		axmap_reset(f->io_axmap);
+	if (td->o.random_generator == FIO_RAND_GEN_LFSR)
+		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
+}
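
For reference, the get_start_offset() helper added above centralizes the per-thread offset calculation that setup_files() previously open-coded: thread N starts at start_offset plus (N - 1) * offset_increment. The standalone sketch below is not part of the patch; the helper name, job values and printed output are illustrative assumptions only, but the arithmetic mirrors the helper.

/*
 * Standalone sketch of the per-thread offset math used by
 * get_start_offset() in the patch above. Values are invented.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t start_offset(uint64_t start, uint64_t increment,
			     unsigned int thread_number)
{
	/* thread 1 starts at 'start'; each later thread is shifted */
	return start + (uint64_t)(thread_number - 1) * increment;
}

int main(void)
{
	uint64_t start = 1ULL << 20;	/* assumed offset=1M */
	uint64_t incr = 256ULL << 20;	/* assumed offset_increment=256M */
	unsigned int t;

	for (t = 1; t <= 4; t++)
		printf("thread %u: file_offset=%llu\n", t,
		       (unsigned long long) start_offset(start, incr, t));
	return 0;
}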