X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=filesetup.c;h=f0e3b34fd8ae1e90424c04a37ec1cba4b71e4a38;hp=5aadf126311b9322668c3e835b673126949fa4d5;hb=bcbfeefa7bce8383cf85fe59ced91f54821dfbd2;hpb=98ffb8f3ecebed9984d1744f142eb8be10c14dbd

diff --git a/filesetup.c b/filesetup.c
index 5aadf126..f0e3b34f 100644
--- a/filesetup.c
+++ b/filesetup.c
@@ -11,6 +11,7 @@
 #include "fio.h"
 #include "smalloc.h"
 #include "filehash.h"
+#include "options.h"
 #include "os/os.h"
 #include "hash.h"
 #include "lib/axmap.h"
@@ -21,6 +22,8 @@
 
 static int root_warn;
 
+static FLIST_HEAD(filename_list);
+
 static inline void clear_error(struct thread_data *td)
 {
 	td->error = 0;
@@ -79,7 +82,8 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 		break;
 	case FIO_FALLOCATE_POSIX:
 		dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
-				f->file_name, f->real_file_size);
+				f->file_name,
+				(unsigned long long) f->real_file_size);
 
 		r = posix_fallocate(f->fd, 0, f->real_file_size);
 		if (r > 0) {
@@ -91,14 +95,14 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 
 	case FIO_FALLOCATE_KEEP_SIZE:
 		dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) "
-				"file %s size %llu\n",
-				f->file_name, f->real_file_size);
+				"file %s size %llu\n", f->file_name,
+				(unsigned long long) f->real_file_size);
 
 		r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
 				f->real_file_size);
-		if (r != 0) {
+		if (r != 0)
 			td_verror(td, errno, "fallocate");
-		}
+
 		break;
 #endif /* CONFIG_LINUX_FALLOCATE */
 	default:
@@ -118,15 +122,16 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 	 */
 	if (!td->o.fill_device) {
 		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
-							f->real_file_size);
+					(unsigned long long) f->real_file_size);
 		if (ftruncate(f->fd, f->real_file_size) == -1) {
-			td_verror(td, errno, "ftruncate");
-			goto err;
+			if (errno != EFBIG) {
+				td_verror(td, errno, "ftruncate");
+				goto err;
+			}
 		}
 	}
 
 	b = malloc(td->o.max_bs[DDIR_WRITE]);
-	memset(b, 0, td->o.max_bs[DDIR_WRITE]);
 
 	left = f->real_file_size;
 	while (left && !td->terminate) {
@@ -134,6 +139,8 @@ static int extend_file(struct thread_data *td, struct fio_file *f)
 		if (bs > left)
 			bs = left;
 
+		fill_io_buffer(td, b, bs, bs);
+
 		r = write(f->fd, b, bs);
 
 		if (r > 0) {
@@ -354,7 +361,8 @@ static int get_file_size(struct thread_data *td, struct fio_file *f)
 
 	if (f->file_offset > f->real_file_size) {
 		log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
-					f->file_offset, f->real_file_size);
+					(unsigned long long) f->file_offset,
+					(unsigned long long) f->real_file_size);
 		return 1;
 	}
 
@@ -385,7 +393,8 @@ static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
 	if (f->mmap_ptr) {
 		ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
 #ifdef FIO_MADV_FREE
-		(void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
+		if (f->filetype == FIO_TYPE_BD)
+			(void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
 #endif
 	} else if (f->filetype == FIO_TYPE_FILE) {
 		ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
@@ -440,6 +449,7 @@ int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
 		f->shadow_fd = -1;
 	}
 
+	f->engine_data = 0;
 	return ret;
 }
 
@@ -455,9 +465,6 @@ int file_lookup_open(struct fio_file *f, int flags)
 		 * racy, need the __f->lock locked
 		 */
 		f->lock = __f->lock;
-		f->lock_owner = __f->lock_owner;
-		f->lock_batch = __f->lock_batch;
-		f->lock_ddir = __f->lock_ddir;
 		from_hash = 1;
 	} else {
 		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
@@ -517,6 +524,13 @@ int generic_open_file(struct thread_data *td, struct fio_file *f)
 		goto skip_flags;
 	if (td->o.odirect)
 		flags |= OS_O_DIRECT;
+	if (td->o.oatomic) {
+		if (!FIO_O_ATOMIC) {
+			td_verror(td, EINVAL, "OS does not support atomic IO");
+			return 1;
+		}
+		flags |= OS_O_DIRECT | FIO_O_ATOMIC;
+	}
 	if (td->o.sync_io)
 		flags |= O_SYNC;
 	if (td->o.create_on_open)
@@ -582,7 +596,7 @@ open_again:
 			 * work-around a "feature" on Linux, where a close of
 			 * an fd that has been opened for write will trigger
 			 * udev to call blkid to check partitions, fs id, etc.
-			 * That polutes the device cache, which can slow down
+			 * That pollutes the device cache, which can slow down
 			 * unbuffered accesses.
 			 */
 			if (f->shadow_fd == -1)
@@ -721,13 +735,20 @@ uint64_t get_start_offset(struct thread_data *td)
 int setup_files(struct thread_data *td)
 {
 	unsigned long long total_size, extend_size;
+	struct thread_options *o = &td->o;
 	struct fio_file *f;
-	unsigned int i;
+	unsigned int i, nr_fs_extra = 0;
 	int err = 0, need_extend;
+	int old_state;
+	const unsigned int bs = td_min_bs(td);
+	uint64_t fs = 0;
 
 	dprint(FD_FILE, "setup files\n");
 
-	if (td->o.read_iolog_file)
+	old_state = td->runstate;
+	td_set_runstate(td, TD_SETTING_UP);
+
+	if (o->read_iolog_file)
 		goto done;
 
 	/*
@@ -741,7 +762,7 @@ int setup_files(struct thread_data *td)
 		err = get_file_sizes(td);
 
 	if (err)
-		return err;
+		goto err_out;
 
 	/*
 	 * check sizes. if the files/devices do not exist and the size
@@ -755,17 +776,32 @@ int setup_files(struct thread_data *td)
 			total_size += f->real_file_size;
 	}
 
-	if (td->o.fill_device)
+	if (o->fill_device)
 		td->fill_device_size = get_fs_free_counts(td);
 
 	/*
 	 * device/file sizes are zero and no size given, punt
 	 */
-	if ((!total_size || total_size == -1ULL) && !td->o.size &&
-	    !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) {
-		log_err("%s: you need to specify size=\n", td->o.name);
+	if ((!total_size || total_size == -1ULL) && !o->size &&
+	    !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
+	    !(o->nr_files && (o->file_size_low || o->file_size_high))) {
+		log_err("%s: you need to specify size=\n", o->name);
 		td_verror(td, EINVAL, "total_file_size");
-		return 1;
+		goto err_out;
+	}
+
+	/*
+	 * Calculate per-file size and potential extra size for the
+	 * first files, if needed.
+	 */
+	if (!o->file_size_low) {
+		uint64_t all_fs;
+
+		fs = o->size / o->nr_files;
+		all_fs = fs * o->nr_files;
+
+		if (all_fs < o->size)
+			nr_fs_extra = (o->size - all_fs) / bs;
 	}
 
 	/*
@@ -778,27 +814,33 @@ int setup_files(struct thread_data *td)
 	for_each_file(td, f, i) {
 		f->file_offset = get_start_offset(td);
 
-		if (!td->o.file_size_low) {
+		if (!o->file_size_low) {
 			/*
 			 * no file size range given, file size is equal to
-			 * total size divided by number of files. if that is
-			 * zero, set it to the real file size.
+			 * total size divided by number of files. If that is
+			 * zero, set it to the real file size. If the size
+			 * doesn't divide nicely with the min blocksize,
+			 * make the first files bigger.
 			 */
-			f->io_size = td->o.size / td->o.nr_files;
+			f->io_size = fs;
+			if (nr_fs_extra) {
+				nr_fs_extra--;
+				f->io_size += bs;
+			}
+
 			if (!f->io_size)
 				f->io_size = f->real_file_size - f->file_offset;
-		} else if (f->real_file_size < td->o.file_size_low ||
-			   f->real_file_size > td->o.file_size_high) {
-			if (f->file_offset > td->o.file_size_low)
+		} else if (f->real_file_size < o->file_size_low ||
+			   f->real_file_size > o->file_size_high) {
+			if (f->file_offset > o->file_size_low)
 				goto err_offset;
 			/*
 			 * file size given. if it's fixed, use that. if it's a
 			 * range, generate a random size in-between.
 			 */
-			if (td->o.file_size_low == td->o.file_size_high) {
-				f->io_size = td->o.file_size_low -
-						f->file_offset;
-			} else {
+			if (o->file_size_low == o->file_size_high)
+				f->io_size = o->file_size_low - f->file_offset;
+			else {
 				f->io_size = get_rand_file_size(td)
						- f->file_offset;
 			}
@@ -808,15 +850,15 @@ int setup_files(struct thread_data *td)
 		if (f->io_size == -1ULL)
 			total_size = -1ULL;
 		else {
-			if (td->o.size_percent)
-				f->io_size = (f->io_size * td->o.size_percent) / 100;
+			if (o->size_percent)
+				f->io_size = (f->io_size * o->size_percent) / 100;
 			total_size += f->io_size;
 		}
 
 		if (f->filetype == FIO_TYPE_FILE &&
 		    (f->io_size + f->file_offset) > f->real_file_size &&
 		    !(td->io_ops->flags & FIO_DISKLESSIO)) {
-			if (!td->o.create_on_open) {
+			if (!o->create_on_open) {
 				need_extend++;
 				extend_size += (f->io_size + f->file_offset);
 			} else
@@ -825,8 +867,8 @@ int setup_files(struct thread_data *td)
 		}
 	}
 
-	if (!td->o.size || td->o.size > total_size)
-		td->o.size = total_size;
+	if (!o->size || o->size > total_size)
+		o->size = total_size;
 
 	/*
 	 * See if we need to extend some files
@@ -835,7 +877,7 @@ int setup_files(struct thread_data *td)
 		temp_stall_ts = 1;
 		if (output_format == FIO_OUTPUT_NORMAL)
 			log_info("%s: Laying out IO file(s) (%u file(s) /"
-				 " %lluMB)\n", td->o.name, need_extend,
+				 " %lluMB)\n", o->name, need_extend,
 				extend_size >> 20);
 
 		for_each_file(td, f, i) {
@@ -846,7 +888,7 @@ int setup_files(struct thread_data *td)
 
 			assert(f->filetype == FIO_TYPE_FILE);
 			fio_file_clear_extend(f);
-			if (!td->o.fill_device) {
+			if (!o->fill_device) {
 				old_len = f->real_file_size;
 				extend_len = f->io_size + f->file_offset -
						old_len;
@@ -867,25 +909,28 @@ int setup_files(struct thread_data *td)
 	}
 
 	if (err)
-		return err;
+		goto err_out;
 
-	if (!td->o.zone_size)
-		td->o.zone_size = td->o.size;
+	if (!o->zone_size)
+		o->zone_size = o->size;
 
 	/*
 	 * iolog already set the total io size, if we read back
 	 * stored entries.
 	 */
-	if (!td->o.read_iolog_file)
-		td->total_io_size = td->o.size * td->o.loops;
+	if (!o->read_iolog_file)
+		td->total_io_size = o->size * o->loops;
 
 done:
-	if (td->o.create_only)
+	if (o->create_only)
 		td->done = 1;
 
+	td_set_runstate(td, old_state);
 	return 0;
 
err_offset:
-	log_err("%s: you need to specify valid offset=\n", td->o.name);
+	log_err("%s: you need to specify valid offset=\n", o->name);
+err_out:
+	td_set_runstate(td, old_state);
 	return 1;
 }
 
@@ -919,9 +964,9 @@ static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
 	seed = td->rand_seeds[4];
 
 	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
-		zipf_init(&f->zipf, nranges, td->o.zipf_theta, seed);
+		zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
 	else
-		pareto_init(&f->zipf, nranges, td->o.pareto_h, seed);
+		pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
 
 	return 1;
 }
@@ -958,14 +1003,14 @@ int init_random_map(struct thread_data *td)
 	for_each_file(td, f, i) {
 		uint64_t file_size = min(f->real_file_size, f->io_size);
 
-		blocks = (file_size + td->o.rw_min_bs - 1) /
-				(unsigned long long) td->o.rw_min_bs;
+		blocks = file_size / (unsigned long long) td->o.rw_min_bs;
+
 		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
 			unsigned long seed;
 
 			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
 
-			if (!lfsr_init(&f->lfsr, blocks, seed))
+			if (!lfsr_init(&f->lfsr, blocks, seed, seed & 0xF))
 				continue;
 		} else if (!td->o.norandommap) {
 			f->io_axmap = axmap_new(blocks);
@@ -1027,8 +1072,11 @@ void close_and_free_files(struct thread_data *td)
 
 	td->o.filename = NULL;
 	free(td->files);
+	free(td->file_locks);
 	td->files_index = 0;
 	td->files = NULL;
+	td->file_locks = NULL;
+	td->o.file_lock_mode = FILE_LOCK_NONE;
 	td->o.nr_files = 0;
 }
 
@@ -1056,7 +1104,48 @@ static void get_file_type(struct fio_file *f)
 	}
 }
 
-int add_file(struct thread_data *td, const char *fname)
+static void set_already_allocated(const char *fname) {
+	struct file_name *fn;
+
+	fn = malloc(sizeof(struct file_name));
+	fn->filename = strdup(fname);
+	flist_add_tail(&fn->list, &filename_list);
+}
+
+static int is_already_allocated(const char *fname)
+{
+	struct flist_head *entry;
+	char *filename;
+
+	if (!flist_empty(&filename_list))
+	{
+		flist_for_each(entry, &filename_list) {
+			filename = flist_entry(entry, struct file_name, list)->filename;
+
+			if (strcmp(filename, fname) == 0)
+				return 1;
+		}
+	}
+
+	return 0;
+}
+
+static void free_already_allocated() {
+	struct flist_head *entry, *tmp;
+	struct file_name *fn;
+
+	if (!flist_empty(&filename_list))
+	{
+		flist_for_each_safe(entry, tmp, &filename_list) {
+			fn = flist_entry(entry, struct file_name, list);
+			free(fn->filename);
+			flist_del(&fn->list);
+			free(fn);
+		}
+	}
+}
+
+int add_file(struct thread_data *td, const char *fname, int numjob)
 {
 	int cur_files = td->files_index;
 	char file_name[PATH_MAX];
@@ -1065,6 +1154,15 @@ int add_file(struct thread_data *td, const char *fname)
 
 	dprint(FD_FILE, "add file %s\n", fname);
 
+	if (td->o.directory)
+		len = set_name_idx(file_name, td->o.directory, numjob);
+
+	sprintf(file_name + len, "%s", fname);
+
+	/* clean cloned siblings using existing files */
+	if (numjob && is_already_allocated(file_name))
+		return 0;
+
 	f = smalloc(sizeof(*f));
 	if (!f) {
 		log_err("fio: smalloc OOM\n");
@@ -1085,6 +1183,14 @@ int add_file(struct thread_data *td, const char *fname)
 			log_err("fio: realloc OOM\n");
 			assert(0);
 		}
+		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
+			td->file_locks = realloc(td->file_locks, new_size);
+			if (!td->file_locks) {
+				log_err("fio: realloc OOM\n");
+				assert(0);
+			}
+			td->file_locks[cur_files] = FILE_LOCK_NONE;
+		}
 		td->files_size = new_size;
 	}
 	td->files[cur_files] = f;
@@ -1096,10 +1202,6 @@ int add_file(struct thread_data *td, const char *fname)
 	if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
 		f->real_file_size = -1ULL;
 
-	if (td->o.directory)
-		len = sprintf(file_name, "%s/", td->o.directory);
-
-	sprintf(file_name + len, "%s", fname);
 	f->file_name = smalloc_strdup(file_name);
 	if (!f->file_name) {
 		log_err("fio: smalloc OOM\n");
@@ -1112,7 +1214,7 @@ int add_file(struct thread_data *td, const char *fname)
 	case FILE_LOCK_NONE:
 		break;
 	case FILE_LOCK_READWRITE:
-		f->lock = fio_mutex_rw_init();
+		f->rwlock = fio_rwlock_init();
 		break;
 	case FILE_LOCK_EXCLUSIVE:
 		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
@@ -1126,6 +1228,8 @@ int add_file(struct thread_data *td, const char *fname)
 	if (f->filetype == FIO_TYPE_FILE)
 		td->nr_normal_files++;
 
+	set_already_allocated(file_name);
+
 	dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
 							cur_files);
 
@@ -1142,7 +1246,7 @@ int add_file_exclusive(struct thread_data *td, const char *fname)
 			return i;
 	}
 
-	return add_file(td, fname);
+	return add_file(td, fname, 0);
 }
 
 void get_file(struct fio_file *f)
@@ -1187,57 +1291,36 @@ void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
 	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
 		return;
 
-	if (f->lock_owner == td && f->lock_batch--)
-		return;
-
 	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
 		if (ddir == DDIR_READ)
-			fio_mutex_down_read(f->lock);
+			fio_rwlock_read(f->rwlock);
 		else
-			fio_mutex_down_write(f->lock);
+			fio_rwlock_write(f->rwlock);
 	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
 		fio_mutex_down(f->lock);
 
-	f->lock_owner = td;
-	f->lock_batch = td->o.lockfile_batch;
-	f->lock_ddir = ddir;
+	td->file_locks[f->fileno] = td->o.file_lock_mode;
 }
 
 void unlock_file(struct thread_data *td, struct fio_file *f)
 {
 	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
 		return;
 
-	if (f->lock_batch)
-		return;
-
-	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
-		const int is_read = f->lock_ddir == DDIR_READ;
-		int val = fio_mutex_getval(f->lock);
-
-		if ((is_read && val == 1) || (!is_read && val == -1))
-			f->lock_owner = NULL;
-
-		if (is_read)
-			fio_mutex_up_read(f->lock);
-		else
-			fio_mutex_up_write(f->lock);
-	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
-		int val = fio_mutex_getval(f->lock);
-
-		if (val == 0)
-			f->lock_owner = NULL;
+	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
+		fio_rwlock_unlock(f->rwlock);
+	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
 		fio_mutex_up(f->lock);
-	}
+
+	td->file_locks[f->fileno] = FILE_LOCK_NONE;
 }
 
 void unlock_file_all(struct thread_data *td, struct fio_file *f)
 {
-	if (f->lock_owner != td)
+	if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
 		return;
-
-	f->lock_batch = 0;
-	unlock_file(td, f);
+	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
+		unlock_file(td, f);
 }
 
 static int recurse_dir(struct thread_data *td, const char *dirname)
@@ -1272,7 +1355,7 @@ static int recurse_dir(struct thread_data *td, const char *dirname)
 		}
 
 		if (S_ISREG(sb.st_mode)) {
-			add_file(td, full_path);
+			add_file(td, full_path, 0);
 			td->o.nr_files++;
 			continue;
 		}
@@ -1310,6 +1393,9 @@ void dup_files(struct thread_data *td, struct thread_data *org)
 
 	td->files = malloc(org->files_index * sizeof(f));
 
+	if (td->o.file_lock_mode != FILE_LOCK_NONE)
+		td->file_locks = malloc(org->files_index);
+
 	for_each_file(org, f, i) {
 		struct fio_file *__f;
 
@@ -1331,6 +1417,11 @@ void dup_files(struct thread_data *td, struct thread_data *org)
 			__f->filetype = f->filetype;
 		}
 
+		if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
+			__f->lock = f->lock;
+		else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
+			__f->rwlock = f->rwlock;
+
 		td->files[i] = __f;
 	}
 }
@@ -1369,3 +1460,20 @@ void fio_file_reset(struct thread_data *td, struct fio_file *f)
 	if (td->o.random_generator == FIO_RAND_GEN_LFSR)
 		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
 }
+
+int fio_files_done(struct thread_data *td)
+{
+	struct fio_file *f;
+	unsigned int i;
+
+	for_each_file(td, f, i)
+		if (!fio_file_done(f))
+			return 0;
+
+	return 1;
+}
+
+/* free memory used in initialization phase only */
+void filesetup_mem_free() {
+	free_already_allocated();
+}
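
Note on the add_file() hunks above: the patch keeps a global list of file names that have already been claimed during setup, so a cloned job (numjob > 0) whose directory-qualified name resolves to an existing entry is skipped instead of being added twice, and the list is torn down later via filesetup_mem_free(). The following is a minimal standalone sketch of that bookkeeping, not fio code: it uses a plain singly linked list in place of fio's flist_* helpers and makes no attempt to reproduce the per-job directory selection done by set_name_idx(); the file names in main() are purely illustrative.

/*
 * Standalone sketch (not fio code) of the duplicate-filename
 * bookkeeping idea used by set_already_allocated() /
 * is_already_allocated() / free_already_allocated() in the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_node {
	char *filename;
	struct name_node *next;
};

static struct name_node *name_list;

/* Remember that a file name has already been claimed by some job. */
static void set_already_allocated(const char *fname)
{
	struct name_node *n = malloc(sizeof(*n));

	n->filename = strdup(fname);
	n->next = name_list;
	name_list = n;
}

/* Return 1 if a previous job already added this file name. */
static int is_already_allocated(const char *fname)
{
	struct name_node *n;

	for (n = name_list; n; n = n->next)
		if (!strcmp(n->filename, fname))
			return 1;

	return 0;
}

/* Drop the bookkeeping once setup is finished. */
static void free_already_allocated(void)
{
	while (name_list) {
		struct name_node *n = name_list;

		name_list = n->next;
		free(n->filename);
		free(n);
	}
}

int main(void)
{
	set_already_allocated("d0/testfile");

	/* A cloned job asking for the same name would be skipped. */
	printf("%d\n", is_already_allocated("d0/testfile"));	/* prints 1 */
	printf("%d\n", is_already_allocated("d1/testfile"));	/* prints 0 */

	free_already_allocated();
	return 0;
}

The linear scan mirrors the patch: setup runs once per job, so a simple list lookup is cheap enough and avoids any extra data-structure dependencies during initialization.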