X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=filesetup.c;h=601df8f1ddec655b09ec8eb7b2d03947307a8e65;hp=3054d9dab4803271de2bbee3369ac9ef66d1133d;hb=3cd4c66f35fad0df64e4e6dfeddc5bdfd5206e0c;hpb=9db01ef976006c002b05fa4e4ec589eb029aac5b diff --git a/filesetup.c b/filesetup.c index 3054d9da..601df8f1 100644 --- a/filesetup.c +++ b/filesetup.c @@ -79,7 +79,8 @@ static int extend_file(struct thread_data *td, struct fio_file *f) break; case FIO_FALLOCATE_POSIX: dprint(FD_FILE, "posix_fallocate file %s size %llu\n", - f->file_name, f->real_file_size); + f->file_name, + (unsigned long long) f->real_file_size); r = posix_fallocate(f->fd, 0, f->real_file_size); if (r > 0) { @@ -91,8 +92,8 @@ static int extend_file(struct thread_data *td, struct fio_file *f) case FIO_FALLOCATE_KEEP_SIZE: dprint(FD_FILE, "fallocate(FALLOC_FL_KEEP_SIZE) " - "file %s size %llu\n", - f->file_name, f->real_file_size); + "file %s size %llu\n", f->file_name, + (unsigned long long) f->real_file_size); r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0, f->real_file_size); @@ -118,15 +119,16 @@ static int extend_file(struct thread_data *td, struct fio_file *f) */ if (!td->o.fill_device) { dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name, - f->real_file_size); + (unsigned long long) f->real_file_size); if (ftruncate(f->fd, f->real_file_size) == -1) { - td_verror(td, errno, "ftruncate"); - goto err; + if (errno != EFBIG) { + td_verror(td, errno, "ftruncate"); + goto err; + } } } b = malloc(td->o.max_bs[DDIR_WRITE]); - memset(b, 0, td->o.max_bs[DDIR_WRITE]); left = f->real_file_size; while (left && !td->terminate) { @@ -134,6 +136,8 @@ static int extend_file(struct thread_data *td, struct fio_file *f) if (bs > left) bs = left; + fill_io_buffer(td, b, bs, bs); + r = write(f->fd, b, bs); if (r > 0) { @@ -354,7 +358,8 @@ static int get_file_size(struct thread_data *td, struct fio_file *f) if (f->file_offset > f->real_file_size) { log_err("%s: offset extends end (%llu > %llu)\n", td->o.name, - f->file_offset, f->real_file_size); + (unsigned long long) f->file_offset, + (unsigned long long) f->real_file_size); return 1; } @@ -385,7 +390,8 @@ static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f, if (f->mmap_ptr) { ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED); #ifdef FIO_MADV_FREE - (void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE); + if (f->filetype == FIO_TYPE_BD) + (void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE); #endif } else if (f->filetype == FIO_TYPE_FILE) { ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED); @@ -440,6 +446,7 @@ int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f) f->shadow_fd = -1; } + f->engine_data = 0; return ret; } @@ -455,9 +462,6 @@ int file_lookup_open(struct fio_file *f, int flags) * racy, need the __f->lock locked */ f->lock = __f->lock; - f->lock_owner = __f->lock_owner; - f->lock_batch = __f->lock_batch; - f->lock_ddir = __f->lock_ddir; from_hash = 1; } else { dprint(FD_FILE, "file not found in hash %s\n", f->file_name); @@ -517,6 +521,13 @@ int generic_open_file(struct thread_data *td, struct fio_file *f) goto skip_flags; if (td->o.odirect) flags |= OS_O_DIRECT; + if (td->o.oatomic) { + if (!FIO_O_ATOMIC) { + td_verror(td, EINVAL, "OS does not support atomic IO"); + return 1; + } + flags |= OS_O_DIRECT | FIO_O_ATOMIC; + } if (td->o.sync_io) flags |= O_SYNC; if (td->o.create_on_open) @@ -582,7 +593,7 @@ open_again: * work-around a "feature" on Linux, where a close of * an fd 
that has been opened for write will trigger * udev to call blkid to check partitions, fs id, etc. - * That polutes the device cache, which can slow down + * That pollutes the device cache, which can slow down * unbuffered accesses. */ if (f->shadow_fd == -1) @@ -721,13 +732,18 @@ uint64_t get_start_offset(struct thread_data *td) int setup_files(struct thread_data *td) { unsigned long long total_size, extend_size; + struct thread_options *o = &td->o; struct fio_file *f; unsigned int i; int err = 0, need_extend; + int old_state; dprint(FD_FILE, "setup files\n"); - if (td->o.read_iolog_file) + old_state = td->runstate; + td_set_runstate(td, TD_SETTING_UP); + + if (o->read_iolog_file) goto done; /* @@ -741,7 +757,7 @@ int setup_files(struct thread_data *td) err = get_file_sizes(td); if (err) - return err; + goto err_out; /* * check sizes. if the files/devices do not exist and the size @@ -755,17 +771,18 @@ int setup_files(struct thread_data *td) total_size += f->real_file_size; } - if (td->o.fill_device) + if (o->fill_device) td->fill_device_size = get_fs_free_counts(td); /* * device/file sizes are zero and no size given, punt */ - if ((!total_size || total_size == -1ULL) && !td->o.size && - !(td->io_ops->flags & FIO_NOIO) && !td->o.fill_device) { - log_err("%s: you need to specify size=\n", td->o.name); + if ((!total_size || total_size == -1ULL) && !o->size && + !(td->io_ops->flags & FIO_NOIO) && !o->fill_device && + !(o->nr_files && (o->file_size_low || o->file_size_high))) { + log_err("%s: you need to specify size=\n", o->name); td_verror(td, EINVAL, "total_file_size"); - return 1; + goto err_out; } /* @@ -778,27 +795,26 @@ int setup_files(struct thread_data *td) for_each_file(td, f, i) { f->file_offset = get_start_offset(td); - if (!td->o.file_size_low) { + if (!o->file_size_low) { /* * no file size range given, file size is equal to * total size divided by number of files. if that is * zero, set it to the real file size. */ - f->io_size = td->o.size / td->o.nr_files; + f->io_size = o->size / o->nr_files; if (!f->io_size) f->io_size = f->real_file_size - f->file_offset; - } else if (f->real_file_size < td->o.file_size_low || - f->real_file_size > td->o.file_size_high) { - if (f->file_offset > td->o.file_size_low) + } else if (f->real_file_size < o->file_size_low || + f->real_file_size > o->file_size_high) { + if (f->file_offset > o->file_size_low) goto err_offset; /* * file size given. if it's fixed, use that. if it's a * range, generate a random size in-between. 
*/ - if (td->o.file_size_low == td->o.file_size_high) { - f->io_size = td->o.file_size_low - - f->file_offset; - } else { + if (o->file_size_low == o->file_size_high) + f->io_size = o->file_size_low - f->file_offset; + else { f->io_size = get_rand_file_size(td) - f->file_offset; } @@ -808,15 +824,15 @@ int setup_files(struct thread_data *td) if (f->io_size == -1ULL) total_size = -1ULL; else { - if (td->o.size_percent) - f->io_size = (f->io_size * td->o.size_percent) / 100; + if (o->size_percent) + f->io_size = (f->io_size * o->size_percent) / 100; total_size += f->io_size; } if (f->filetype == FIO_TYPE_FILE && (f->io_size + f->file_offset) > f->real_file_size && !(td->io_ops->flags & FIO_DISKLESSIO)) { - if (!td->o.create_on_open) { + if (!o->create_on_open) { need_extend++; extend_size += (f->io_size + f->file_offset); } else @@ -825,8 +841,8 @@ int setup_files(struct thread_data *td) } } - if (!td->o.size || td->o.size > total_size) - td->o.size = total_size; + if (!o->size || o->size > total_size) + o->size = total_size; /* * See if we need to extend some files @@ -835,7 +851,7 @@ int setup_files(struct thread_data *td) temp_stall_ts = 1; if (output_format == FIO_OUTPUT_NORMAL) log_info("%s: Laying out IO file(s) (%u file(s) /" - " %lluMB)\n", td->o.name, need_extend, + " %lluMB)\n", o->name, need_extend, extend_size >> 20); for_each_file(td, f, i) { @@ -846,7 +862,7 @@ int setup_files(struct thread_data *td) assert(f->filetype == FIO_TYPE_FILE); fio_file_clear_extend(f); - if (!td->o.fill_device) { + if (!o->fill_device) { old_len = f->real_file_size; extend_len = f->io_size + f->file_offset - old_len; @@ -867,25 +883,28 @@ int setup_files(struct thread_data *td) } if (err) - return err; + goto err_out; - if (!td->o.zone_size) - td->o.zone_size = td->o.size; + if (!o->zone_size) + o->zone_size = o->size; /* * iolog already set the total io size, if we read back * stored entries. 
*/ - if (!td->o.read_iolog_file) - td->total_io_size = td->o.size * td->o.loops; + if (!o->read_iolog_file) + td->total_io_size = o->size * o->loops; done: - if (td->o.create_only) + if (o->create_only) td->done = 1; + td_set_runstate(td, old_state); return 0; err_offset: - log_err("%s: you need to specify valid offset=\n", td->o.name); + log_err("%s: you need to specify valid offset=\n", o->name); +err_out: + td_set_runstate(td, old_state); return 1; } @@ -958,14 +977,14 @@ int init_random_map(struct thread_data *td) for_each_file(td, f, i) { uint64_t file_size = min(f->real_file_size, f->io_size); - blocks = (file_size + td->o.rw_min_bs - 1) / - (unsigned long long) td->o.rw_min_bs; + blocks = file_size / (unsigned long long) td->o.rw_min_bs; + if (td->o.random_generator == FIO_RAND_GEN_LFSR) { unsigned long seed; seed = td->rand_seeds[FIO_RAND_BLOCK_OFF]; - if (!lfsr_init(&f->lfsr, blocks, seed)) + if (!lfsr_init(&f->lfsr, blocks, seed, seed & 0xF)) continue; } else if (!td->o.norandommap) { f->io_axmap = axmap_new(blocks); @@ -1027,8 +1046,10 @@ void close_and_free_files(struct thread_data *td) td->o.filename = NULL; free(td->files); + free(td->file_locks); td->files_index = 0; td->files = NULL; + td->file_locks = NULL; td->o.nr_files = 0; } @@ -1085,6 +1106,14 @@ int add_file(struct thread_data *td, const char *fname) log_err("fio: realloc OOM\n"); assert(0); } + if (td->o.file_lock_mode != FILE_LOCK_NONE) { + td->file_locks = realloc(td->file_locks, new_size); + if (!td->file_locks) { + log_err("fio: realloc OOM\n"); + assert(0); + } + td->file_locks[cur_files] = FILE_LOCK_NONE; + } td->files_size = new_size; } td->files[cur_files] = f; @@ -1112,7 +1141,7 @@ int add_file(struct thread_data *td, const char *fname) case FILE_LOCK_NONE: break; case FILE_LOCK_READWRITE: - f->lock = fio_mutex_rw_init(); + f->rwlock = fio_rwlock_init(); break; case FILE_LOCK_EXCLUSIVE: f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED); @@ -1187,57 +1216,34 @@ void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir) if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE) return; - if (f->lock_owner == td && f->lock_batch--) - return; - if (td->o.file_lock_mode == FILE_LOCK_READWRITE) { if (ddir == DDIR_READ) - fio_mutex_down_read(f->lock); + fio_rwlock_read(f->rwlock); else - fio_mutex_down_write(f->lock); + fio_rwlock_write(f->rwlock); } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) fio_mutex_down(f->lock); - f->lock_owner = td; - f->lock_batch = td->o.lockfile_batch; - f->lock_ddir = ddir; + td->file_locks[f->fileno] = td->o.file_lock_mode; } void unlock_file(struct thread_data *td, struct fio_file *f) { if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE) return; - if (f->lock_batch) - return; - - if (td->o.file_lock_mode == FILE_LOCK_READWRITE) { - const int is_read = f->lock_ddir == DDIR_READ; - int val = fio_mutex_getval(f->lock); - - if ((is_read && val == 1) || (!is_read && val == -1)) - f->lock_owner = NULL; - - if (is_read) - fio_mutex_up_read(f->lock); - else - fio_mutex_up_write(f->lock); - } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) { - int val = fio_mutex_getval(f->lock); - - if (val == 0) - f->lock_owner = NULL; + if (td->o.file_lock_mode == FILE_LOCK_READWRITE) + fio_rwlock_unlock(f->rwlock); + else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) fio_mutex_up(f->lock); - } + + td->file_locks[f->fileno] = FILE_LOCK_NONE; } void unlock_file_all(struct thread_data *td, struct fio_file *f) { - if (f->lock_owner != td) - return; - - f->lock_batch = 0; - 
unlock_file(td, f); + if (td->file_locks[f->fileno] != FILE_LOCK_NONE) + unlock_file(td, f); } static int recurse_dir(struct thread_data *td, const char *dirname) @@ -1310,6 +1316,9 @@ void dup_files(struct thread_data *td, struct thread_data *org) td->files = malloc(org->files_index * sizeof(f)); + if (td->o.file_lock_mode != FILE_LOCK_NONE) + td->file_locks = malloc(org->files_index); + for_each_file(org, f, i) { struct fio_file *__f;
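
Note on the locking rework visible in the hunks above: the diff drops the per-file lock_owner/lock_batch/lock_ddir bookkeeping (and with it the old "keep the lock for lockfile_batch operations" shortcut) and instead pairs a real reader-writer lock (f->rwlock via fio_rwlock_*) or a plain mutex (f->lock) with a per-thread td->file_locks[] array recording which mode the calling thread currently holds, so unlock_file_all() only releases locks that thread actually took. What follows is a minimal, self-contained sketch of that pattern, not fio code: pthreads stands in for fio's fio_rwlock/fio_mutex wrappers, and the names (struct shared_file, struct thread_state, held[]) are invented here purely for illustration.

/*
 * Illustrative sketch only -- not fio code.  It mimics the locking model
 * the diff moves to: each file carries a plain reader-writer lock, and
 * each thread records, per file, which lock mode it currently holds so
 * that a blanket "unlock everything" pass only releases locks this
 * thread actually took.  pthreads stands in for fio's fio_rwlock/fio_mutex
 * wrappers; the struct and function names below are made up.
 */
#include <pthread.h>
#include <stdio.h>

enum lock_mode { LOCK_NONE, LOCK_READ, LOCK_WRITE };

struct shared_file {
	pthread_rwlock_t rwlock;	/* stands in for f->rwlock */
};

struct thread_state {
	enum lock_mode held[16];	/* stands in for td->file_locks[] */
};

static void lock_file(struct thread_state *ts, struct shared_file *f,
		      int fileno, int is_read)
{
	if (is_read)
		pthread_rwlock_rdlock(&f->rwlock);
	else
		pthread_rwlock_wrlock(&f->rwlock);

	/* remember what this thread holds, keyed by file index */
	ts->held[fileno] = is_read ? LOCK_READ : LOCK_WRITE;
}

static void unlock_file(struct thread_state *ts, struct shared_file *f,
			int fileno)
{
	pthread_rwlock_unlock(&f->rwlock);
	ts->held[fileno] = LOCK_NONE;
}

/* release only the locks this thread is actually holding */
static void unlock_file_all(struct thread_state *ts, struct shared_file *f,
			    int fileno)
{
	if (ts->held[fileno] != LOCK_NONE)
		unlock_file(ts, f, fileno);
}

int main(void)
{
	struct shared_file f;
	struct thread_state ts = { { LOCK_NONE } };

	pthread_rwlock_init(&f.rwlock, NULL);

	lock_file(&ts, &f, 0, 1);	/* take a read lock on file 0 */
	unlock_file_all(&ts, &f, 0);	/* releases only what we hold */
	unlock_file_all(&ts, &f, 0);	/* no-op: nothing held any more */

	pthread_rwlock_destroy(&f.rwlock);
	printf("done\n");
	return 0;
}

The trade-off readable from the removed lines: the old code let a thread skip re-locking for lockfile_batch consecutive operations, while the new scheme gives up that batching in exchange for per-thread state that unlock_file_all() can trust without knowing who owns the lock.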