same time, but writes get exclusive
access.
- The option may be post-fixed with a lock batch number. If
- set, then each thread/process may do that amount of IOs to
- the file before giving up the lock. Since lock acquisition is
- expensive, batching the lock/unlocks will speed up IO.
-
readwrite=str
rw=str Type of io pattern. Accepted values are:
/*
* if io is protected by a semaphore, this is set
*/
- struct fio_mutex *lock;
- void *lock_owner;
- unsigned int lock_batch;
- enum fio_ddir lock_ddir;
+ union {
+ struct fio_mutex *lock;
+ struct fio_rwlock *rwlock;
+ };
/*
* block map for random io
* racy, need the __f->lock locked
*/
f->lock = __f->lock;
- f->lock_owner = __f->lock_owner;
- f->lock_batch = __f->lock_batch;
- f->lock_ddir = __f->lock_ddir;
from_hash = 1;
} else {
dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
td->o.filename = NULL;
free(td->files);
+ free(td->file_locks);
td->files_index = 0;
td->files = NULL;
+ td->file_locks = NULL;
td->o.nr_files = 0;
}
log_err("fio: realloc OOM\n");
assert(0);
}
+ if (td->o.file_lock_mode != FILE_LOCK_NONE) {
+ td->file_locks = realloc(td->file_locks, new_size);
+ if (!td->file_locks) {
+ log_err("fio: realloc OOM\n");
+ assert(0);
+ }
+ td->file_locks[cur_files] = FILE_LOCK_NONE;
+ }
td->files_size = new_size;
}
td->files[cur_files] = f;
case FILE_LOCK_NONE:
break;
case FILE_LOCK_READWRITE:
- f->lock = fio_mutex_rw_init();
+ f->rwlock = fio_rwlock_init();
break;
case FILE_LOCK_EXCLUSIVE:
f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
return;
- if (f->lock_owner == td && f->lock_batch--)
- return;
-
if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
if (ddir == DDIR_READ)
- fio_mutex_down_read(f->lock);
+ fio_rwlock_read(f->rwlock);
else
- fio_mutex_down_write(f->lock);
+ fio_rwlock_write(f->rwlock);
} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
fio_mutex_down(f->lock);
- f->lock_owner = td;
- f->lock_batch = td->o.lockfile_batch;
- f->lock_ddir = ddir;
+ td->file_locks[f->fileno] = td->o.file_lock_mode;
}
void unlock_file(struct thread_data *td, struct fio_file *f)
{
if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
return;
- if (f->lock_batch)
- return;
-
- if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
- const int is_read = f->lock_ddir == DDIR_READ;
- int val = fio_mutex_getval(f->lock);
-
- if ((is_read && val == 1) || (!is_read && val == -1))
- f->lock_owner = NULL;
-
- if (is_read)
- fio_mutex_up_read(f->lock);
- else
- fio_mutex_up_write(f->lock);
- } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE) {
- int val = fio_mutex_getval(f->lock);
-
- if (val == 0)
- f->lock_owner = NULL;
+ if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
+ fio_rwlock_unlock(f->rwlock);
+ else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
fio_mutex_up(f->lock);
- }
+
+ td->file_locks[f->fileno] = FILE_LOCK_NONE;
}
void unlock_file_all(struct thread_data *td, struct fio_file *f)
{
-	if (f->lock_owner != td)
-		return;
-
-	f->lock_batch = 0;
-	unlock_file(td, f);
+	/*
+	 * td->file_locks is only allocated when file locking is enabled
+	 * (see add_file()/dup_files()); guard before dereferencing it.
+	 */
+	if (td->o.file_lock_mode == FILE_LOCK_NONE)
+		return;
+
+	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
+		unlock_file(td, f);
}
static int recurse_dir(struct thread_data *td, const char *dirname)
td->files = malloc(org->files_index * sizeof(f));
+ if (td->o.file_lock_mode != FILE_LOCK_NONE)
+ td->file_locks = malloc(org->files_index);
+
for_each_file(org, f, i) {
struct fio_file *__f;
time, but writes get exclusive access.
.RE
.P
-The option may be post-fixed with a lock batch number. If set, then each
-thread/process may do that amount of IOs to the file before giving up the lock.
-Since lock acquisition is expensive, batching the lock/unlocks will speed up IO.
-.RE
-.P
.BI opendir \fR=\fPstr
Recursively open any files below directory \fIstr\fR.
.TP
struct rusage ru_end;
struct fio_file **files;
+ unsigned char *file_locks;
unsigned int files_size;
unsigned int files_index;
unsigned int nr_open_files;
* way too early, double check.
*/
ret = pthread_cond_timedwait(&mutex->cond, &mutex->lock, &t);
- if (ret == ETIMEDOUT && !mutex_timed_out(&tv_s, seconds)) {
- pthread_mutex_lock(&mutex->lock);
+ if (ret == ETIMEDOUT && !mutex_timed_out(&tv_s, seconds))
ret = 0;
- }
mutex->waiters--;
}
pthread_mutex_unlock(&mutex->lock);
}
-void fio_mutex_down_write(struct fio_mutex *mutex)
+void fio_rwlock_write(struct fio_rwlock *lock)
{
- pthread_mutex_lock(&mutex->lock);
-
- while (mutex->value != 0) {
- mutex->waiters++;
- pthread_cond_wait(&mutex->cond, &mutex->lock);
- mutex->waiters--;
- }
-
- mutex->value--;
- pthread_mutex_unlock(&mutex->lock);
+ pthread_rwlock_wrlock(&lock->lock);
}
-void fio_mutex_down_read(struct fio_mutex *mutex)
+void fio_rwlock_read(struct fio_rwlock *lock)
{
- pthread_mutex_lock(&mutex->lock);
-
- while (mutex->value < 0) {
- mutex->waiters++;
- pthread_cond_wait(&mutex->cond, &mutex->lock);
- mutex->waiters--;
- }
+ pthread_rwlock_rdlock(&lock->lock);
+}
- mutex->value++;
- pthread_mutex_unlock(&mutex->lock);
+void fio_rwlock_unlock(struct fio_rwlock *lock)
+{
+ pthread_rwlock_unlock(&lock->lock);
}
-void fio_mutex_up_read(struct fio_mutex *mutex)
+void fio_rwlock_remove(struct fio_rwlock *lock)
{
- pthread_mutex_lock(&mutex->lock);
- mutex->value--;
- read_barrier();
- if (mutex->value >= 0 && mutex->waiters)
- pthread_cond_signal(&mutex->cond);
- pthread_mutex_unlock(&mutex->lock);
+ munmap((void *) lock, sizeof(*lock));
}
-void fio_mutex_up_write(struct fio_mutex *mutex)
+struct fio_rwlock *fio_rwlock_init(void)
{
- pthread_mutex_lock(&mutex->lock);
- mutex->value++;
- read_barrier();
- if (mutex->value >= 0 && mutex->waiters)
- pthread_cond_signal(&mutex->cond);
- pthread_mutex_unlock(&mutex->lock);
+ struct fio_rwlock *lock;
+ int ret;
+
+ lock = (void *) mmap(NULL, sizeof(struct fio_rwlock),
+ PROT_READ | PROT_WRITE,
+ OS_MAP_ANON | MAP_SHARED, -1, 0);
+ if (lock == MAP_FAILED) {
+ perror("mmap rwlock");
+ lock = NULL;
+ goto err;
+ }
+
+ ret = pthread_rwlock_init(&lock->lock, NULL);
+ if (ret) {
+ log_err("pthread_rwlock_init: %s\n", strerror(ret));
+ goto err;
+ }
+
+ return lock;
+err:
+ if (lock)
+ fio_rwlock_remove(lock);
+ return NULL;
}
int waiters;
};
+struct fio_rwlock {
+ pthread_rwlock_t lock;
+};
+
enum {
FIO_MUTEX_LOCKED = 0,
FIO_MUTEX_UNLOCKED = 1,
extern struct fio_mutex *fio_mutex_init(int);
extern void fio_mutex_remove(struct fio_mutex *);
+extern void fio_mutex_up(struct fio_mutex *);
extern void fio_mutex_down(struct fio_mutex *);
extern int fio_mutex_down_timeout(struct fio_mutex *, unsigned int);
-extern void fio_mutex_down_read(struct fio_mutex *);
-extern void fio_mutex_down_write(struct fio_mutex *);
-extern void fio_mutex_up(struct fio_mutex *);
-extern void fio_mutex_up_read(struct fio_mutex *);
-extern void fio_mutex_up_write(struct fio_mutex *);
-
-static inline struct fio_mutex *fio_mutex_rw_init(void)
-{
- return fio_mutex_init(0);
-}
-static inline int fio_mutex_getval(struct fio_mutex *mutex)
-{
- return mutex->value;
-}
+extern void fio_rwlock_read(struct fio_rwlock *);
+extern void fio_rwlock_write(struct fio_rwlock *);
+extern void fio_rwlock_unlock(struct fio_rwlock *);
+extern struct fio_rwlock *fio_rwlock_init(void);
+extern void fio_rwlock_remove(struct fio_rwlock *);
#endif
return 0;
}
-static int str_lockfile_cb(void *data, const char *str)
-{
- struct thread_data *td = data;
- char *nr = get_opt_postfix(str);
-
- td->o.lockfile_batch = 1;
- if (nr) {
- td->o.lockfile_batch = atoi(nr);
- free(nr);
- }
-
- return 0;
-}
-
static int str_write_bw_log_cb(void *data, const char *str)
{
struct thread_data *td = data;
{
.name = "lockfile",
.type = FIO_OPT_STR,
- .cb = str_lockfile_cb,
.off1 = td_var_offset(file_lock_mode),
.help = "Lock file when doing IO to it",
.parent = "filename",
static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
-static struct fio_mutex *lock;
+static struct fio_rwlock *lock;
static inline void pool_lock(struct pool *pool)
{
static inline void global_read_lock(void)
{
- fio_mutex_down_read(lock);
+ fio_rwlock_read(lock);
}
static inline void global_read_unlock(void)
{
- fio_mutex_up_read(lock);
+ fio_rwlock_unlock(lock);
}
static inline void global_write_lock(void)
{
- fio_mutex_down_write(lock);
+ fio_rwlock_write(lock);
}
static inline void global_write_unlock(void)
{
- fio_mutex_up_write(lock);
+ fio_rwlock_unlock(lock);
}
static inline int ptr_valid(struct pool *pool, void *ptr)
{
int ret;
- lock = fio_mutex_rw_init();
+ lock = fio_rwlock_init();
ret = add_pool(&mp[0], INITIAL_SIZE);
assert(!ret);
}
cleanup_pool(&mp[i]);
if (lock)
- fio_mutex_remove(lock);
+ fio_rwlock_remove(lock);
}
#ifdef SMALLOC_REDZONE