.getevents = fio_libaio_getevents,
.event = fio_libaio_event,
.cleanup = fio_libaio_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_LIBAIO */
return 0;
}
+static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
+{
+ int ret, flags;
+
+ ret = generic_open_file(td, f);
+ if (ret)
+ return ret;
+
+ if (td_rw(td))
+ flags = PROT_READ | PROT_WRITE;
+ else if (td_write(td)) {
+ flags = PROT_WRITE;
+
+ if (td->verify != VERIFY_NONE)
+ flags |= PROT_READ;
+ } else
+ flags = PROT_READ;
+
+ f->mmap = mmap(NULL, f->file_size, flags, MAP_SHARED, f->fd, f->file_offset);
+ if (f->mmap == MAP_FAILED) {
+ f->mmap = NULL;
+ td_verror(td, errno, "mmap");
+ goto err;
+ }
+
+ if (file_invalidate_cache(td, f))
+ goto err;
+
+ if (!td_random(td)) {
+ if (madvise(f->mmap, f->file_size, MADV_SEQUENTIAL) < 0) {
+ td_verror(td, errno, "madvise");
+ goto err;
+ }
+ } else {
+ if (madvise(f->mmap, f->file_size, MADV_RANDOM) < 0) {
+ td_verror(td, errno, "madvise");
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ if (f->mmap)
+ munmap(f->mmap, f->file_size);
+ generic_close_file(td, f);
+ return 1;
+}
+
+static void fio_mmapio_close(struct thread_data fio_unused *td,
+ struct fio_file *f)
+{
+ if (f->mmap) {
+ munmap(f->mmap, f->file_size);
+ f->mmap = NULL;
+ }
+}
+
static struct ioengine_ops ioengine = {
.name = "mmap",
.version = FIO_IOOPS_VERSION,
.queue = fio_mmapio_queue,
.init = fio_mmapio_init,
- .flags = FIO_SYNCIO | FIO_MMAPIO,
+ .open_file = fio_mmapio_open,
+ .close_file = fio_mmapio_close,
+ .flags = FIO_SYNCIO,
};
static void fio_init fio_mmapio_register(void)
#include "../fio.h"
#include "../os.h"
-#define send_to_net(td) ((td)->io_ops->priv)
+struct netio_data {
+ int listenfd;
+ int send_to_net;
+ char host[64];
+ struct sockaddr_in addr;
+};
static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
{
+ struct netio_data *nd = td->io_ops->data;
struct fio_file *f = io_u->file;
/*
* Make sure we don't see spurious reads to a receiver, and vice versa
*/
- if ((send_to_net(td) && io_u->ddir == DDIR_READ) ||
- (!send_to_net(td) && io_u->ddir == DDIR_WRITE)) {
+ if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
+ (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
td_verror(td, EINVAL, "bad direction");
return 1;
}
return FIO_Q_COMPLETED;
}
-static int fio_netio_setup_connect(struct thread_data *td, const char *host,
- unsigned short port)
+static int fio_netio_connect(struct thread_data *td, struct fio_file *f)
{
- struct sockaddr_in addr;
- struct fio_file *f;
- int i;
-
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_port = htons(port);
-
- if (inet_aton(host, &addr.sin_addr) != 1) {
- struct hostent *hent;
-
- hent = gethostbyname(host);
- if (!hent) {
- td_verror(td, errno, "gethostbyname");
- return 1;
- }
+ struct netio_data *nd = td->io_ops->data;
- memcpy(&addr.sin_addr, hent->h_addr, 4);
+ f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (f->fd < 0) {
+ td_verror(td, errno, "socket");
+ return 1;
}
- for_each_file(td, f, i) {
- f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
- if (f->fd < 0) {
- td_verror(td, errno, "socket");
- return 1;
- }
-
- if (connect(f->fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
- td_verror(td, errno, "connect");
- return 1;
- }
+ if (connect(f->fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
+ td_verror(td, errno, "connect");
+ return 1;
}
return 0;
-
}
-static int fio_netio_accept_connections(struct thread_data *td, int fd,
- struct sockaddr_in *addr)
+static int fio_netio_accept(struct thread_data *td, struct fio_file *f)
{
- socklen_t socklen = sizeof(*addr);
- unsigned int accepts = 0;
+ struct netio_data *nd = td->io_ops->data;
+ socklen_t socklen = sizeof(nd->addr);
struct pollfd pfd;
+ int ret;
- fprintf(f_out, "fio: waiting for %u connections\n", td->nr_files);
+ fprintf(f_out, "fio: waiting for connection\n");
/*
* Accept loop. poll for incoming events, accept them. Repeat until we
* have all connections.
*/
- while (!td->terminate && accepts < td->nr_files) {
- struct fio_file *f;
- int ret, i;
-
- pfd.fd = fd;
+ while (!td->terminate) {
+ pfd.fd = nd->listenfd;
pfd.events = POLLIN;
ret = poll(&pfd, 1, -1);
if (!(pfd.revents & POLLIN))
continue;
- for_each_file(td, f, i) {
- if (f->fd != -1)
- continue;
+ f->fd = accept(nd->listenfd, (struct sockaddr *) &nd->addr, &socklen);
+ if (f->fd < 0) {
+ td_verror(td, errno, "accept");
+ return 1;
+ }
+ break;
+ }
- f->fd = accept(fd, (struct sockaddr *) addr, &socklen);
- if (f->fd < 0) {
- td_verror(td, errno, "accept");
- return 1;
- }
- accepts++;
- break;
+ return 0;
+}
+
+
/*
 * ->open_file hook: a read job is the receiving side and waits for a
 * connection, a write job initiates the connect.
 */
static int fio_netio_open_file(struct thread_data *td, struct fio_file *f)
{
	return td_read(td) ? fio_netio_accept(td, f) : fio_netio_connect(td, f);
}
+
+static int fio_netio_setup_connect(struct thread_data *td, const char *host,
+ unsigned short port)
+{
+ struct netio_data *nd = td->io_ops->data;
+
+ nd->addr.sin_family = AF_INET;
+ nd->addr.sin_port = htons(port);
+
+ if (inet_aton(host, &nd->addr.sin_addr) != 1) {
+ struct hostent *hent;
+
+ hent = gethostbyname(host);
+ if (!hent) {
+ td_verror(td, errno, "gethostbyname");
+ return 1;
}
+
+ memcpy(&nd->addr.sin_addr, hent->h_addr, 4);
}
return 0;
}
-static int fio_netio_setup_listen(struct thread_data *td, unsigned short port)
+static int fio_netio_setup_listen(struct thread_data *td, short port)
{
- struct sockaddr_in addr;
+ struct netio_data *nd = td->io_ops->data;
int fd, opt;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
}
#endif
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = htons(port);
+ nd->addr.sin_family = AF_INET;
+ nd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ nd->addr.sin_port = htons(port);
- if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
+ if (bind(fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
td_verror(td, errno, "bind");
return 1;
}
return 1;
}
- return fio_netio_accept_connections(td, fd, &addr);
+ nd->listenfd = fd;
+ return 0;
}
static int fio_netio_init(struct thread_data *td)
{
- char host[64], buf[128];
+ struct netio_data *nd = td->io_ops->data;
unsigned short port;
struct fio_file *f;
+ char host[64], buf[128];
char *sep;
int ret, i;
port = atoi(sep);
if (td_read(td)) {
- send_to_net(td) = 0;
+ nd->send_to_net = 0;
ret = fio_netio_setup_listen(td, port);
} else {
- send_to_net(td) = 1;
+ nd->send_to_net = 1;
ret = fio_netio_setup_connect(td, host, port);
}
return 0;
}
-static int fio_netio_setup(struct thread_data fio_unused *td)
+static void fio_netio_cleanup(struct thread_data *td)
{
+ struct netio_data *nd = td->io_ops->data;
+
+ if (nd) {
+ free(nd);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_netio_setup(struct thread_data *td)
+{
+ struct netio_data *nd = malloc(sizeof(*nd));
+
+ memset(nd, 0, sizeof(*nd));
+ nd->listenfd = -1;
+ td->io_ops->data = nd;
return 0;
}
.queue = fio_netio_queue,
.setup = fio_netio_setup,
.init = fio_netio_init,
- .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_SELFOPEN,
+ .cleanup = fio_netio_cleanup,
+ .open_file = fio_netio_open_file,
+ .close_file = generic_close_file,
+ .flags = FIO_SYNCIO | FIO_DISKLESSIO,
};
static void fio_init fio_netio_register(void)
return 0;
}
+static int fio_null_open(struct thread_data fio_unused *td,
+ struct fio_file fio_unused *f)
+{
+ return 0;
+}
+
static struct ioengine_ops ioengine = {
.name = "null",
.version = FIO_IOOPS_VERSION,
.setup = fio_null_setup,
.queue = fio_null_queue,
- .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_SELFOPEN,
+ .open_file = fio_null_open,
+ .flags = FIO_SYNCIO | FIO_DISKLESSIO,
};
static void fio_init fio_null_register(void)
.getevents = fio_posixaio_getevents,
.event = fio_posixaio_event,
.cleanup = fio_posixaio_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_POSIXAIO */
int *fd_flags;
void *sgbuf;
unsigned int bs;
+ int type_checked;
};
static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
static int fio_sgio_init(struct thread_data *td)
{
- struct fio_file *f = &td->files[0];
struct sgio_data *sd;
- unsigned int bs;
- int ret;
sd = malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
td->io_ops->data = sd;
+ /*
+ * we want to do it, regardless of whether odirect is set or not
+ */
+ td->override_sync = 1;
+ return 0;
+}
+
+static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
+{
+ struct sgio_data *sd = td->io_ops->data;
+ unsigned int bs;
+
if (td->filetype == FIO_TYPE_BD) {
if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
td_verror(td, errno, "ioctl");
- goto err;
+ return 1;
}
} else if (td->filetype == FIO_TYPE_CHAR) {
- int version;
+ int version, ret;
if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
td_verror(td, errno, "ioctl");
- goto err;
+ return 1;
}
ret = fio_sgio_get_bs(td, &bs);
if (ret)
- goto err;
+ return 1;
} else {
log_err("ioengine sgio only works on block devices\n");
- goto err;
+ return 1;
}
sd->bs = bs;
td->io_ops->event = NULL;
}
- /*
- * we want to do it, regardless of whether odirect is set or not
- */
- td->override_sync = 1;
return 0;
-err:
- free(sd->events);
- free(sd->cmds);
- free(sd->fd_flags);
- free(sd->pfds);
- free(sd->sgbuf);
- free(sd);
- td->io_ops->data = NULL;
- return 1;
+}
+
+static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
+{
+ struct sgio_data *sd = td->io_ops->data;
+ int ret;
+
+ ret = generic_open_file(td, f);
+ if (ret)
+ return ret;
+
+ if (!sd->type_checked && fio_sgio_type_check(td, f)) {
+ generic_close_file(td, f);
+ return 1;
+ }
+
+ return 0;
}
static struct ioengine_ops ioengine = {
.getevents = fio_sgio_getevents,
.event = fio_sgio_event,
.cleanup = fio_sgio_cleanup,
+ .open_file = fio_sgio_open,
+ .close_file = generic_close_file,
.flags = FIO_SYNCIO | FIO_RAWIO,
};
{
}
/*
 * Hook for opening the given file. Unless the engine has special
 * needs, it usually just provides generic_open_file() as the handler.
 * (The helper is named generic_open_file(), not generic_file_open().)
 */
static int fio_skeleton_open(struct thread_data *td, struct fio_file *f)
{
	return generic_open_file(td, f);
}
+
/*
 * Hook for closing a file. See fio_skeleton_open(). The ->close_file
 * hook has a void return, so this must be void (the previous int
 * version returned nothing), and the helper is generic_close_file().
 */
static void fio_skeleton_close(struct thread_data *td, struct fio_file *f)
{
	generic_close_file(td, f);
}
+
/*
* Note that the structure is exported, so that fio can get it via
* dlsym(..., "ioengine");
.getevents = fio_skeleton_getevents,
.event = fio_skeleton_event,
.cleanup = fio_skeleton_cleanup,
+ .open_file = fio_skeleton_open,
+ .close_file = fio_skeleton_close,
};
.init = fio_spliceio_init,
.queue = fio_spliceio_queue,
.cleanup = fio_spliceio_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
.flags = FIO_SYNCIO,
};
.version = FIO_IOOPS_VERSION,
.prep = fio_syncio_prep,
.queue = fio_syncio_queue,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
.flags = FIO_SYNCIO,
};
.getevents = fio_syslet_getevents,
.event = fio_syslet_event,
.cleanup = fio_syslet_cleanup,
+ .open_file = generic_open_file,
+ .close_file = generic_close_file,
};
#else /* FIO_HAVE_SYSLET */
#include "fio.h"
#include "os.h"
-int open_file(struct thread_data *td, struct fio_file *f, int flags, int perm)
-{
- if (flags & O_CREAT)
- f->fd = open(f->file_name, flags, perm);
- else
- f->fd = open(f->file_name, flags);
-
- if (f->fd != -1) {
- td->nr_open_files++;
- return 0;
- }
-
- return 1;
-}
-
-void close_file(struct thread_data *td, struct fio_file *f)
-{
- if (f->fd != -1) {
- close(f->fd);
- f->fd = -1;
- td->nr_open_files--;
- }
-}
-
/*
* Check if the file exists and it's large enough.
*/
{
struct stat st;
- if (td->filetype != FIO_TYPE_FILE)
+ if (td->filetype != FIO_TYPE_FILE ||
+ (td->io_ops->flags & FIO_DISKLESSIO))
return 0;
if (lstat(f->file_name, &st) == -1)
{
int ret = 0;
+ if (!td->invalidate_cache)
+ return 0;
+ if (!td->odirect)
+ return 0;
+
/*
* FIXME: add blockdev flushing too
*/
- if (td->io_ops->flags & FIO_MMAPIO)
+ if (f->mmap)
ret = madvise(f->mmap, f->file_size, MADV_DONTNEED);
else if (td->filetype == FIO_TYPE_FILE) {
- if (!td->odirect)
- ret = fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_DONTNEED);
+ ret = fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_DONTNEED);
} else if (td->filetype == FIO_TYPE_BD) {
- if (!td->odirect)
- ret = blockdev_invalidate_cache(f->fd);
+ ret = blockdev_invalidate_cache(f->fd);
} else if (td->filetype == FIO_TYPE_CHAR)
ret = 0;
return ret;
}
-static int __setup_file_mmap(struct thread_data *td, struct fio_file *f)
+void generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
- int flags;
-
- if (td_rw(td))
- flags = PROT_READ | PROT_WRITE;
- else if (td_write(td)) {
- flags = PROT_WRITE;
-
- if (td->verify != VERIFY_NONE)
- flags |= PROT_READ;
- } else
- flags = PROT_READ;
-
- f->mmap = mmap(NULL, f->file_size, flags, MAP_SHARED, f->fd, f->file_offset);
- if (f->mmap == MAP_FAILED) {
- f->mmap = NULL;
- td_verror(td, errno, "mmap");
- return 1;
- }
-
- if (td->invalidate_cache && file_invalidate_cache(td, f))
- return 1;
-
- if (!td_random(td)) {
- if (madvise(f->mmap, f->file_size, MADV_SEQUENTIAL) < 0) {
- td_verror(td, errno, "madvise");
- return 1;
- }
- } else {
- if (madvise(f->mmap, f->file_size, MADV_RANDOM) < 0) {
- td_verror(td, errno, "madvise");
- return 1;
- }
- }
-
- return 0;
-}
-
-static int setup_files_mmap(struct thread_data *td)
-{
- struct fio_file *f;
- int i, err = 0;
-
- for_each_file(td, f, i) {
- err = __setup_file_mmap(td, f);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int __setup_file_plain(struct thread_data *td, struct fio_file *f)
-{
- if (td->invalidate_cache && file_invalidate_cache(td, f))
- return 1;
-
- if (!td_random(td)) {
- if (fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
- td_verror(td, errno, "fadvise");
- return 1;
- }
- } else {
- if (fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_RANDOM) < 0) {
- td_verror(td, errno, "fadvise");
- return 1;
- }
- }
-
- return 0;
-}
-
-static int setup_files_plain(struct thread_data *td)
-{
- struct fio_file *f;
- int i, err = 0;
-
- for_each_file(td, f, i) {
- err = __setup_file_plain(td, f);
- if (err)
- break;
- }
-
- return err;
+ close(f->fd);
+ f->fd = -1;
}
-static int setup_file(struct thread_data *td, struct fio_file *f)
+int generic_open_file(struct thread_data *td, struct fio_file *f)
{
int flags = 0;
- if (td->io_ops->flags & FIO_SELFOPEN)
- return 0;
-
if (td->odirect)
flags |= OS_O_DIRECT;
if (td->sync_io)
flags |= O_CREAT;
}
- open_file(td, f, flags, 0600);
+ f->fd = open(f->file_name, flags, 0600);
} else {
if (td->filetype == FIO_TYPE_CHAR)
flags |= O_RDWR;
else
flags |= O_RDONLY;
- open_file(td, f, flags, 0);
+ f->fd = open(f->file_name, flags);
}
if (f->fd == -1) {
return 1;
}
- if (get_file_size(td, f)) {
- close_file(td, f);
- return 1;
+ if (get_file_size(td, f))
+ goto err;
+
+ if (file_invalidate_cache(td, f))
+ goto err;
+
+ if (!td_random(td)) {
+ if (fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
+ td_verror(td, errno, "fadvise");
+ goto err;
+ }
+ } else {
+ if (fadvise(f->fd, f->file_offset, f->file_size, POSIX_FADV_RANDOM) < 0) {
+ td_verror(td, errno, "fadvise");
+ goto err;
+ }
}
return 0;
+err:
+ close(f->fd);
+ return 1;
+}
+
+int reopen_file(struct thread_data *td, struct fio_file *f)
+{
+ f->last_free_lookup = 0;
+ f->last_completed_pos = 0;
+ f->last_pos = 0;
+
+ if (f->file_map)
+ memset(f->file_map, 0, f->num_maps * sizeof(long));
+
+ printf("setting up %s again\n", f->file_name);
+ return td_io_open_file(td, f);
}
int open_files(struct thread_data *td)
int i, err = 0;
for_each_file(td, f, i) {
- err = setup_file(td, f);
+ err = td_io_open_file(td, f);
if (err)
break;
+
+ if (td->open_files == td->nr_open_files)
+ break;
}
if (!err)
return 0;
for_each_file(td, f, i)
- close_file(td, f);
+ td_io_close_file(td, f);
return err;
}
td->total_io_size = td->io_size * td->loops;
- if (td->io_ops->flags & FIO_MMAPIO)
- err = setup_files_mmap(td);
- else
- err = setup_files_plain(td);
-
for_each_file(td, f, i)
- close_file(td, f);
+ td_io_close_file(td, f);
return err;
}
f->file_name = NULL;
}
- close_file(td, f);
-
- if (f->mmap) {
- munmap(f->mmap, f->file_size);
- f->mmap = NULL;
- }
+ td_io_close_file(td, f);
}
td->filename = NULL;
if (!td->create_serialize && setup_files(td))
goto err;
- if (open_files(td))
- goto err;
- /*
- * Do this late, as some IO engines would like to have the
- * files setup prior to initializing structures.
- */
if (td_io_init(td))
goto err;
+ if (open_files(td))
+ goto err;
+
if (td->exec_prerun) {
if (system(td->exec_prerun) < 0)
goto err;
enum fio_ioengine_flags {
FIO_SYNCIO = 1 << 0, /* io engine has synchronous ->queue */
FIO_CPUIO = 1 << 1, /* cpu burner, doesn't do real io */
- FIO_MMAPIO = 1 << 2, /* uses memory mapped io */
- FIO_RAWIO = 1 << 3, /* some sort of direct/raw io */
- FIO_DISKLESSIO = 1 << 4, /* no disk involved */
- FIO_SELFOPEN = 1 << 5, /* opens its own devices */
+ FIO_RAWIO = 1 << 2, /* some sort of direct/raw io */
+ FIO_DISKLESSIO = 1 << 3, /* no disk involved */
};
/*
unsigned int nice;
unsigned int file_service_type;
unsigned int group_reporting;
+ unsigned int open_files;
char *read_iolog_file;
char *write_iolog_file;
extern void close_files(struct thread_data *);
extern int __must_check setup_files(struct thread_data *);
extern int __must_check open_files(struct thread_data *);
-extern int open_file(struct thread_data *, struct fio_file *, int, int);
-extern void close_file(struct thread_data *, struct fio_file *);
+extern int reopen_file(struct thread_data *, struct fio_file *);
extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
+extern int __must_check generic_open_file(struct thread_data *, struct fio_file *);
+extern void generic_close_file(struct thread_data *, struct fio_file *);
/*
* ETA/status stuff
extern int __must_check td_io_sync(struct thread_data *, struct fio_file *);
extern int __must_check td_io_getevents(struct thread_data *, int, int, struct timespec *);
extern int __must_check td_io_commit(struct thread_data *);
+extern int __must_check td_io_open_file(struct thread_data *, struct fio_file *);
+extern void td_io_close_file(struct thread_data *, struct fio_file *);
/*
* This is a pretty crappy semaphore implementation, but with the use that fio
struct io_u *(*event)(struct thread_data *, int);
int (*cancel)(struct thread_data *, struct io_u *);
void (*cleanup)(struct thread_data *);
+ int (*open_file)(struct thread_data *, struct fio_file *);
+ void (*close_file)(struct thread_data *, struct fio_file *);
void *data;
void *dlhandle;
- unsigned long priv;
};
-#define FIO_IOOPS_VERSION 5
+#define FIO_IOOPS_VERSION 6
extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
extern void register_ioengine(struct ioengine_ops *);
#define for_each_td(td, i) \
for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i) \
- for ((i) = 0, (f) = &(td)->files[0]; (i) < (int) (td)->nr_files; (i)++, (f)++)
+ for ((i) = 0, (f) = &(td)->files[0]; (i) < (int) (td)->open_files; (i)++, (f)++)
#define fio_assert(td, cond) do { \
if (!(cond)) { \
.help = "Split job workload between this number of files",
.def = "1",
},
+ {
+ .name = "openfiles",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(open_files),
+ .help = "Number of files to keep open at the same time",
+ },
{
.name = "file_service_type",
.type = FIO_OPT_STR,
td->iodepth = 1;
else {
if (!td->iodepth)
- td->iodepth = td->nr_files;
+ td->iodepth = td->open_files;
}
/*
* only really works for sequential io for now, and with 1 file
*/
- if (td->zone_size && td_random(td) && td->nr_files == 1)
+ if (td->zone_size && td_random(td) && td->open_files == 1)
td->zone_size = 0;
/*
*/
if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
td->iodepth_batch = td->iodepth;
+
+ if (td->open_files > td->nr_files || !td->open_files)
+ td->open_files = td->nr_files;
}
/*
len = sprintf(tmp, "%s/", td->directory);
}
- td->files = malloc(sizeof(struct fio_file) * td->nr_files);
+ td->files = malloc(sizeof(struct fio_file) * td->open_files);
for_each_file(td, f, i) {
memset(f, 0, sizeof(*f));
f->file_name = strdup(tmp);
}
} else {
- td->nr_files = 1;
+ td->open_files = td->nr_files = 1;
td->files = malloc(sizeof(struct fio_file));
f = &td->files[0];
if (td_random(td)) {
unsigned long long max_blocks = f->file_size / td->min_bs[ddir];
- int loops = 5;
+ int loops = 2;
do {
r = os_random_long(&td->random_state);
do {
long r = os_random_long(&td->next_file_state);
- fileno = (unsigned int) ((double) (td->nr_files - 1) * r / (RAND_MAX + 1.0));
+ fileno = (unsigned int) ((double) (td->open_files - 1) * r / (RAND_MAX + 1.0));
f = &td->files[fileno];
if (f->fd != -1)
return f;
f = &td->files[td->next_file];
td->next_file++;
- if (td->next_file >= td->nr_files)
+ if (td->next_file >= td->open_files)
td->next_file = 0;
if (f->fd != -1)
* No more to do for this file, close it
*/
io_u->file = NULL;
- close_file(td, f);
+ td_io_close_file(td, f);
+
+ /*
+ * probably not the right place to do this, but see
+ * if we need to open a new file
+ */
+ if (td->nr_open_files < td->nr_files &&
+ td->open_files != td->nr_files)
+ reopen_file(td, f);
} while (1);
if (td->zone_bytes >= td->zone_size) {
return 0;
}
+
+int td_io_open_file(struct thread_data *td, struct fio_file *f)
+{
+ if (!td->io_ops->open_file(td, f)) {
+ td->nr_open_files++;
+ return 0;
+ }
+
+ return 1;
+}
+
+void td_io_close_file(struct thread_data *td, struct fio_file *f)
+{
+ if (td->io_ops->close_file)
+ td->io_ops->close_file(td, f);
+ td->nr_open_files--;
+}