one_more:
events = 0;
for_each_file(td, f, i) {
- bf = f->file_data;
+ bf = (void *) f->engine_data;
ret = read(bf->fd, p, left * sizeof(struct b_user_cmd));
if (ret < 0) {
if (errno == EAGAIN)
* Fill in the file descriptors
*/
for_each_file(td, f, i) {
- bf = f->file_data;
+ bf = (void *) f->engine_data;
/*
* don't block for min events == 0
if (!min) {
for_each_file(td, f, i) {
- bf = f->file_data;
+ bf = (void *) f->engine_data;
fcntl(bf->fd, F_SETFL, bd->fd_flags[i]);
}
}
static int fio_binject_doio(struct thread_data *td, struct io_u *io_u)
{
struct b_user_cmd *buc = &io_u->buc;
- struct binject_file *bf = io_u->file->file_data;
+ struct binject_file *bf = (void *) io_u->file->engine_data;
int ret;
ret = write(bf->fd, buc, sizeof(*buc));
{
struct binject_data *bd = td->io_ops->data;
struct b_user_cmd *buc = &io_u->buc;
- struct binject_file *bf = io_u->file->file_data;
+ struct binject_file *bf = (void *) io_u->file->engine_data;
if (io_u->xfer_buflen & (bf->bs - 1)) {
log_err("read/write not sector aligned\n");
static int fio_binject_close_file(struct thread_data *td, struct fio_file *f)
{
- struct binject_file *bf = f->file_data;
+ struct binject_file *bf = (void *) f->engine_data;
if (bf) {
binject_unmap_dev(td, bf);
free(bf);
- f->file_data = NULL;
+ f->engine_data = 0;
return generic_close_file(td, f);
}
bf = malloc(sizeof(*bf));
bf->bs = bs;
bf->minor = bf->fd = -1;
- f->file_data = bf;
+ f->engine_data = (uint64_t) bf;
if (binject_map_dev(td, bf, f->fd)) {
err_close:
ret = ioctl(f->fd, EXT4_IOC_MOVE_EXT, &me);
len = me.moved_len * ed->bsz;
- if (io_u->file && len && ddir_rw(io_u->ddir))
- io_u->file->file_pos = io_u->offset + len;
-
if (len > io_u->xfer_buflen)
len = io_u->xfer_buflen;
if (ret)
io_u->error = errno;
- if (io_u->file && ret == 0 && ddir_rw(io_u->ddir))
- io_u->file->file_pos = io_u->offset + ret;
-
return FIO_Q_COMPLETED;
}
goto out;
} else {
io_u->error = 0;
- io_u->file->file_pos = io_u->offset + rc;
rc = FIO_Q_COMPLETED;
}
#include "../fio.h"
+/*
+ * Sync engine uses engine_data to store last offset
+ */
+#define LAST_POS(f) ((f)->engine_data)
+
struct syncio_data {
struct iovec *iovecs;
struct io_u **io_us;
if (!ddir_rw(io_u->ddir))
return 0;
- if (f->file_pos != -1ULL && f->file_pos == io_u->offset)
+ if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
return 0;
if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
- io_u->file->file_pos = io_u->offset + ret;
+ LAST_POS(io_u->file) = io_u->offset + ret;
if (ret != (int) io_u->xfer_buflen) {
if (ret >= 0) {
struct flist_head hash_list;
enum fio_filetype filetype;
- void *file_data;
int fd;
+ int shadow_fd;
#ifdef WIN32
HANDLE hFile;
HANDLE ioCP;
/*
* For use by the io engine
*/
- unsigned long long file_pos;
+ uint64_t engine_data;
/*
* if io is protected by a semaphore, this is set
{
f->last_pos = f->file_offset;
f->last_start = -1ULL;
- f->file_pos = -1ULL;
if (f->io_axmap)
axmap_reset(f->io_axmap);
}
ret = errno;
f->fd = -1;
+
+ if (f->shadow_fd != -1) {
+ close(f->shadow_fd);
+ f->shadow_fd = -1;
+ }
+
return ret;
}
return from_hash;
}
+/*
+ * Close every stashed shadow file descriptor for this thread and
+ * return the number of descriptors closed. Shadow fds are fds that
+ * generic_close_file() deliberately kept open (stashed in
+ * f->shadow_fd) instead of closing; callers use the non-zero return
+ * to retry an open() that failed with EMFILE after we have freed up
+ * descriptors here.
+ */
+static int file_close_shadow_fds(struct thread_data *td)
+{
+ struct fio_file *f;
+ int num_closed = 0;
+ unsigned int i;
+
+ for_each_file(td, f, i) {
+ /* -1 means no shadow descriptor is stashed for this file */
+ if (f->shadow_fd == -1)
+ continue;
+
+ close(f->shadow_fd);
+ f->shadow_fd = -1;
+ num_closed++;
+ }
+
+ return num_closed;
+}
+
int generic_open_file(struct thread_data *td, struct fio_file *f)
{
int is_std = 0;
flags &= ~FIO_O_NOATIME;
goto open_again;
}
+ if (__e == EMFILE && file_close_shadow_fds(td))
+ goto open_again;
snprintf(buf, sizeof(buf) - 1, "open(%s)", f->file_name);
int fio_unused ret;
/*
- * OK to ignore, we haven't done anything with it
+ * Stash away descriptor for later close. This is to
+ * work-around a "feature" on Linux, where a close of
+ * an fd that has been opened for write will trigger
+ * udev to call blkid to check partitions, fs id, etc.
+ * That pollutes the device cache, which can slow down
+ * unbuffered accesses.
*/
- ret = generic_close_file(td, f);
+ if (f->shadow_fd == -1)
+ f->shadow_fd = f->fd;
+ else {
+ /*
+ * OK to ignore, we haven't done anything
+ * with it
+ */
+ ret = generic_close_file(td, f);
+ }
goto open_again;
}
}
}
f->fd = -1;
+ f->shadow_fd = -1;
fio_file_reset(f);
if (td->files_size <= td->files_index) {
enum fio_cs fio_clock_source = FIO_PREFERRED_CLOCK_SOURCE;
int fio_clock_source_set = 0;
+enum fio_cs fio_clock_source_inited = CS_INVAL;
#ifdef FIO_DEBUG_TIME
void fio_clock_init(void)
{
+ if (fio_clock_source == fio_clock_source_inited)
+ return;
+
last_tv_valid = 0;
+ fio_clock_source_inited = fio_clock_source;
calibrate_cpu_clock();
/*
CS_GTOD = 1,
CS_CGETTIME,
CS_CPUCLOCK,
+ CS_INVAL,
};
extern void fio_gettime(struct timeval *, void *);
fio_clock_source = td->o.clocksource;
fio_clock_source_set = 1;
+ fio_clock_init();
return 0;
}