typedef uint32_t u32;
typedef uint16_t u16;
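+
+/*
+ * Mirrors the kernel-side definition: set in the SQ ring's kflags when the
+ * kernel's submission-polling thread has gone idle, meaning new submissions
+ * need an io_ring_enter() call to wake it up.
+ */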
+#define IORING_SQ_NEED_WAKEUP (1 << 0)
+
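+/*
+ * Mirrors the kernel-side definition: set in a completion event's res2 field
+ * when a buffered read was served from the page cache.
+ */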
+#define IOEV_RES2_CACHEHIT (1 << 0)
+
struct aio_sq_ring {
union {
struct {
u32 tail;
u32 nr_events;
u16 sq_thread_cpu;
+ u16 kflags;
u64 iocbs;
};
u32 pad[16];
@@ ... @@ struct aioring_data {
	int queued;
	int cq_ring_off;
+
+ uint64_t cachehit;
+ uint64_t cachemiss;
};
struct aioring_options {
@@ ... @@ static int fio_aioring_prep(struct thread_data *td, struct io_u *io_u)
	iocb = &ld->iocbs[io_u->index];
- if (io_u->ddir == DDIR_READ) {
- if (o->fixedbufs) {
- iocb->aio_fildes = f->fd;
+ if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
+ if (io_u->ddir == DDIR_READ)
iocb->aio_lio_opcode = IO_CMD_PREAD;
- iocb->u.c.offset = io_u->offset;
- } else {
- io_prep_pread(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
- if (o->hipri)
- iocb->u.c.flags |= IOCB_FLAG_HIPRI;
- }
- } else if (io_u->ddir == DDIR_WRITE) {
- if (o->fixedbufs) {
- iocb->aio_fildes = f->fd;
+ else
iocb->aio_lio_opcode = IO_CMD_PWRITE;
- iocb->u.c.offset = io_u->offset;
- } else {
- io_prep_pwrite(iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
- if (o->hipri)
- iocb->u.c.flags |= IOCB_FLAG_HIPRI;
- }
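+		/* Reads and writes now share one iocb setup path; only the opcode differs */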
+ iocb->aio_reqprio = 0;
+ iocb->aio_fildes = f->fd;
+ iocb->u.c.buf = io_u->xfer_buf;
+ iocb->u.c.nbytes = io_u->xfer_buflen;
+ iocb->u.c.offset = io_u->offset;
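+		/* IOCB_FLAG_HIPRI requests polled completion for this iocb */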
+ if (o->hipri)
+ iocb->u.c.flags |= IOCB_FLAG_HIPRI;
+ else
+ iocb->u.c.flags = 0;
} else if (ddir_sync(io_u->ddir))
io_prep_fsync(iocb, f->fd);
@@ ... @@ static struct io_u *fio_aioring_event(struct thread_data *td, int event)
	} else
io_u->error = 0;
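+	/* For reads, res2 carries the kernel's cache hit/miss indication */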
+ if (io_u->ddir == DDIR_READ) {
+ if (ev->res2 & IOEV_RES2_CACHEHIT)
+ ld->cachehit++;
+ else
+ ld->cachemiss++;
+ }
+
return io_u;
}
@@ ... @@ static int fio_aioring_commit(struct thread_data *td)
	/* Nothing to do */
if (o->sqthread_poll) {
+ struct aio_sq_ring *ring = ld->sq_ring;
+
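+		/*
+		 * The kernel-side SQ poll thread normally reaps new submissions
+		 * on its own; only enter the kernel if it has gone idle and set
+		 * the wakeup flag.
+		 */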
+ if (ring->kflags & IORING_SQ_NEED_WAKEUP)
+ io_ring_enter(ld->aio_ctx, ld->queued, 0, IORING_FLAG_SUBMIT);
ld->queued = 0;
return 0;
}
@@ ... @@ static void fio_aioring_cleanup(struct thread_data *td)
	struct aioring_data *ld = td->io_ops_data;
if (ld) {
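+		/* Fold the engine's cache hit/miss counters into the thread stats */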
+ td->ts.cachehit += ld->cachehit;
+ td->ts.cachemiss += ld->cachemiss;
+
/* Bump depth to match init depth */
td->o.iodepth++;
@@ ... @@ static int fio_aioring_queue_init(struct thread_data *td)
		flags |= IOCTX_FLAG_SQTHREAD;
if (o->sqthread_poll)
flags |= IOCTX_FLAG_SQPOLL;
- } else if (o->sqwq)
+ }
+ if (o->sqwq)
flags |= IOCTX_FLAG_SQWQ;
if (o->fixedbufs) {
}
@@ ... @@ static int fio_aioring_post_init(struct thread_data *td)
	err = fio_aioring_queue_init(td);
+
+ /* Adjust depth back again */
+ td->o.iodepth--;
+
if (err) {
- td_verror(td, -err, "io_queue_init");
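+		/* queue init reports errors via errno, not a negative return */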
+ td_verror(td, errno, "io_queue_init");
return 1;
}
- /* Adjust depth back again */
- td->o.iodepth--;
return 0;
}
static int fio_aioring_init(struct thread_data *td)
{
- struct aioring_options *o = td->eo;
struct aioring_data *ld;
- if (o->sqthread_set && o->sqwq) {
- log_err("fio: aioring sqthread and sqwq are mutually exclusive\n");
- return 1;
- }
-
/* ring needs an extra entry, add one to achieve QD set */
td->o.iodepth++;