int queued;
int cq_ring_off;
unsigned iodepth;
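+ /* cached fio_option_is_set() results, filled in at init time */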
+ bool ioprio_class_set;
+ bool ioprio_set;
struct ioring_mmap mmap[3];
};
unsigned int sqpoll_cpu;
unsigned int nonvectored;
unsigned int uncached;
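+ /* issue reads/writes with RWF_NOWAIT set */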
+ unsigned int nowait;
};
static const int ddir_to_op[2][2] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
+ {
+ .name = "nowait",
+ .lname = "RWF_NOWAIT",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct ioring_options, nowait),
+ .help = "Use RWF_NOWAIT for reads/writes",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
{
.name = NULL,
},
};
if (!td->o.odirect && o->uncached)
sqe->rw_flags = RWF_UNCACHED;
- if (fio_option_is_set(&td->o, ioprio_class))
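+ /* RWF_NOWAIT: return EAGAIN instead of blocking if the I/O cannot proceed immediately */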
+ if (o->nowait)
+ sqe->rw_flags |= RWF_NOWAIT;
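+ /* use flags cached at init instead of option lookups in the fast path */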
+ if (ld->ioprio_class_set)
sqe->ioprio = td->o.ioprio_class << 13;
- if (fio_option_is_set(&td->o, ioprio))
+ if (ld->ioprio_set)
sqe->ioprio |= td->o.ioprio;
sqe->off = io_u->offset;
} else if (ddir_sync(io_u->ddir)) {
head = *ring->head;
do {
- read_barrier();
- if (head == *ring->tail)
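+ /* acquire pairs with the kernel's release store to the CQ tail */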
+ if (head == atomic_load_acquire(ring->tail))
break;
reaped++;
head++;
} while (reaped + events < max);
- *ring->head = head;
- write_barrier();
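+ /* release: our CQE reads complete before the kernel sees the new head */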
+ atomic_store_release(ring->head, head);
return reaped;
}
tail = *ring->tail;
next_tail = tail + 1;
- read_barrier();
- if (next_tail == *ring->head)
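+ /* acquire-load the SQ head the kernel advances as it consumes sqes */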
+ if (next_tail == atomic_load_acquire(ring->head))
return FIO_Q_BUSY;
- /* ensure sqe stores are ordered with tail update */
- write_barrier();
if (o->cmdprio_percentage)
fio_ioring_prio_prep(td, io_u);
ring->array[tail & ld->sq_ring_mask] = io_u->index;
- *ring->tail = next_tail;
- write_barrier();
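+ /* release publishes the sqe and array stores before the tail update */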
+ atomic_store_release(ring->tail, next_tail);
ld->queued++;
return FIO_Q_QUEUED;
td_verror(td, EINVAL, "fio_io_uring_init");
return 1;
}
+
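+ /* resolve option-set state once; fio_ioring_prep() checks it per sqe */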
+ if (fio_option_is_set(&td->o, ioprio_class))
+ ld->ioprio_class_set = true;
+ if (fio_option_is_set(&td->o, ioprio))
+ ld->ioprio_set = true;
+
return 0;
}