	 * Current IO depth and list of free and busy io_u's.
	 */
	unsigned int cur_depth;
+	unsigned int io_u_queued;
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;
	struct list_head io_u_requeues;
-	unsigned int io_u_queued;

	/*
	 * Rate state
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);
-extern void io_u_mark_depth(struct thread_data *, struct io_u *, unsigned int);
+extern void io_u_mark_depth(struct thread_data *, unsigned int);
/*
* io engine entry points
	return 0;
}

-void io_u_mark_depth(struct thread_data *td, struct io_u *io_u,
-		     unsigned int nr)
+void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int index = 0;

-	if (io_u->ddir == DDIR_SYNC)
-		return;
-
	switch (td->cur_depth) {
	default:
		index = 6;
	}

	td->ts.io_u_map[index] += nr;
-	td->ts.total_io_u[io_u->ddir] += nr;
}
static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
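For reference, this is roughly how the helper reads once the io_u argument is dropped; only the default bucket is visible in the hunk above, so the remaining case labels are elided here rather than guessed:

	void io_u_mark_depth(struct thread_data *td, unsigned int nr)
	{
		int index = 0;

		switch (td->cur_depth) {
		/* cases mapping smaller queue depths to lower indexes elided */
		default:
			index = 6;
		}

		/* one histogram bucket credited with nr I/Os at this depth */
		td->ts.io_u_map[index] += nr;
	}

The per-direction counters (td->ts.total_io_u[]) are now bumped at the call sites instead, which is why the DDIR_SYNC early return is no longer needed here.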
	unlock_file(td, io_u->file);

-	if (ret != FIO_Q_BUSY)
-		io_u_mark_depth(td, io_u, 1);
-
-	if (ret == FIO_Q_QUEUED) {
+	if (ret == FIO_Q_COMPLETED) {
+		if (io_u->ddir != DDIR_SYNC) {
+			io_u_mark_depth(td, 1);
+			td->ts.total_io_u[io_u->ddir]++;
+		}
+	} else if (ret == FIO_Q_QUEUED) {
		int r;

-		td->io_u_queued++;
-		if (td->io_u_queued > td->o.iodepth_batch) {
+		if (io_u->ddir != DDIR_SYNC) {
+			td->io_u_queued++;
+			td->ts.total_io_u[io_u->ddir]++;
+		}
+
+		if (td->io_u_queued >= td->o.iodepth_batch) {
			r = td_io_commit(td);
			if (r < 0)
				return r;
{
	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

-	if (!td->cur_depth)
+	if (!td->cur_depth || !td->io_u_queued)
		return 0;

+	io_u_mark_depth(td, td->io_u_queued);
	td->io_u_queued = 0;
+
	if (td->io_ops->commit)
		return td->io_ops->commit(td);
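Taken together, the queue and commit hunks split the accounting as sketched below. The sketch condenses both functions; the _sketch names, the ->queue() call shape and the trailing return are illustrative assumptions about the surrounding fio code, not part of this patch:

	/* Condensed view of td_io_queue()/td_io_commit() after the patch. */
	static int td_io_queue_sketch(struct thread_data *td, struct io_u *io_u)
	{
		int ret = td->io_ops->queue(td, io_u);	/* assumed call shape */

		if (ret == FIO_Q_COMPLETED) {
			/* completed inline: one I/O at the current depth */
			if (io_u->ddir != DDIR_SYNC) {
				io_u_mark_depth(td, 1);
				td->ts.total_io_u[io_u->ddir]++;
			}
		} else if (ret == FIO_Q_QUEUED) {
			/* queued: defer the depth sample until commit time */
			if (io_u->ddir != DDIR_SYNC) {
				td->io_u_queued++;
				td->ts.total_io_u[io_u->ddir]++;
			}
			if (td->io_u_queued >= td->o.iodepth_batch) {
				int r = td_io_commit(td);

				if (r < 0)
					return r;
			}
		}
		/* FIO_Q_BUSY falls through: nothing is accounted for a requeue */
		return ret;
	}

	static int td_io_commit_sketch(struct thread_data *td)
	{
		if (!td->cur_depth || !td->io_u_queued)
			return 0;

		/* credit the whole pending batch to the depth histogram once */
		io_u_mark_depth(td, td->io_u_queued);
		td->io_u_queued = 0;

		return td->io_ops->commit ? td->io_ops->commit(td) : 0;
	}

The effect is that the depth histogram is sampled once per committed batch, at whatever td->cur_depth is when the batch goes down, instead of once per queued io_u, while the per-direction totals are still counted as each io_u is accepted.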