/*
 * Delay issuing the next replayed IO until 'delay' usecs have passed
 * since the previous issue (td->last_issue).
 *
 * Fix: time that has already elapsed since the last issue must be
 * subtracted from the requested delay — otherwise every replayed IO
 * sleeps the full inter-arrival gap on top of the time already spent,
 * skewing the replay timeline.
 */
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
	unsigned long usec = utime_since_now(&td->last_issue);
	unsigned long this_delay;

	/* Enough wall time has already passed; nothing left to wait. */
	if (delay < usec)
		return;

	/* Only the remainder of the gap is still owed. */
	delay -= usec;

	/* Below ~100 usec a sleep is less accurate than just issuing. */
	if (delay < 100)
		return;

	/*
	 * Sleep in slices of at most 500 msec so a pending terminate
	 * request (td->terminate) is noticed promptly instead of after
	 * an arbitrarily long single sleep.
	 */
	while (delay && !td->terminate) {
		this_delay = delay;
		if (this_delay > 500000)
			this_delay = 500000;

		usec_sleep(td, this_delay);
		delay -= this_delay;
	}
}
static int ipo_special(struct thread_data *td, struct io_piece *ipo)
td->io_hist_len++;
}
+void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
+{
+ struct io_piece *ipo = io_u->ipo;
+
+ if (!ipo)
+ return;
+
+ if (ipo->flags & IP_F_ONRB)
+ rb_erase(&ipo->rb_node, &td->io_hist_tree);
+ else if (ipo->flags & IP_F_ONLIST)
+ flist_del(&ipo->list);
+
+ free(ipo);
+ io_u->ipo = NULL;
+ td->io_hist_len--;
+}
+
+void trim_io_piece(struct thread_data *td, struct io_u *io_u)
+{
+ struct io_piece *ipo = io_u->ipo;
+
+ if (!ipo)
+ return;
+
+ ipo->len = io_u->xfer_buflen - io_u->resid;
+}
+
void write_iolog_close(struct thread_data *td)
{
fflush(td->iolog_f);
} else {
ipo->offset = offset;
ipo->len = bytes;
- if (bytes > td->o.max_bs[rw])
+ if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
td->o.max_bs[rw] = bytes;
ipo->fileno = fileno;
ipo->file_action = file_action;
+ td->o.size += bytes;
}
queue_io_piece(td, ipo);