td->cur_depth--;
}
+/*
+ * Reset an io_u that was marked in-flight and hand it back via put_io_u().
+ * NOTE(review): put_io_u() presumably returns the io_u to the thread's free
+ * pool and drops cur_depth — confirm against put_io_u() in this file.
+ */
+void clear_io_u(struct thread_data *td, struct io_u *io_u)
+{
+ /* io_u is no longer considered in flight */
+ io_u->flags &= ~IO_U_F_FLIGHT;
+ put_io_u(td, io_u);
+}
+
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
struct io_u *__io_u = *io_u;
}
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
- if (__should_check_rate(td, idx))
- td->rate_pending_usleep[idx] += (long) td->rate_usec_cycle[idx] - rusec;
+ if (__should_check_rate(td, idx)) {
+ td->rate_pending_usleep[idx] +=
+ (long) td->rate_usec_cycle[idx] - rusec;
+ }
if (__should_check_rate(td, idx ^ 1))
- td->rate_pending_usleep[idx ^ 1] -= lusec;
+ td->rate_pending_usleep[idx ^ 1] -= rusec;
}
if (td_write(td) && idx == DDIR_WRITE &&
icd->error = io_u->error;
io_u_log_error(td, io_u);
}
+ if (td->o.continue_on_error && icd->error &&
+ td_non_fatal_error(icd->error)) {
+ /*
+ * If there is a non_fatal error, then add to the error count
+ * and clear all the errors.
+ */
+ update_error_count(td, icd->error);
+ td_clear_error(td);
+ icd->error = 0;
+ io_u->error = 0;
+ }
}
static void init_icd(struct thread_data *td, struct io_completion_data *icd,