static unsigned long long get_next_offset(struct thread_data *td)
{
unsigned long long kb;
- double r;
+ long r;
if (!td->sequential) {
- drand48_r(&td->random_state, &r);
+ lrand48_r(&td->random_state, &r);
kb = (1+(double) (td->kb-1) * r / (RAND_MAX+1.0));
} else
kb = td->last_kb;
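
For reference, a standalone sketch of the scaling that the lrand48_r() switch relies on; kb_total and the seed are made-up stand-ins for td->kb and td->random_state, not fio fields. lrand48_r() fills r with a non-negative long in [0, 2^31), and dividing by RAND_MAX + 1.0 maps it back into [0, 1), so the arithmetic behaves like the old drand48_r() value without carrying double-precision state:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct drand48_data state;
	unsigned long long kb, kb_total = 1024;	/* stand-in for td->kb */
	long r;

	srand48_r(0x1234, &state);
	lrand48_r(&state, &r);

	/*
	 * r is in [0, 2^31) and RAND_MAX is 2^31 - 1 on glibc, so
	 * r / (RAND_MAX + 1.0) is in [0, 1) and kb lands in
	 * [1, kb_total - 1], never past the end of the file.
	 */
	kb = 1 + (unsigned long long)((double)(kb_total - 1) * r / (RAND_MAX + 1.0));
	printf("random offset: %llu KiB\n", kb);
	return 0;
}
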
static unsigned int get_next_buflen(struct thread_data *td)
{
unsigned int buflen;
- double r;
+ long r;
if (td->min_bs == td->max_bs)
buflen = td->min_bs;
else {
- drand48_r(&td->bsrange_state, &r);
+ lrand48_r(&td->bsrange_state, &r);
buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
}
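
The masking line rounds the random length up to a multiple of the minimum block size. A minimal sketch, assuming min_bs is a power of two (which the & ~(min_bs - 1) trick requires); round_to_min_bs is a hypothetical name, not a fio helper:

#include <stdio.h>

/* Round buflen up to the next multiple of min_bs (a power of two). */
static unsigned int round_to_min_bs(unsigned int buflen, unsigned int min_bs)
{
	return (buflen + min_bs - 1) & ~(min_bs - 1);
}

int main(void)
{
	printf("%u\n", round_to_min_bs(4097, 4096));	/* 8192 */
	printf("%u\n", round_to_min_bs(4096, 4096));	/* 4096, already aligned */
	return 0;
}
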
struct io_u *io_u = NULL;
struct timeval e;
- for (td->this_io_kb = 0; td->this_io_kb < td->kb;) {
+ while (td->this_io_kb < td->kb) {
int ret;
if (td->terminate)
}
}
+static int async_do_verify(struct thread_data *td, struct io_u **io_u)
+{
+ struct io_u *v_io_u = *io_u;
+ int ret = 0;
+
+ if (v_io_u) {
+ ret = verify_io_us(td, v_io_u, NULL);
+ put_io_u(td, v_io_u);
+ *io_u = NULL;
+ }
+
+ return ret;
+}
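
The helper takes struct io_u ** rather than a plain pointer so it can clear the caller's reference after consuming it, which is what lets the three open-coded verify-and-put sites below collapse into single calls. A stub sketch of that consume-and-clear contract; the type, verify_stub(), and do_pending_verify() are invented for illustration, not fio's:

#include <stdlib.h>

struct io_u { char *buf; };

static int verify_stub(struct io_u *v)
{
	return v->buf == NULL;		/* pretend a NULL buffer is a miscompare */
}

/* Verify and release *io_u if one is pending, then NULL the caller's
 * pointer so the same unit can never be verified or freed twice. */
static int do_pending_verify(struct io_u **io_u)
{
	struct io_u *v = *io_u;
	int ret = 0;

	if (v) {
		ret = verify_stub(v);
		free(v->buf);
		free(v);
		*io_u = NULL;
	}

	return ret;
}

int main(void)
{
	struct io_u *pending = calloc(1, sizeof(*pending));

	pending->buf = malloc(16);
	do_pending_verify(&pending);
	do_pending_verify(&pending);	/* pending is NULL now: a safe no-op */
	return 0;
}
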
+
static int do_async_verify(struct thread_data *td)
{
struct timeval t;
- struct io_u *io_u, *v_io_u;
+ struct io_u *io_u, *v_io_u = NULL;
struct verify_header *hdr;
int ret, back;
char *p;
td_set_runstate(td, TD_VERIFYING);
- td->cur_off = 0;
- td->last_kb = 0;
- v_io_u = NULL;
-
do {
if (td->terminate)
break;
* we have one pending to verify, do that while
* we are doing io on the next one
*/
- if (v_io_u) {
- ret = verify_io_us(td, v_io_u, NULL);
- put_io_u(td, v_io_u);
- v_io_u = NULL;
- if (ret)
- break;
- }
+ if (async_do_verify(td, &v_io_u))
+ break;
ret = io_getevents(td->aio_ctx, 1, 1, td->aio_events, NULL);
if (ret != 1) {
td->cur_off += (v_io_u->buflen - back);
/*
- * if max depth is 1, we need to verify now
+ * if we can't submit more io, we need to verify now
*/
- if (queue_full(td)) {
- ret = verify_io_us(td, v_io_u, NULL);
- put_io_u(td, v_io_u);
- v_io_u = NULL;
- if (ret)
- break;
- }
+ if (queue_full(td) && async_do_verify(td, &v_io_u))
+ break;
+
} while (1);
- if (v_io_u) {
- verify_io_us(td, v_io_u, NULL);
- put_io_u(td, v_io_u);
- }
+ async_do_verify(td, &v_io_u);
if (td->cur_depth)
cleanup_pending_aio(td);
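
cleanup_pending_aio()'s body is not part of this patch; as an assumption about the pattern it implements, draining outstanding completions with libaio usually looks like the sketch below (link with -laio):

#include <libaio.h>
#include <stdio.h>

/* Block until 'pending' completions have been reaped from ctx. */
static void drain_aio(io_context_t ctx, int pending)
{
	struct io_event events[64];

	while (pending > 0) {
		int ret = io_getevents(ctx, 1, 64, events, NULL);

		if (ret < 0) {
			fprintf(stderr, "io_getevents: %d\n", ret);
			break;
		}
		pending -= ret;
	}
}

int main(void)
{
	io_context_t ctx = 0;

	if (io_queue_init(64, &ctx))
		return 1;
	drain_aio(ctx, 0);	/* nothing in flight, returns at once */
	io_queue_release(ctx);
	return 0;
}
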
struct timeval s, e;
unsigned long usec;
- for (td->this_io_kb = 0; td->this_io_kb < td->kb;) {
+ while (td->this_io_kb < td->kb) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
int ret, min_evts = 0;
return 0;
}
+static void clear_io_state(struct thread_data *td)
+{
+ td->cur_off = 0;
+ td->last_kb = 0;
+ td->stat_io_kb = 0;
+ td->this_io_kb = 0;
+}
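
Because the for-loop initializers above became plain while loops, this_io_kb now only reaches zero through this helper, so it must run before every io or verify pass. A minimal sketch of why; the struct is a stub carrying just the fields this patch touches:

#include <stdio.h>

struct state {
	unsigned long long cur_off, last_kb, stat_io_kb, this_io_kb;
};

static void clear_io_state(struct state *s)
{
	s->cur_off = 0;
	s->last_kb = 0;
	s->stat_io_kb = 0;
	s->this_io_kb = 0;
}

static void run_pass(struct state *s, unsigned long long kb, const char *name)
{
	while (s->this_io_kb < kb)
		s->this_io_kb++;	/* stand-in for one io unit */
	printf("%s pass did %llu KiB\n", name, s->this_io_kb);
}

int main(void)
{
	struct state s;

	clear_io_state(&s);
	run_pass(&s, 128, "write");

	/*
	 * Without this reset, the verify pass would see this_io_kb == 128
	 * and exit immediately without checking anything.
	 */
	clear_io_state(&s);
	run_pass(&s, 128, "verify");
	return 0;
}
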
+
static void *thread_main(int shm_id, int offset, char *argv[])
{
struct thread_data *td;
if (td->ratemin)
memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
- td->cur_off = 0;
- td->last_kb = 0;
- td->stat_io_kb = 0;
+ clear_io_state(td);
if (!td->use_aio) {
do_sync_io(td);
- if (td->verify && !td_read(td)) {
- if (!do_sync_verify(td))
- break;
- }
+ if (!td->verify || td_read(td))
+ continue;
+
+ clear_io_state(td);
+ if (!do_sync_verify(td))
+ break;
} else {
do_async_io(td);
- if (td->verify && !td_read(td)) {
- if (!do_async_verify(td))
- break;
- }
+ if (!td->verify || td_read(td))
+ continue;
+
+ clear_io_state(td);
+ if (!do_async_verify(td))
+ break;
}
}
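
Condensed, the per-loop flow in thread_main() now reads as below; the stubs mirror the structure above and are not fio code. Reads are verified inline as they complete, so only write jobs get the separate verify pass:

#include <stdbool.h>
#include <stdio.h>

struct td { bool use_aio, verify, reading; };

static void clear_io_state(struct td *td) { (void)td; /* reset offsets/counters */ }
static void do_io(struct td *td) { puts(td->use_aio ? "async io" : "sync io"); }
static int do_verify_pass(struct td *td) { (void)td; puts("verify"); return 1; }

int main(void)
{
	struct td td = { .use_aio = true, .verify = true, .reading = false };
	int loops = 2;

	while (loops--) {
		clear_io_state(&td);
		do_io(&td);

		if (!td.verify || td.reading)
			continue;

		clear_io_state(&td);
		if (!do_verify_pass(&td))	/* zero return means failure */
			break;
	}
	return 0;
}
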