Add support for queuing > 1 command at the time
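With this change the engine queue path returns one of three FIO_Q_* codes instead of a plain error, and completions are reaped in batches rather than one event per submitted io_u. A minimal caller-side sketch of the contract, using only names that appear in this diff (the bare fragment around the switch is illustrative, not code from the tree):

	ret = td_io_queue(td, io_u);
	switch (ret) {
	case FIO_Q_COMPLETED:	/* completed inline; inspect io_u->error and io_u->resid */
		break;
	case FIO_Q_QUEUED:	/* in flight; reap later via io_u_queued_complete() */
		break;
	case FIO_Q_BUSY:	/* no room; park the io_u and flush the queue */
		requeue_io_u(td, &io_u);
		ret = td_io_commit(td);
		break;
	default:		/* negative error value */
		assert(ret < 0);
		break;
	}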
diff --git a/fio.c b/fio.c
index 78dca9a09610e3140ae5eaabc7518012bff40abe..858d6b81e6c5df199e17ae33f09d802105b1011b 100644
--- a/fio.c
+++ b/fio.c
@@ -27,6 +27,7 @@
 #include <signal.h>
 #include <time.h>
 #include <locale.h>
+#include <assert.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
 #include <sys/ipc.h>
@@ -162,20 +163,14 @@ static struct fio_file *get_next_file(struct thread_data *td)
  */
 static void cleanup_pending_aio(struct thread_data *td)
 {
-	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
 	struct list_head *entry, *n;
-	struct io_completion_data icd;
 	struct io_u *io_u;
 	int r;
 
 	/*
 	 * get immediately available events, if any
 	 */
-	r = td_io_getevents(td, 0, td->cur_depth, &ts);
-	if (r > 0) {
-		icd.nr = r;
-		ios_completed(td, &icd);
-	}
+	io_u_queued_complete(td, 0, NULL);
 
 	/*
 	 * now cancel remaining active events
@@ -190,13 +185,8 @@ static void cleanup_pending_aio(struct thread_data *td)
 		}
 	}
 
-	if (td->cur_depth) {
-		r = td_io_getevents(td, td->cur_depth, td->cur_depth, NULL);
-		if (r > 0) {
-			icd.nr = r;
-			ios_completed(td, &icd);
-		}
-	}
+	if (td->cur_depth)
+		io_u_queued_complete(td, td->cur_depth, NULL);
 }
 
 /*
@@ -206,7 +196,6 @@ static void cleanup_pending_aio(struct thread_data *td)
 static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 {
 	struct io_u *io_u = __get_io_u(td);
-	struct io_completion_data icd;
 	int ret;
 
 	if (!io_u)
@@ -220,24 +209,26 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
 		return 1;
 	}
 
+requeue:
 	ret = td_io_queue(td, io_u);
-	if (ret) {
+	if (ret < 0) {
 		td_verror(td, io_u->error);
 		put_io_u(td, io_u);
 		return 1;
-	}
-
-	ret = td_io_getevents(td, 1, td->cur_depth, NULL);
-	if (ret < 0) {
-		td_verror(td, ret);
-		return 1;
-	}
+	} else if (ret == FIO_Q_QUEUED) {
+		if (io_u_queued_complete(td, 1, NULL))
+			return 1;
+	} else if (ret == FIO_Q_COMPLETED) {
+		if (io_u->error) {
+			td_verror(td, io_u->error);
+			return 1;
+		}
 
-	icd.nr = ret;
-	ios_completed(td, &icd);
-	if (icd.error) {
-		td_verror(td, icd.error);
-		return 1;
+		io_u_sync_complete(td, io_u, NULL);
+	} else if (ret == FIO_Q_BUSY) {
+		if (td_io_commit(td))
+			return 1;
+		goto requeue;
 	}
 
 	return 0;
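Note that fio_io_sync() above now covers all three queue outcomes: FIO_Q_COMPLETED retires the fsync io_u inline via io_u_sync_complete(), FIO_Q_QUEUED waits for that single event via io_u_queued_complete(td, 1, NULL), and FIO_Q_BUSY flushes the backlog with td_io_commit() before jumping back to the requeue label. The busy/retry idiom in isolation (a sketch, other error handling elided):

requeue:
	ret = td_io_queue(td, io_u);
	if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))	/* flush already-queued commands */
			return 1;
		goto requeue;		/* then retry this io_u */
	}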
@@ -249,10 +240,9 @@ static int fio_io_sync(struct thread_data *td, struct fio_file *f)
  */
 static void do_verify(struct thread_data *td)
 {
-	struct io_u *io_u, *v_io_u = NULL;
-	struct io_completion_data icd;
 	struct fio_file *f;
-	int ret, i;
+	struct io_u *io_u;
+	int ret, i, min_events;
 
 	/*
 	 * sync io first and invalidate cache, to make sure we really
@@ -265,78 +255,72 @@ static void do_verify(struct thread_data *td)
 
 	td_set_runstate(td, TD_VERIFYING);
 
-	do {
-		if (td->terminate)
-			break;
-
+	io_u = NULL;
+	while (!td->terminate) {
 		io_u = __get_io_u(td);
 		if (!io_u)
 			break;
 
-		if (runtime_exceeded(td, &io_u->start_time)) {
-			put_io_u(td, io_u);
+		if (runtime_exceeded(td, &io_u->start_time))
 			break;
-		}
 
-		if (get_next_verify(td, io_u)) {
-			put_io_u(td, io_u);
+		if (get_next_verify(td, io_u))
 			break;
-		}
 
-		f = get_next_file(td);
-		if (!f)
+		if (td_io_prep(td, io_u))
 			break;
 
-		io_u->file = f;
+requeue:
+		ret = td_io_queue(td, io_u);
 
-		if (td_io_prep(td, io_u)) {
-			put_io_u(td, io_u);
-			break;
-		}
+		switch (ret) {
+		case FIO_Q_COMPLETED:
+			if (io_u->error)
+				ret = -io_u->error;
+			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+				int bytes = io_u->xfer_buflen - io_u->resid;
 
-		ret = td_io_queue(td, io_u);
-		if (ret) {
-			td_verror(td, io_u->error);
-			put_io_u(td, io_u);
+				io_u->xfer_buflen = io_u->resid;
+				io_u->xfer_buf += bytes;
+				goto requeue;
+			}
+			ret = io_u_sync_complete(td, io_u, verify_io_u);
+			if (ret)
+				break;
+			continue;
+		case FIO_Q_QUEUED:
 			break;
-		}
-
-		/*
-		 * we have one pending to verify, do that while
-		 * we are doing io on the next one
-		 */
-		if (do_io_u_verify(td, &v_io_u))
+		case FIO_Q_BUSY:
+			requeue_io_u(td, &io_u);
+			ret = td_io_commit(td);
 			break;
-
-		ret = td_io_getevents(td, 1, 1, NULL);
-		if (ret != 1) {
-			if (ret < 0)
-				td_verror(td, ret);
+		default:
+			assert(ret < 0);
+			td_verror(td, -ret);
 			break;
 		}
 
-		v_io_u = td->io_ops->event(td, 0);
-		icd.nr = 1;
-		icd.error = 0;
-		fio_gettime(&icd.time, NULL);
-		io_completed(td, v_io_u, &icd);
-
-		if (icd.error) {
-			td_verror(td, icd.error);
-			put_io_u(td, v_io_u);
-			v_io_u = NULL;
+		if (ret < 0 || td->error)
 			break;
-		}
 
 		/*
-		 * if we can't submit more io, we need to verify now
+		 * if we can queue more, do so. but check if there are
+		 * completed io_u's first.
 		 */
-		if (queue_full(td) && do_io_u_verify(td, &v_io_u))
-			break;
+		min_events = 0;
+		if (queue_full(td) || ret == FIO_Q_BUSY)
+			min_events = 1;
 
-	} while (1);
+		/*
+		 * Reap required number of io units, if any, and do the
+		 * verification on them through the callback handler
+		 */
+		if (io_u_queued_complete(td, min_events, verify_io_u))
+			break;
+	}
 
-	do_io_u_verify(td, &v_io_u);
+	if (io_u)
+		put_io_u(td, io_u);
 
 	if (td->cur_depth)
 		cleanup_pending_aio(td);
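The short-transfer path in do_verify() above retries only the residual bytes: it advances the buffer pointer past the data that did transfer and shrinks the request before jumping back to requeue. A worked example, assuming a hypothetical 64KB request for which the engine reports a 16KB residual:

	/* io_u->xfer_buflen == 65536 and io_u->resid == 16384 */
	int bytes = io_u->xfer_buflen - io_u->resid;	/* 49152 bytes done */

	io_u->xfer_buflen = io_u->resid;		/* retry the 16384 left */
	io_u->xfer_buf += bytes;			/* skip completed data */
	goto requeue;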
@@ -375,7 +359,6 @@ static void do_cpuio(struct thread_data *td)
  */
 static void do_io(struct thread_data *td)
 {
-	struct io_completion_data icd;
 	struct timeval s;
 	unsigned long usec;
 	struct fio_file *f;
@@ -384,7 +367,8 @@ static void do_io(struct thread_data *td)
 	td_set_runstate(td, TD_RUNNING);
 
 	while ((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->io_size) {
-		struct timespec *timeout;
+		struct timeval comp_time;
+		long bytes_done = 0;
 		int min_evts = 0;
 		struct io_u *io_u;
@@ -400,74 +384,97 @@ static void do_io(struct thread_data *td)
 			break;
 
 		memcpy(&s, &io_u->start_time, sizeof(s));
+
+		if (runtime_exceeded(td, &s)) {
+			put_io_u(td, io_u);
+			break;
+		}
 requeue:
 		ret = td_io_queue(td, io_u);
-		if (ret) {
-			if (ret > 0 && (io_u->xfer_buflen != io_u->resid) &&
-			    io_u->resid) {
-				/*
-				 * short read/write. requeue.
-				 */
+
+		switch (ret) {
+		case FIO_Q_COMPLETED:
+			if (io_u->error) {
+				ret = io_u->error;
+				break;
+			}
+			if (io_u->xfer_buflen != io_u->resid && io_u->resid) {
+				int bytes = io_u->xfer_buflen - io_u->resid;
+
 				io_u->xfer_buflen = io_u->resid;
-				io_u->xfer_buf += ret;
+				io_u->xfer_buf += bytes;
 				goto requeue;
-			} else {
-				put_io_u(td, io_u);
-				break;
 			}
+			fio_gettime(&comp_time, NULL);
+			bytes_done = io_u_sync_complete(td, io_u, NULL);
+			if (bytes_done < 0)
+				ret = bytes_done;
+			break;
+		case FIO_Q_QUEUED:
+			break;
+		case FIO_Q_BUSY:
+			requeue_io_u(td, &io_u);
+			ret = td_io_commit(td);
+			break;
+		default:
+			assert(ret < 0);
+			put_io_u(td, io_u);
+			break;
 		}
 
-		add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
+		if (ret < 0 || td->error)
+			break;
 
-		if (td->cur_depth < td->iodepth) {
-			struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
+		if (io_u)
+			add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
 
-			timeout = &ts;
+		/*
+		 * See if we need to complete some commands
+		 */
+		if (ret == FIO_Q_QUEUED || ret == FIO_Q_BUSY) {
 			min_evts = 0;
-		} else {
-			timeout = NULL;
-			min_evts = 1;
+			if (queue_full(td) || ret == FIO_Q_BUSY)
+				min_evts = 1;
+
+			fio_gettime(&comp_time, NULL);
+			bytes_done = io_u_queued_complete(td, min_evts, NULL);
+			if (bytes_done < 0)
+				break;
 		}
 
-		ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
-		if (ret < 0) {
-			td_verror(td, ret);
-			break;
-		} else if (!ret)
+		if (!bytes_done)
 			continue;
 
-		icd.nr = ret;
-		ios_completed(td, &icd);
-		if (icd.error) {
-			td_verror(td, icd.error);
-			break;
-		}
-
 		/*
 		 * the rate is batched for now, it should work for batches
 		 * of completions except the very first one which may look
 		 * a little bursty
 		 */
-		usec = utime_since(&s, &icd.time);
+		usec = utime_since(&s, &comp_time);
 
-		rate_throttle(td, usec, icd.bytes_done[td->ddir], td->ddir);
+		rate_throttle(td, usec, bytes_done, td->ddir);
 
-		if (check_min_rate(td, &icd.time)) {
+		if (check_min_rate(td, &comp_time)) {
 			if (exitall_on_terminate)
 				terminate_threads(td->groupid, 0);
 			td_verror(td, ENODATA);
 			break;
 		}
 
-		if (runtime_exceeded(td, &icd.time))
-			break;
-
 		if (td->thinktime) {
 			unsigned long long b;
 
 			b = td->io_blocks[0] + td->io_blocks[1];
-			if (!(b % td->thinktime_blocks))
-				usec_sleep(td, td->thinktime);
+			if (!(b % td->thinktime_blocks)) {
+				int left;
+
+				if (td->thinktime_spin)
+					__usec_sleep(td->thinktime_spin);
+
+				left = td->thinktime - td->thinktime_spin;
+				if (left)
+					usec_sleep(td, left);
+			}
 		}
 	}
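The thinktime change at the end of do_io() splits the pause into two parts: __usec_sleep() burns the first td->thinktime_spin microseconds, and usec_sleep() yields for the remainder. A worked example with hypothetical settings thinktime=1000 and thinktime_spin=200 (microseconds), assuming thinktime_spin <= thinktime:

	if (td->thinktime_spin)
		__usec_sleep(td->thinktime_spin);	/* first 200us */

	left = td->thinktime - td->thinktime_spin;	/* 800us remain */
	if (left)
		usec_sleep(td, left);			/* sleep off the rest */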
@@ -640,6 +647,7 @@ static void *thread_main(void *data)
 	INIT_LIST_HEAD(&td->io_u_freelist);
 	INIT_LIST_HEAD(&td->io_u_busylist);
+	INIT_LIST_HEAD(&td->io_u_requeues);
 	INIT_LIST_HEAD(&td->io_hist_list);
 	INIT_LIST_HEAD(&td->io_log_list);