Move bytes_done[] off the stack and into struct thread_data: we can't
keep it on the stack for async IO offload, where completions may be
reaped after the submitting function's stack frame is gone. Callers
now read td->bytes_done[] directly, and the counters are cleared
along with the other per-ddir stats.
Signed-off-by: Jens Axboe <axboe@fb.com>
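For context, a minimal standalone sketch of the lifetime bug this change
avoids; this is not fio code, and every name in it apart from the
bytes_done[] idea is hypothetical. With async IO offload the completion
side can run after the submitting function has returned, so a stack-local
bytes_done[] would be written through a dangling pointer, while a field
in the per-thread data persists for the life of the job:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct thread_data {
	uint64_t bytes_done[3];		/* lives as long as the job */
};

/* Hypothetical offloaded completion handler: runs after submit() returns. */
static void *completion_fn(void *arg)
{
	uint64_t *done = arg;

	sleep(1);			/* the completion arrives "later" */
	done[1] += 4096;		/* safe only if arg outlives submit() */
	return NULL;
}

static void submit(struct thread_data *td)
{
	pthread_t t;

	/*
	 * Broken variant: passing a stack-local
	 *	uint64_t bytes_done[3] = { 0, 0, 0 };
	 * here would hand the handler a pointer into a frame that is
	 * about to be reused. Passing td->bytes_done is safe.
	 */
	pthread_create(&t, NULL, completion_fn, td->bytes_done);
	pthread_detach(t);
}

int main(void)
{
	struct thread_data td = { .bytes_done = { 0, 0, 0 } };

	submit(&td);
	sleep(2);			/* wait for the offloaded completion */
	printf("bytes done: %llu\n", (unsigned long long) td.bytes_done[1]);
	return 0;
}

Build with cc -pthread. The same persistence is why the final hunk below
clears td->bytes_done[] together with the other per-ddir counters when
stats are reset.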
-static int check_min_rate(struct thread_data *td, struct timeval *now,
- uint64_t *bytes_done)
+static int check_min_rate(struct thread_data *td, struct timeval *now)
- if (bytes_done[DDIR_READ])
+ if (td->bytes_done[DDIR_READ])
ret |= __check_min_rate(td, now, DDIR_READ);
- if (bytes_done[DDIR_WRITE])
+ if (td->bytes_done[DDIR_WRITE])
ret |= __check_min_rate(td, now, DDIR_WRITE);
- if (bytes_done[DDIR_TRIM])
+ if (td->bytes_done[DDIR_TRIM])
ret |= __check_min_rate(td, now, DDIR_TRIM);
return ret;
/*
* get immediately available events, if any
*/
- r = io_u_queued_complete(td, 0, NULL);
+ r = io_u_queued_complete(td, 0);
- r = io_u_queued_complete(td, td->cur_depth, NULL);
+ r = io_u_queued_complete(td, td->cur_depth);
put_io_u(td, io_u);
return 1;
} else if (ret == FIO_Q_QUEUED) {
- if (io_u_queued_complete(td, 1, NULL) < 0)
+ if (io_u_queued_complete(td, 1) < 0)
return 1;
} else if (ret == FIO_Q_COMPLETED) {
if (io_u->error) {
- if (io_u_sync_complete(td, io_u, NULL) < 0)
+ if (io_u_sync_complete(td, io_u) < 0)
return 1;
} else if (ret == FIO_Q_BUSY) {
if (td_io_commit(td))
-static int wait_for_completions(struct thread_data *td, struct timeval *time,
- uint64_t *bytes_done)
+static int wait_for_completions(struct thread_data *td, struct timeval *time)
{
const int full = queue_full(td);
int min_evts = 0;
fio_gettime(time, NULL);
do {
- ret = io_u_queued_complete(td, min_evts, bytes_done);
+ ret = io_u_queued_complete(td, min_evts);
if (ret < 0)
break;
} while (full && (td->cur_depth > td->o.iodepth_low));
*/
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
struct fio_file *f;
struct io_u *io_u;
int ret, min_events;
- if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
+ if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
break;
while ((io_u = get_io_u(td)) != NULL) {
requeue_io_u(td, &io_u);
} else {
sync_done:
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete)
- ret = wait_for_completions(td, NULL, bytes_done);
+ ret = wait_for_completions(td, NULL);
min_events = td->cur_depth;
if (min_events)
- ret = io_u_queued_complete(td, min_events, NULL);
+ ret = io_u_queued_complete(td, min_events);
} else
cleanup_pending_aio(td);
*/
static uint64_t do_io(struct thread_data *td)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
unsigned int i;
int ret = 0;
uint64_t total_bytes, bytes_issued = 0;
__should_check_rate(td, DDIR_TRIM))
fio_gettime(&comp_time, NULL);
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
if (ret < 0)
break;
bytes_issued += io_u->xfer_buflen;
reap:
full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete)
- ret = wait_for_completions(td, &comp_time, bytes_done);
+ ret = wait_for_completions(td, &comp_time);
- if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
+ if (!ddir_rw_sum(td->bytes_done) &&
+ !(td->io_ops->flags & FIO_NOIO))
- if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
- if (check_min_rate(td, &comp_time, bytes_done)) {
+ if (!in_ramp_time(td) && should_check_rate(td)) {
+ if (check_min_rate(td, &comp_time)) {
if (exitall_on_terminate)
fio_terminate_threads(td->groupid);
td_verror(td, EIO, "check_min_rate");
i = td->cur_depth;
if (i) {
- ret = io_u_queued_complete(td, i, bytes_done);
+ ret = io_u_queued_complete(td, i);
if (td->o.fill_device && td->error == ENOSPC)
td->error = 0;
}
if (!ddir_rw_sum(td->this_io_bytes))
td->done = 1;
- return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+ return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}
static void cleanup_io_u(struct thread_data *td)
*/
static uint64_t do_dry_run(struct thread_data *td)
{
- uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
-
td_set_runstate(td, TD_RUNNING);
while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
!td->o.experimental_verify)
log_io_piece(td, io_u);
- ret = io_u_sync_complete(td, io_u, bytes_done);
+ ret = io_u_sync_complete(td, io_u);
- return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
+ return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
uint64_t io_blocks[DDIR_RWDIR_CNT];
uint64_t this_io_blocks[DDIR_RWDIR_CNT];
uint64_t io_bytes[DDIR_RWDIR_CNT];
- uint64_t io_skip_bytes;
uint64_t this_io_bytes[DDIR_RWDIR_CNT];
+ uint64_t io_skip_bytes;
uint64_t zone_bytes;
struct fio_mutex *mutex;
+ uint64_t bytes_done[DDIR_RWDIR_CNT];
/*
* State for random io, a bitmap of blocks done vs not done
-static inline int should_check_rate(struct thread_data *td,
- uint64_t *bytes_done)
+static inline int should_check_rate(struct thread_data *td)
- if (bytes_done[DDIR_READ])
+ if (td->bytes_done[DDIR_READ])
ret |= __should_check_rate(td, DDIR_READ);
- if (bytes_done[DDIR_WRITE])
+ if (td->bytes_done[DDIR_WRITE])
ret |= __should_check_rate(td, DDIR_WRITE);
- if (bytes_done[DDIR_TRIM])
+ if (td->bytes_done[DDIR_TRIM])
ret |= __should_check_rate(td, DDIR_TRIM);
return ret;
while (td->io_u_in_flight) {
int fio_unused ret;
- ret = io_u_queued_complete(td, 1, NULL);
+ ret = io_u_queued_complete(td, 1);
/*
* Complete a single io_u for the sync engines.
*/
-int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
- uint64_t *bytes)
+int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
struct io_completion_data icd;
+ int ddir;
init_icd(td, &icd, 1);
io_completed(td, &io_u, &icd);
- if (bytes) {
- int ddir;
-
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
- bytes[ddir] += icd.bytes_done[ddir];
- }
+ for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ td->bytes_done[ddir] += icd.bytes_done[ddir];
/*
* Called to complete min_events number of io for the async engines.
*/
-int io_u_queued_complete(struct thread_data *td, int min_evts,
- uint64_t *bytes)
+int io_u_queued_complete(struct thread_data *td, int min_evts)
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
+ int ddir;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
- if (bytes) {
- int ddir;
-
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
- bytes[ddir] += icd.bytes_done[ddir];
- }
+ for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ td->bytes_done[ddir] += icd.bytes_done[ddir];
extern void put_io_u(struct thread_data *, struct io_u *);
extern void clear_io_u(struct thread_data *, struct io_u *);
extern void requeue_io_u(struct thread_data *, struct io_u **);
-extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *, uint64_t *);
-extern int __must_check io_u_queued_complete(struct thread_data *, int, uint64_t *);
+extern int __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
+extern int __must_check io_u_queued_complete(struct thread_data *, int);
extern void io_u_queued(struct thread_data *, struct io_u *);
extern void io_u_quiesce(struct thread_data *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
td->this_io_blocks[ddir] = 0;
td->rate_bytes[ddir] = 0;
td->rate_blocks[ddir] = 0;
+ td->bytes_done[ddir] = 0;