* Copyright (C) 2005 Jens Axboe <axboe@suse.de>
* Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
*
+ * The license below covers all files distributed with fio unless otherwise
+ * noted in the file itself.
+ *
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
int temp_stall_ts;
char *fio_inst_prefix = _INST_PREFIX;
-#define should_fsync(td) ((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))
-
static volatile int startup_sem;
#define TERMINATE_ALL (-1)
}
}
-/*
- * The ->file_map[] contains a map of blocks we have or have not done io
- * to yet. Used to make sure we cover the entire range in a fair fashion.
- */
-static int random_map_free(struct thread_data *td, struct fio_file *f,
- unsigned long long block)
-{
- unsigned int idx = RAND_MAP_IDX(td, f, block);
- unsigned int bit = RAND_MAP_BIT(td, f, block);
-
- return (f->file_map[idx] & (1UL << bit)) == 0;
-}
-
-/*
- * Return the next free block in the map.
- */
-static int get_next_free_block(struct thread_data *td, struct fio_file *f,
- unsigned long long *b)
-{
- int i;
-
- *b = 0;
- i = 0;
- while ((*b) * td->min_bs < f->file_size) {
- if (f->file_map[i] != -1UL) {
- *b += ffz(f->file_map[i]);
- return 0;
- }
-
- *b += BLOCKS_PER_MAP;
- i++;
- }
-
- return 1;
-}
-
-/*
- * Mark a given offset as used in the map.
- */
-static void mark_random_map(struct thread_data *td, struct fio_file *f,
- struct io_u *io_u)
-{
- unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
- unsigned int blocks = 0;
-
- while (blocks < (io_u->buflen / td->min_bs)) {
- unsigned int idx, bit;
-
- if (!random_map_free(td, f, block))
- break;
-
- idx = RAND_MAP_IDX(td, f, block);
- bit = RAND_MAP_BIT(td, f, block);
-
- assert(idx < f->num_maps);
-
- f->file_map[idx] |= (1UL << bit);
- block++;
- blocks++;
- }
-
- if ((blocks * td->min_bs) < io_u->buflen)
- io_u->buflen = blocks * td->min_bs;
-}
-
-/*
- * For random io, generate a random new block and see if it's used. Repeat
- * until we find a free one. For sequential io, just return the end of
- * the last io issued.
- */
-static int get_next_offset(struct thread_data *td, struct fio_file *f,
- unsigned long long *offset)
-{
- unsigned long long b, rb;
- long r;
-
- if (!td->sequential) {
- unsigned long long max_blocks = td->io_size / td->min_bs;
- int loops = 50;
-
- do {
- r = os_random_long(&td->random_state);
- b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
- rb = b + (f->file_offset / td->min_bs);
- loops--;
- } while (!random_map_free(td, f, rb) && loops);
-
- if (!loops) {
- if (get_next_free_block(td, f, &b))
- return 1;
- }
- } else
- b = f->last_pos / td->min_bs;
-
- *offset = (b * td->min_bs) + f->file_offset;
- if (*offset > f->file_size)
- return 1;
-
- return 0;
-}
-
-static unsigned int get_next_buflen(struct thread_data *td)
-{
- unsigned int buflen;
- long r;
-
- if (td->min_bs == td->max_bs)
- buflen = td->min_bs;
- else {
- r = os_random_long(&td->bsrange_state);
- buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
- buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
- }
-
- if (buflen > td->io_size - td->this_io_bytes[td->ddir]) {
- /*
- * if using direct/raw io, we may not be able to
- * shrink the size. so just fail it.
- */
- if (td->io_ops->flags & FIO_RAWIO)
- return 0;
-
- buflen = td->io_size - td->this_io_bytes[td->ddir];
- }
-
- return buflen;
-}
-
/*
* Check if we are above the minimum rate given.
*/
return 0;
}
-/*
- * Return the data direction for the next io_u. If the job is a
- * mixed read/write workload, check the rwmix cycle and switch if
- * necessary.
- */
-static int get_rw_ddir(struct thread_data *td)
-{
- if (td_rw(td)) {
- struct timeval now;
- unsigned long elapsed;
-
- gettimeofday(&now, NULL);
- elapsed = mtime_since_now(&td->rwmix_switch);
-
- /*
- * Check if it's time to seed a new data direction.
- */
- if (elapsed >= td->rwmixcycle) {
- unsigned int v;
- long r;
-
- r = os_random_long(&td->rwmix_state);
- v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
- if (v < td->rwmixread)
- td->rwmix_ddir = DDIR_READ;
- else
- td->rwmix_ddir = DDIR_WRITE;
- memcpy(&td->rwmix_switch, &now, sizeof(now));
- }
- return td->rwmix_ddir;
- } else if (td_read(td))
- return DDIR_READ;
- else
- return DDIR_WRITE;
-}
-
-static int td_io_prep(struct thread_data *td, struct io_u *io_u)
-{
- if (td->io_ops->prep && td->io_ops->prep(td, io_u))
- return 1;
-
- return 0;
-}
-
-void put_io_u(struct thread_data *td, struct io_u *io_u)
-{
- io_u->file = NULL;
- list_del(&io_u->list);
- list_add(&io_u->list, &td->io_u_freelist);
- td->cur_depth--;
-}
-
-static int fill_io_u(struct thread_data *td, struct fio_file *f,
- struct io_u *io_u)
-{
- /*
- * If using an iolog, grab next piece if any available.
- */
- if (td->read_iolog)
- return read_iolog_get(td, io_u);
-
- /*
- * No log, let the seq/rand engine retrieve the next position.
- */
- if (!get_next_offset(td, f, &io_u->offset)) {
- io_u->buflen = get_next_buflen(td);
-
- if (io_u->buflen) {
- io_u->ddir = get_rw_ddir(td);
-
- /*
- * If using a write iolog, store this entry.
- */
- if (td->write_iolog)
- write_iolog_put(td, io_u);
-
- io_u->file = f;
- return 0;
- }
- }
-
- return 1;
-}
-
-#define queue_full(td) list_empty(&(td)->io_u_freelist)
-
-struct io_u *__get_io_u(struct thread_data *td)
-{
- struct io_u *io_u = NULL;
-
- if (!queue_full(td)) {
- io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
-
- io_u->error = 0;
- io_u->resid = 0;
- list_del(&io_u->list);
- list_add(&io_u->list, &td->io_u_busylist);
- td->cur_depth++;
- }
-
- return io_u;
-}
-
-/*
- * Return an io_u to be processed. Gets a buflen and offset, sets direction,
- * etc. The returned io_u is fully ready to be prepped and submitted.
- */
-static struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
-{
- struct io_u *io_u;
-
- io_u = __get_io_u(td);
- if (!io_u)
- return NULL;
-
- if (td->zone_bytes >= td->zone_size) {
- td->zone_bytes = 0;
- f->last_pos += td->zone_skip;
- }
-
- if (fill_io_u(td, f, io_u)) {
- put_io_u(td, io_u);
- return NULL;
- }
-
- if (io_u->buflen + io_u->offset > f->file_size) {
- if (td->io_ops->flags & FIO_RAWIO) {
- put_io_u(td, io_u);
- return NULL;
- }
-
- io_u->buflen = f->file_size - io_u->offset;
- }
-
- if (!io_u->buflen) {
- put_io_u(td, io_u);
- return NULL;
- }
-
- if (!td->read_iolog && !td->sequential)
- mark_random_map(td, f, io_u);
-
- f->last_pos += io_u->buflen;
-
- if (td->verify != VERIFY_NONE)
- populate_verify_io_u(td, io_u);
-
- if (td_io_prep(td, io_u)) {
- put_io_u(td, io_u);
- return NULL;
- }
-
- gettimeofday(&io_u->start_time, NULL);
- return io_u;
-}
-
static inline void td_set_runstate(struct thread_data *td, int runstate)
{
td->runstate = runstate;
return f;
}
-static int td_io_sync(struct thread_data *td, struct fio_file *f)
-{
- if (td->io_ops->sync)
- return td->io_ops->sync(td, f);
-
- return 0;
-}
-
-static int td_io_getevents(struct thread_data *td, int min, int max,
- struct timespec *t)
-{
- return td->io_ops->getevents(td, min, max, t);
-}
-
-static int td_io_queue(struct thread_data *td, struct io_u *io_u)
-{
- gettimeofday(&io_u->issue_time, NULL);
-
- return td->io_ops->queue(td, io_u);
-}
-
-#define iocb_time(iocb) ((unsigned long) (iocb)->data)
-
-static void io_completed(struct thread_data *td, struct io_u *io_u,
- struct io_completion_data *icd)
-{
- struct timeval e;
- unsigned long msec;
-
- gettimeofday(&e, NULL);
-
- if (!io_u->error) {
- unsigned int bytes = io_u->buflen - io_u->resid;
- const int idx = io_u->ddir;
-
- td->io_blocks[idx]++;
- td->io_bytes[idx] += bytes;
- td->zone_bytes += bytes;
- td->this_io_bytes[idx] += bytes;
-
- msec = mtime_since(&io_u->issue_time, &e);
-
- add_clat_sample(td, idx, msec);
- add_bw_sample(td, idx);
-
- if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE)
- log_io_piece(td, io_u);
-
- icd->bytes_done[idx] += bytes;
- } else
- icd->error = io_u->error;
-}
-
-static void ios_completed(struct thread_data *td,struct io_completion_data *icd)
-{
- struct io_u *io_u;
- int i;
-
- icd->error = 0;
- icd->bytes_done[0] = icd->bytes_done[1] = 0;
-
- for (i = 0; i < icd->nr; i++) {
- io_u = td->io_ops->event(td, i);
-
- io_completed(td, io_u, icd);
- put_io_u(td, io_u);
- }
-}
-
/*
* When a job exits, we can cancel its in-flight IO if we are using async
* io. Attempt to do so.
}
}
+/*
+ * Helper to handle the final sync of a file. It works just like the
+ * normal io path, but does everything synchronously.
+ */
+static int fio_io_sync(struct thread_data *td, struct fio_file *f)
+{
+ struct io_u *io_u = __get_io_u(td);
+ struct io_completion_data icd;
+ int ret;
+
+ if (!io_u)
+ return 1;
+
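+ /* turn the io_u into a sync request against the given file */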
+ io_u->ddir = DDIR_SYNC;
+ io_u->file = f;
+
+ if (td_io_prep(td, io_u)) {
+ put_io_u(td, io_u);
+ return 1;
+ }
+
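+ /* queue the sync; a non-zero return means the engine failed it */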
+ ret = td_io_queue(td, io_u);
+ if (ret) {
+ td_verror(td, io_u->error);
+ put_io_u(td, io_u);
+ return 1;
+ }
+
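+ /* wait for at least one completion, reaping anything else pending */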
+ ret = td_io_getevents(td, 1, td->cur_depth, NULL);
+ if (ret < 0) {
+ td_verror(td, ret);
+ return 1;
+ }
+
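+ /* account the completed io_us and surface any error they carried */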
+ icd.nr = ret;
+ ios_completed(td, &icd);
+ if (icd.error) {
+ td_verror(td, icd.error);
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
* read from disk.
*/
for_each_file(td, f, i) {
- td_io_sync(td, f);
+ fio_io_sync(td, f);
file_invalidate_cache(td, f);
}
ret = td_io_queue(td, io_u);
if (ret) {
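+ /* the queue error lives in the io_u, so grab it before recycling */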
+ td_verror(td, io_u->error);
put_io_u(td, io_u);
- td_verror(td, ret);
break;
}
ret = td_io_queue(td, io_u);
if (ret) {
+ td_verror(td, io_u->error);
put_io_u(td, io_u);
- td_verror(td, ret);
break;
}
min_evts = 1;
}
-
ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
if (ret < 0) {
- td_verror(td, -ret);
+ td_verror(td, ret);
break;
} else if (!ret)
continue;
if (td->thinktime)
usec_sleep(td, td->thinktime);
-
- if (should_fsync(td) && td->fsync_blocks &&
- (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)
- td_io_sync(td, f);
}
if (!ret) {
if (should_fsync(td) && td->end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i)
- td_io_sync(td, f);
+ fio_io_sync(td, f);
}
}
}
-static int td_io_init(struct thread_data *td)
-{
- if (td->io_ops->init)
- return td->io_ops->init(td);
-
- return 0;
-}
-
static void cleanup_io_u(struct thread_data *td)
{
struct list_head *entry, *n;
FILE *f;
int ret;
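+ /* cpuio engines don't drive a block device, so there is no io scheduler to switch */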
+ if (td->io_ops->flags & FIO_CPUIO)
+ return 0;
+
sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);
f = fopen(tmp, "r+");
finish_log(td, td->slat_log, "slat");
if (td->clat_log)
finish_log(td, td->clat_log, "clat");
- if (td->write_iolog)
+ if (td->write_iolog_file)
write_iolog_close(td);
if (td->exec_postrun)
system(td->exec_postrun);