static void terminate_threads(int group_id)
{
+ struct thread_data *td;
int i;
- for (i = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
+ for_each_td(td, i) {
if (group_id == TERMINATE_ALL || group_id == td->groupid) {
td->terminate = 1;
td->start_delay = 0;
}
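
Most of the churn in this patch is the same mechanical conversion seen here: open-coded for (i = 0; i < thread_number; i++) loops become for_each_td() iterations. The macro itself is outside the hunk; a minimal sketch of what it presumably expands to, reusing the same threads array and thread_number globals the old loops read, is:

/* Sketch only; the real macro in fio's headers may differ in detail. */
extern struct thread_data threads[];
extern int thread_number;

#define for_each_td(td, i) \
	for ((i) = 0, (td) = &threads[0]; (i) < thread_number; (i)++, (td)++)

Centralising the loop also lets each caller declare td once at the top of the function, as terminate_threads() now does.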
-static void fill_random_bytes(struct thread_data *td,
- unsigned char *p, unsigned int len)
-{
- unsigned int todo;
- double r;
-
- while (len) {
- r = os_random_double(&td->verify_state);
-
- /*
- * lrand48_r seems to be broken and only fill the bottom
- * 32-bits, even on 64-bit archs with 64-bit longs
- */
- todo = sizeof(r);
- if (todo > len)
- todo = len;
-
- memcpy(p, &r, todo);
-
- len -= todo;
- p += todo;
- }
-}
-
-static void hexdump(void *buffer, int len)
-{
- unsigned char *p = buffer;
- int i;
-
- for (i = 0; i < len; i++)
- fprintf(f_out, "%02x", p[i]);
- fprintf(f_out, "\n");
-}
-
-static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
-{
- unsigned char *p = (unsigned char *) io_u->buf;
- unsigned long c;
-
- p += sizeof(*hdr);
- c = crc32(p, hdr->len - sizeof(*hdr));
-
- if (c != hdr->crc32) {
- log_err("crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
- log_err("crc32: wanted %lx, got %lx\n", hdr->crc32, c);
- return 1;
- }
-
- return 0;
-}
-
-static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
-{
- unsigned char *p = (unsigned char *) io_u->buf;
- struct md5_ctx md5_ctx;
-
- memset(&md5_ctx, 0, sizeof(md5_ctx));
- p += sizeof(*hdr);
- md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
-
- if (memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash))) {
- log_err("md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
- hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
- hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
- return 1;
- }
-
- return 0;
-}
-
-static int verify_io_u(struct io_u *io_u)
-{
- struct verify_header *hdr = (struct verify_header *) io_u->buf;
- int ret;
-
- if (hdr->fio_magic != FIO_HDR_MAGIC)
- return 1;
-
- if (hdr->verify_type == VERIFY_MD5)
- ret = verify_io_u_md5(hdr, io_u);
- else if (hdr->verify_type == VERIFY_CRC32)
- ret = verify_io_u_crc32(hdr, io_u);
- else {
- log_err("Bad verify type %d\n", hdr->verify_type);
- ret = 1;
- }
-
- return ret;
-}
-
-static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
-{
- hdr->crc32 = crc32(p, len);
-}
-
-static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
-{
- struct md5_ctx md5_ctx;
-
- memset(&md5_ctx, 0, sizeof(md5_ctx));
- md5_update(&md5_ctx, p, len);
- memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
-}
-
/*
* Return the data direction for the next io_u. If the job is a
* mixed read/write workload, check the rwmix cycle and switch if
* necessary.
*/
/*
* Check if it's time to seed a new data direction.
*/
if (elapsed >= td->rwmixcycle) {
- int v;
+ unsigned int v;
long r;
r = os_random_long(&td->rwmix_state);
return DDIR_WRITE;
}
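
Only the type of v changes in this hunk (int to unsigned int); the code that fills it is outside the context lines. A hedged reconstruction of the usual pattern, assuming td->rwmixread holds the read percentage and that os_random_long() yields a value in the RAND_MAX range, is:

#include <stdlib.h>

/*
 * Illustration only, not the exact fio code: scale the raw random
 * long onto 1..100 and choose the direction from the read percentage.
 */
static int pick_rwmix_ddir(struct thread_data *td)
{
	long r = os_random_long(&td->rwmix_state);
	unsigned int v = 1 + (unsigned int) (100.0 * (r / (RAND_MAX + 1.0)));

	return v <= td->rwmixread ? DDIR_READ : DDIR_WRITE;
}

A value in 1..100 can never be negative, which is why unsigned int is the more natural type for v.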
-/*
- * fill body of io_u->buf with random data and add a header with the
- * crc32 or md5 sum of that data.
- */
-static void populate_io_u(struct thread_data *td, struct io_u *io_u)
-{
- unsigned char *p = (unsigned char *) io_u->buf;
- struct verify_header hdr;
-
- hdr.fio_magic = FIO_HDR_MAGIC;
- hdr.len = io_u->buflen;
- p += sizeof(hdr);
- fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
-
- if (td->verify == VERIFY_MD5) {
- fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
- hdr.verify_type = VERIFY_MD5;
- } else {
- fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
- hdr.verify_type = VERIFY_CRC32;
- }
-
- memcpy(io_u->buf, &hdr, sizeof(hdr));
-}
-
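
For reference while this code moves to verify.c, the buffer layout populate_io_u() produces can be read off the removed functions above: a small header at the front of each written block, followed by random payload. The struct below is only a reconstruction from the fields used in this file; exact types, sizes and ordering are assumptions, the real definition lives in fio's headers.

/* Reconstruction for illustration only. */
struct verify_header {
	unsigned int fio_magic;		/* FIO_HDR_MAGIC */
	unsigned int len;		/* total buffer length, header included */
	unsigned int verify_type;	/* VERIFY_CRC32 or VERIFY_MD5 */
	unsigned long crc32;		/* filled for VERIFY_CRC32 */
	unsigned char md5_digest[16];	/* filled for VERIFY_MD5 (16-byte MD5 assumed) */
};

On write the header is filled in and copied to the start of the buffer; on verify the payload past the header is hashed again and compared with the stored checksum or digest.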
static int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
if (td->io_ops->prep && td->io_ops->prep(td, io_u))
f->last_pos += io_u->buflen;
if (td->verify != VERIFY_NONE)
- populate_io_u(td, io_u);
+ populate_verify_io_u(td, io_u);
if (td_io_prep(td, io_u)) {
put_io_u(td, io_u);
td->runstate = runstate;
}
-static int get_next_verify(struct thread_data *td, struct io_u *io_u)
-{
- struct io_piece *ipo;
-
- if (!list_empty(&td->io_hist_list)) {
- ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
-
- list_del(&ipo->list);
-
- io_u->offset = ipo->offset;
- io_u->buflen = ipo->len;
- io_u->ddir = DDIR_READ;
- free(ipo);
- return 0;
- }
-
- return 1;
-}
-
static struct fio_file *get_next_file(struct thread_data *td)
{
- int old_next_file = td->next_file;
+ unsigned int old_next_file = td->next_file;
struct fio_file *f;
do {
}
}
-static int do_io_u_verify(struct thread_data *td, struct io_u **io_u)
-{
- struct io_u *v_io_u = *io_u;
- int ret = 0;
-
- if (v_io_u) {
- ret = verify_io_u(v_io_u);
- put_io_u(td, v_io_u);
- *io_u = NULL;
- }
-
- return ret;
-}
-
/*
* The main verify engine. Runs over the writes we previously submitted,
* reads the blocks back in, and checks the crc/md5 of the data.
*/
-static void do_verify(struct thread_data *td)
+void do_verify(struct thread_data *td)
{
struct timeval t;
struct io_u *io_u, *v_io_u = NULL;
struct io_completion_data icd;
struct fio_file *f;
- int ret;
+ int ret, i;
+
+ /*
+ * sync io first and invalidate cache, to make sure we really
+ * read from disk.
+ */
+ for_each_file(td, f, i) {
+ td_io_sync(td, f);
+ file_invalidate_cache(td, f);
+ }
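
file_invalidate_cache() is not shown in this hunk. For a regular file, a plausible minimal version, assuming the page cache is dropped with posix_fadvise(), could look like the sketch below; the helper fio actually uses is more involved.

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <unistd.h>

/*
 * Hypothetical sketch: flush dirty pages, then ask the kernel to drop
 * cached pages so the verify reads that follow hit the device instead
 * of being served from memory.
 */
static int invalidate_file_cache(int fd, off_t len)
{
	if (fsync(fd) < 0)
		return -1;
	return posix_fadvise(fd, 0, len, POSIX_FADV_DONTNEED);
}

Without this step a verify pass could simply re-read the blocks it just wrote straight out of the page cache, which defeats the purpose of the check.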
td_set_runstate(td, TD_VERIFYING);
struct timeval s, e;
unsigned long usec;
struct fio_file *f;
- int i;
+ int i, ret = 0;
td_set_runstate(td, TD_RUNNING);
while (td->this_io_bytes[td->ddir] < td->io_size) {
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
struct timespec *timeout;
- int ret, min_evts = 0;
+ int min_evts = 0;
struct io_u *io_u;
if (td->terminate)
min_evts = 1;
}
+
ret = td_io_getevents(td, min_evts, td->cur_depth, timeout);
if (ret < 0) {
- td_verror(td, ret);
+ td_verror(td, -ret);
break;
} else if (!ret)
continue;
td_io_sync(td, f);
}
- if (td->cur_depth)
- cleanup_pending_aio(td);
+ if (!ret) {
+ if (td->cur_depth)
+ cleanup_pending_aio(td);
- if (should_fsync(td) && td->end_fsync) {
- td_set_runstate(td, TD_FSYNCING);
- for_each_file(td, f, i)
- td_io_sync(td, f);
+ if (should_fsync(td) && td->end_fsync) {
+ td_set_runstate(td, TD_FSYNCING);
+ for_each_file(td, f, i)
+ td_io_sync(td, f);
+ }
}
}
*/
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
+ struct thread_data *td;
int i, cputhreads;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
- for (i = 0, cputhreads = 0; i < thread_number; i++) {
- struct thread_data *td = &threads[i];
-
- if (td->io_ops->flags & FIO_CPUIO)
+ cputhreads = 0;
+ for_each_td(td, i) {
+ /*
+ * ->io_ops is NULL for a thread that has closed its
+ * io engine
+ */
+ if (td->io_ops && td->io_ops->flags & FIO_CPUIO)
cputhreads++;
if (td->runstate != TD_EXITED)
nr_started = 0;
m_rate = t_rate = 0;
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
print_status_init(td->thread_number - 1);
init_disk_util(td);
/*
* create threads (TD_NOT_CREATED -> TD_CREATED)
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_NOT_CREATED)
continue;
/*
* start created threads (TD_INITIALIZED -> TD_RUNNING).
*/
- for (i = 0; i < thread_number; i++) {
- td = &threads[i];
-
+ for_each_td(td, i) {
if (td->runstate != TD_INITIALIZED)
continue;
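
The comments in the startup loops above, together with the reaping loop (TD_EXITED -> TD_REAPED) in reap_threads(), outline the per-thread state machine these hunks walk. The enum below is only a hypothetical summary assembled from the states named in this file; the actual definition and ordering live in fio's headers.

/* Hypothetical summary for illustration; not the real definition. */
enum td_runstates {
	TD_NOT_CREATED = 0,	/* job parsed, thread/process not forked yet */
	TD_CREATED,		/* forked, still initializing */
	TD_INITIALIZED,		/* io engine ready, waiting for the start signal */
	TD_RUNNING,		/* submitting and reaping io */
	TD_VERIFYING,		/* reading written data back and checking it */
	TD_FSYNCING,		/* end-of-run fsync */
	TD_EXITED,		/* main loop done, thread not yet reaped */
	TD_REAPED,		/* accounted for by reap_threads() */
};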