}
}
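+/*
+ * dump 'len' bytes of 'buffer' as hex, for inspecting mismatched digests
+ */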
+static void hexdump(void *buffer, int len)
+{
+ unsigned char *p = buffer;
+ int i;
+
+ for (i = 0; i < len; i++)
+ printf("%02x", p[i]);
+ printf("\n");
+}
+
static int verify_io_u(struct io_u *io_u)
{
struct verify_header *hdr = (struct verify_header *) io_u->buf;
unsigned char *p = (unsigned char *) io_u->buf;
struct md5_ctx md5_ctx;
+ int ret;
if (hdr->fio_magic != FIO_HDR_MAGIC)
return 1;
p += sizeof(*hdr);
md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));
- return memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
+ ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
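+ /*
+ * on a miscompare, dump the stored and the computed digest
+ */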
+ if (ret) {
+ hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
+ hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
+ }
+
+ return ret;
}
/*
return NULL;
}
- if (!td_read(td) && td->verify)
+ if (td->verify)
populate_io_u(td, io_u);
if (td->use_aio) {
return 0;
}
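+/*
+ * drop the entire io history, freeing each logged piece
+ */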
+static void prune_io_piece_log(struct thread_data *td)
+{
+ struct io_piece *ipo;
+
+ while (!list_empty(&td->io_hist_list)) {
+ ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
+
+ list_del(&ipo->list);
+ free(ipo);
+ }
+}
+
+/*
+ * if the ipos overlap, kill the old ipo
+ */
+static int ipo_overlap(struct io_piece *old, struct io_piece *new)
+{
+ unsigned long long old_end = old->offset + old->len;
+ unsigned long long new_end = new->offset + new->len;
+
+ if (new->offset < old_end && new_end > old->offset) {
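+ /*
+ * link the new piece in where the old one sits, then
+ * unlink and free the old
+ */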
+ list_add(&new->list, &old->list);
+ list_del(&old->list);
+ free(old);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * log a successful write, so we can unwind the log for verify
+ */
+static void log_io_piece(struct thread_data *td, struct io_u *io_u)
+{
+ struct io_piece *ipo = malloc(sizeof(*ipo));
+ struct list_head *entry;
+
+ INIT_LIST_HEAD(&ipo->list);
+ ipo->offset = io_u->offset;
+ ipo->len = io_u->buflen;
+
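+ /*
+ * sequential io arrives in offset order, so appending keeps
+ * the log sorted
+ */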
+ if (td->sequential) {
+ list_add_tail(&ipo->list, &td->io_hist_list);
+ return;
+ }
+
+ /*
+ * for random io, sort the list so verify will run faster
+ */
+ entry = &td->io_hist_list;
+ while ((entry = entry->prev) != &td->io_hist_list) {
+ struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
+
+ if (ipo_overlap(__ipo, ipo))
+ return;
+
+ if (__ipo->offset < ipo->offset)
+ break;
+ }
+
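+ /*
+ * 'entry' is now the first piece with a lower offset (or the
+ * list head), so inserting after it keeps the log sorted
+ */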
+ list_add(&ipo->list, entry);
+}
+
static void do_sync_verify(struct thread_data *td)
{
struct timeval t;
put_io_u(td, io_u);
}
-/*
- * log a succesful write, so we can unwind the log for verify
- */
-static void log_io_piece(struct thread_data *td, struct io_u *io_u)
-{
- struct io_piece *ipo = malloc(sizeof(*ipo));
- struct list_head *entry;
-
- INIT_LIST_HEAD(&ipo->list);
- ipo->offset = io_u->offset;
- ipo->len = io_u->buflen;
-
- if (td->sequential) {
- list_add_tail(&ipo->list, &td->io_hist_list);
- return;
- }
-
- /*
- * for random io, sort the list so verify will run faster
- */
- entry = &td->io_hist_list;
- while ((entry = entry->prev) != &td->io_hist_list) {
- struct io_piece *__ipo = list_entry(entry, struct io_piece, list);
-
- if (__ipo->offset == ipo->offset &&
- __ipo->len == ipo->len) {
- free(ipo);
- ipo = NULL;
- break;
- } else if (__ipo->offset < ipo->offset)
- break;
- }
-
- if (ipo)
- list_add(&ipo->list, entry);
-}
-
static void do_sync_io(struct thread_data *td)
{
unsigned long msec, usec;
static void clear_io_state(struct thread_data *td)
{
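+ /*
+ * rewind the file so the next loop starts from the beginning
+ */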
+ if (!td->use_aio)
+ lseek(td->fd, 0, SEEK_SET);
+
td->cur_off = 0;
td->last_kb = 0;
td->stat_io_kb = 0;
memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));
clear_io_state(td);
+ prune_io_piece_log(td);
if (!td->use_aio)
do_sync_io(td);