td->cur_depth--;
}
-static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+/*
+ * Append this io_u to the write iolog as one "ddir,offset,buflen" line.
+ * td->iolog_f is the log stream opened and buffered in init_iolog().
+ */
+static void write_iolog_put(struct thread_data *td, struct io_u *io_u)
{
- /*
- * If using an iolog, grab next piece if any available.
- */
- if (td->iolog) {
- struct io_piece *ipo;
+ fprintf(td->iolog_f, "%d,%llu,%u\n", io_u->ddir, io_u->offset, io_u->buflen);
+}
- if (list_empty(&td->io_log_list))
- return 1;
+/*
+ * Pop the next queued entry off the read iolog list and use its offset
+ * for this io_u. Returns 0 if an entry was consumed, 1 if the log is
+ * exhausted (caller treats that as end-of-workload).
+ *
+ * NOTE(review): only ->offset is copied from the entry here; ddir/buflen
+ * are presumably filled in elsewhere — confirm. The io_piece is unlinked
+ * via list_del() but not freed in this function; verify who owns it.
+ */
+static int read_iolog_get(struct thread_data *td, struct io_u *io_u)
+{
+ struct io_piece *ipo;
+ if (!list_empty(&td->io_log_list)) {
ipo = list_entry(td->io_log_list.next, struct io_piece, list);
list_del(&ipo->list);
io_u->offset = ipo->offset;
return 0;
}
+ return 1;
+}
+
+static int fill_io_u(struct thread_data *td, struct io_u *io_u)
+{
+ /*
+ * If using an iolog, grab next piece if any available.
+ */
+ if (td->read_iolog)
+ return read_iolog_get(td, io_u);
+
/*
* No log, let the seq/rand engine retrieve the next position.
*/
if (io_u->buflen) {
io_u->ddir = get_rw_ddir(td);
+
+ /*
+ * If using a write iolog, store this entry.
+ */
+ if (td->write_iolog)
+ write_iolog_put(td, io_u);
+
return 0;
}
}
return NULL;
}
- if (!td->iolog && !td->sequential)
+ if (!td->read_iolog && !td->sequential)
mark_random_map(td, io_u);
td->last_pos += io_u->buflen;
list_add(&ipo->list, entry);
}
+/*
+ * Tear down the write iolog: flush and close the stream opened in
+ * init_iolog(), then release the stdio buffer installed via setvbuf().
+ */
+static void write_iolog_close(struct thread_data *td)
+{
+ fflush(td->iolog_f); /* redundant — fclose() flushes — but harmless */
+ fclose(td->iolog_f);
+ free(td->iolog_buf); /* only after fclose(): the setvbuf() buffer must outlive the stream */
+}
+
static int init_iolog(struct thread_data *td)
{
unsigned long long offset;
FILE *f;
int rw, i, reads, writes;
- if (!td->iolog)
+ if (!td->read_iolog && !td->write_iolog)
return 0;
- f = fopen(td->iolog_file, "r");
+ if (td->read_iolog)
+ f = fopen(td->iolog_file, "r");
+ else
+ f = fopen(td->iolog_file, "w");
+
if (!f) {
perror("fopen iolog");
+ printf("file %s, %d/%d\n", td->iolog_file, td->read_iolog, td->write_iolog);
return 1;
}
+ /*
+ * That's it for writing, setup a log buffer and we're done.
+ */
+ if (td->write_iolog) {
+ td->iolog_f = f;
+ td->iolog_buf = malloc(8192);
+ setvbuf(f, td->iolog_buf, _IOFBF, 8192);
+ return 0;
+ }
+
+ /*
+ * Read in the read iolog and store it, reuse the infrastructure
+ * for doing verifications.
+ */
str = malloc(4096);
reads = writes = i = 0;
while ((p = fgets(str, 4096, f)) != NULL) {
sprintf(foo, "%s", p);
}
+ td->sysfs_root = strdup(foo);
disk_util_add(dev, foo);
}
+/*
+ * Switch the kernel io scheduler for this job's device by writing the
+ * requested name to <sysfs_root>/queue/scheduler, then reading the file
+ * back to verify the scheduler is now active (shown in brackets, eg
+ * "noop [cfq]"). Returns 0 on success, 1 on error with td error state set.
+ */
+static int switch_ioscheduler(struct thread_data *td)
+{
+ char tmp[256], tmp2[128];
+ FILE *f;
+ int ret;
+
+ snprintf(tmp, sizeof(tmp), "%s/queue/scheduler", td->sysfs_root);
+
+ f = fopen(tmp, "r+");
+ if (!f) {
+ td_verror(td, errno);
+ return 1;
+ }
+
+ /*
+ * Set io scheduler.
+ */
+ ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+ if (ferror(f) || ret != 1) {
+ td_verror(td, errno);
+ fclose(f);
+ return 1;
+ }
+
+ rewind(f);
+
+ /*
+ * Read back and check that the selected scheduler is now the default.
+ * Read at most sizeof(tmp) - 1 bytes and terminate explicitly: fread()
+ * does not NUL-terminate, and strstr() below requires a C string.
+ */
+ ret = fread(tmp, 1, sizeof(tmp) - 1, f);
+ if (ferror(f)) {
+ td_verror(td, errno);
+ fclose(f);
+ return 1;
+ }
+ tmp[ret] = '\0';
+
+ snprintf(tmp2, sizeof(tmp2), "[%s]", td->ioscheduler);
+ if (!strstr(tmp, tmp2)) {
+ fprintf(stderr, "fio: io scheduler %s not found\n", td->ioscheduler);
+ td_verror(td, EINVAL);
+ fclose(f);
+ return 1;
+ }
+
+ fclose(f);
+ return 0;
+}
+
static void disk_util_timer_arm(void)
{
itimer.it_value.tv_sec = 0;
if (init_random_state(td))
goto err;
+ if (td->ioscheduler && switch_ioscheduler(td))
+ goto err;
+
td_set_runstate(td, TD_INITIALIZED);
sem_post(&startup_sem);
sem_wait(&td->mutex);
gettimeofday(&td->epoch, NULL);
+ if (td->exec_prerun)
+ system(td->exec_prerun);
+
while (td->loops--) {
getrusage(RUSAGE_SELF, &td->ru_start);
gettimeofday(&td->start, NULL);
finish_log(td, td->slat_log, "slat");
if (td->clat_log)
finish_log(td, td->clat_log, "clat");
+ if (td->write_iolog)
+ write_iolog_close(td);
+ if (td->exec_postrun)
+ system(td->exec_postrun);
if (exitall_on_terminate)
terminate_threads(td->groupid);
}
if (td->mmap)
munmap(td->mmap, td->file_size);
+ if (td->directory)
+ free(td->directory);
+ if (td->iolog_file)
+ free(td->iolog_file);
+ if (td->exec_prerun)
+ free(td->exec_prerun);
+ if (td->exec_postrun)
+ free(td->exec_postrun);
+ if (td->ioscheduler)
+ free(td->ioscheduler);
+ if (td->sysfs_root)
+ free(td->sysfs_root);
cleanup_io(td);
cleanup_io_u(td);
td_set_runstate(td, TD_EXITED);