static int fio_cpuio_queue(struct thread_data *td, struct io_u fio_unused *io_u)
{
- __usec_sleep(td->cpucycle);
+ __usec_sleep(td->o.cpucycle);
return FIO_Q_COMPLETED;
}
struct fio_file *f;
unsigned int i;
- td->total_file_size = -1;
- td->io_size = td->total_file_size;
+ td->o.size = -1;
+ td->io_size = td->o.size;
td->total_io_size = td->io_size;
for_each_file(td, f, i) {
static int fio_cpuio_init(struct thread_data *td)
{
- if (!td->cpuload) {
+ struct thread_options *o = &td->o;
+
+ if (!o->cpuload) {
td_vmsg(td, EINVAL, "cpu thread needs rate (cpuload=)","cpuio");
return 1;
}
- if (td->cpuload > 100)
- td->cpuload = 100;
+ if (o->cpuload > 100)
+ o->cpuload = 100;
/*
* set thinktime_sleep and thinktime_spin appropriately
*/
- td->thinktime_blocks = 1;
- td->thinktime_spin = 0;
- td->thinktime = (td->cpucycle * (100 - td->cpuload)) / td->cpuload;
+ o->thinktime_blocks = 1;
+ o->thinktime_spin = 0;
+ o->thinktime = (o->cpucycle * (100 - o->cpuload)) / o->cpuload;
- td->nr_files = td->open_files = 1;
+ o->nr_files = o->open_files = 1;
return 0;
}
{
struct libaio_data *ld = td->io_ops->data;
- if (ld->iocbs_nr == (int) td->iodepth)
+ if (ld->iocbs_nr == (int) td->o.iodepth)
return FIO_Q_BUSY;
/*
struct libaio_data *ld = malloc(sizeof(*ld));
memset(ld, 0, sizeof(*ld));
- if (io_queue_init(td->iodepth, &ld->aio_ctx)) {
+ if (io_queue_init(td->o.iodepth, &ld->aio_ctx)) {
td_verror(td, errno, "io_queue_init");
free(ld);
return 1;
}
- ld->aio_events = malloc(td->iodepth * sizeof(struct io_event));
- memset(ld->aio_events, 0, td->iodepth * sizeof(struct io_event));
- ld->iocbs = malloc(td->iodepth * sizeof(struct iocb *));
+ ld->aio_events = malloc(td->o.iodepth * sizeof(struct io_event));
+ memset(ld->aio_events, 0, td->o.iodepth * sizeof(struct io_event));
+ ld->iocbs = malloc(td->o.iodepth * sizeof(struct iocb *));
memset(ld->iocbs, 0, sizeof(struct iocb *));
- ld->io_us = malloc(td->iodepth * sizeof(struct io_u *));
- memset(ld->io_us, 0, td->iodepth * sizeof(struct io_u *));
+ ld->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
+ memset(ld->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
ld->iocbs_nr = 0;
td->io_ops->data = ld;
/*
* not really direct, but should drop the pages from the cache
*/
- if (td->odirect && io_u->ddir != DDIR_SYNC) {
+ if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
unsigned long long off = real_off & ~page_mask;
else if (td_write(td)) {
flags = PROT_WRITE;
- if (td->verify != VERIFY_NONE)
+ if (td->o.verify != VERIFY_NONE)
flags |= PROT_READ;
} else
flags = PROT_READ;
char *sep;
int ret;
- if (!td->total_file_size) {
+ if (!td->o.size) {
log_err("fio: need size= set\n");
return 1;
}
return 1;
}
- strcpy(buf, td->filename);
+ strcpy(buf, td->o.filename);
sep = strchr(buf, '/');
if (!sep) {
- log_err("fio: bad network host/port <<%s>>\n", td->filename);
+ log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
return 1;
}
if (ret)
return ret;
- td->io_size = td->total_file_size;
+ td->io_size = td->o.size;
td->total_io_size = td->io_size;
for_each_file(td, f, i) {
- f->file_size = td->total_file_size / td->nr_files;
+ f->file_size = td->o.size / td->o.nr_files;
f->real_file_size = f->file_size;
}
struct fio_file *f;
unsigned int i;
- if (!td->total_file_size) {
+ if (!td->o.size) {
log_err("fio: need size= set\n");
return 1;
}
- td->io_size = td->total_file_size;
+ td->io_size = td->o.size;
td->total_io_size = td->io_size;
for_each_file(td, f, i) {
- f->real_file_size = td->total_io_size / td->nr_files;
+ f->real_file_size = td->total_io_size / td->o.nr_files;
f->file_size = f->real_file_size;
}
memset(nd, 0, sizeof(*nd));
- if (td->iodepth != 1) {
- nd->io_us = malloc(td->iodepth * sizeof(struct io_u *));
- memset(nd->io_us, 0, td->iodepth * sizeof(struct io_u *));
+ if (td->o.iodepth != 1) {
+ nd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
+ memset(nd->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
} else
td->io_ops->flags |= FIO_SYNCIO;
struct posixaio_data *pd = malloc(sizeof(*pd));
memset(pd, 0, sizeof(*pd));
- pd->aio_events = malloc(td->iodepth * sizeof(struct io_u *));
- memset(pd->aio_events, 0, td->iodepth * sizeof(struct io_u *));
+ pd->aio_events = malloc(td->o.iodepth * sizeof(struct io_u *));
+ memset(pd->aio_events, 0, td->o.iodepth * sizeof(struct io_u *));
td->io_ops->data = pd;
return 0;
if (!min)
break;
- ret = poll(sd->pfds, td->nr_files, -1);
+ ret = poll(sd->pfds, td->o.nr_files, -1);
if (ret < 0) {
if (!r)
r = -errno;
} else if (!ret)
continue;
- if (pollin_events(sd->pfds, td->nr_files))
+ if (pollin_events(sd->pfds, td->o.nr_files))
break;
} while (1);
sd = malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
- sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
- memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
- sd->events = malloc(td->iodepth * sizeof(struct io_u *));
- memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
- sd->pfds = malloc(sizeof(struct pollfd) * td->nr_files);
- memset(sd->pfds, 0, sizeof(struct pollfd) * td->nr_files);
- sd->fd_flags = malloc(sizeof(int) * td->nr_files);
- memset(sd->fd_flags, 0, sizeof(int) * td->nr_files);
- sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->iodepth);
- memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->iodepth);
+ sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
+ memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
+ sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
+ memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
+ sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
+ memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
+ sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
+ memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
+ sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
+ memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);
td->io_ops->data = sd;
/*
* we want to do it, regardless of whether odirect is set or not
*/
- td->override_sync = 1;
+ td->o.override_sync = 1;
return 0;
}
else if (ret < 0)
io_u->error = ret;
- assert(sd->nr_events < td->iodepth);
+ assert(sd->nr_events < td->o.iodepth);
sd->events[sd->nr_events++] = io_u;
if (atom == last)
break;
sd->ring[sd->ahu.user_ring_idx] = NULL;
- if (++sd->ahu.user_ring_idx == td->iodepth)
+ if (++sd->ahu.user_ring_idx == td->o.iodepth)
sd->ahu.user_ring_idx = 0;
fio_syslet_complete_atom(td, atom);
sd = malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
- sd->events = malloc(sizeof(struct io_u *) * td->iodepth);
- memset(sd->events, 0, sizeof(struct io_u *) * td->iodepth);
+ sd->events = malloc(sizeof(struct io_u *) * td->o.iodepth);
+ memset(sd->events, 0, sizeof(struct io_u *) * td->o.iodepth);
/*
* This will handily fail for kernels where syslet isn't available
*/
- if (async_head_init(sd, td->iodepth)) {
+ if (async_head_init(sd, td->o.iodepth)) {
free(sd->events);
free(sd);
return 1;
* if writing, bytes_total will be twice the size. If mixing,
* assume a 50/50 split and thus bytes_total will be 50% larger.
*/
- if (td->verify) {
+ if (td->o.verify) {
if (td_rw(td))
bytes_total = bytes_total * 3 / 2;
else
bytes_total <<= 1;
}
- if (td->zone_size && td->zone_skip)
- bytes_total /= (td->zone_skip / td->zone_size);
+ if (td->o.zone_size && td->o.zone_skip)
+ bytes_total /= (td->o.zone_skip / td->o.zone_size);
if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
double perc;
eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;
- if (td->timeout && eta_sec > (td->timeout - elapsed))
- eta_sec = td->timeout - elapsed;
+ if (td->o.timeout && eta_sec > (td->o.timeout - elapsed))
+ eta_sec = td->o.timeout - elapsed;
} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
|| td->runstate == TD_INITIALIZED) {
int t_eta = 0, r_eta = 0;
* We can only guess - assume it'll run the full timeout
* if given, otherwise assume it'll run at the specified rate.
*/
- if (td->timeout)
- t_eta = td->timeout + td->start_delay - elapsed;
- if (td->rate) {
- r_eta = (bytes_total / 1024) / td->rate;
- r_eta += td->start_delay - elapsed;
+ if (td->o.timeout)
+ t_eta = td->o.timeout + td->o.start_delay - elapsed;
+ if (td->o.rate) {
+ r_eta = (bytes_total / 1024) / td->o.rate;
+ r_eta += td->o.start_delay - elapsed;
}
if (r_eta && t_eta)
nr_pending = nr_running = t_rate = m_rate = 0;
bw_avg_time = ULONG_MAX;
for_each_td(td, i) {
- if (td->bw_avg_time < bw_avg_time)
- bw_avg_time = td->bw_avg_time;
+ if (td->o.bw_avg_time < bw_avg_time)
+ bw_avg_time = td->o.bw_avg_time;
if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
|| td->runstate == TD_FSYNCING) {
nr_running++;
- t_rate += td->rate;
- m_rate += td->ratemin;
+ t_rate += td->o.rate;
+ m_rate += td->o.ratemin;
} else if (td->runstate < TD_RUNNING)
nr_pending++;
goto err;
}
- b = malloc(td->max_bs[DDIR_WRITE]);
- memset(b, 0, td->max_bs[DDIR_WRITE]);
+ b = malloc(td->o.max_bs[DDIR_WRITE]);
+ memset(b, 0, td->o.max_bs[DDIR_WRITE]);
left = f->file_size;
while (left && !td->terminate) {
- bs = td->max_bs[DDIR_WRITE];
+ bs = td->o.max_bs[DDIR_WRITE];
if (bs > left)
bs = left;
if (td->terminate)
unlink(f->file_name);
- else if (td->create_fsync)
+ else if (td->o.create_fsync)
fsync(f->fd);
free(b);
unsigned long long ret;
long r;
- if (upper > td->file_size_high)
- upper = td->file_size_high;
- else if (upper < td->file_size_low)
+ if (upper > td->o.file_size_high)
+ upper = td->o.file_size_high;
+ else if (upper < td->o.file_size_low)
return 0;
else if (!upper)
return 0;
r = os_random_long(&td->file_size_state);
- ret = td->file_size_low + (unsigned long long) ((double) upper * (r / (RAND_MAX + 1.0)));
- ret -= (ret % td->rw_min_bs);
+ ret = td->o.file_size_low + (unsigned long long) ((double) upper * (r / (RAND_MAX + 1.0)));
+ ret -= (ret % td->o.rw_min_bs);
if (ret > upper)
ret = upper;
return ret;
unsigned int i, new_files;
new_files = 0;
- total_file_size = td->total_file_size;
+ total_file_size = td->o.size;
for_each_file(td, f, i) {
unsigned long long s;
- f->file_offset = td->start_offset;
+ f->file_offset = td->o.start_offset;
if (f->filetype != FIO_TYPE_FILE)
continue;
/*
* unless specifically asked for overwrite, let normal io extend it
*/
- can_extend = !td->overwrite && !(td->io_ops->flags & FIO_NOEXTEND);
+ can_extend = !td->o.overwrite && !(td->io_ops->flags & FIO_NOEXTEND);
if (can_extend)
return 0;
continue;
}
- if (!td->file_size_low)
+ if (!td->o.file_size_low)
f->file_size = total_file_size / new_files;
else {
/*
* If we don't have enough space left for a file
* of the minimum size, bail.
*/
- if (local_file_size < td->file_size_low) {
+ if (local_file_size < td->o.file_size_low) {
log_info("fio: limited to %d files\n", i);
- new_files -= (td->nr_files - i);
- td->nr_files = i;
+ new_files -= (td->o.nr_files - i);
+ td->o.nr_files = i;
break;
}
create_size += f->file_size;
file_there = !file_ok(td, f);
- if (file_there && td_write(td) && !td->overwrite) {
+ if (file_there && td_write(td) && !td->o.overwrite) {
unlink(f->file_name);
file_there = 0;
}
if (!need_create)
return 0;
- if (!td->total_file_size && !total_file_size) {
+ if (!td->o.size && !total_file_size) {
log_err("Need size for create\n");
td_verror(td, EINVAL, "file_size");
return 1;
temp_stall_ts = 1;
log_info("%s: Laying out IO file(s) (%u files / %LuMiB)\n",
- td->name, new_files, create_size >> 20);
+ td->o.name, new_files, create_size >> 20);
err = 0;
for_each_file(td, f, i) {
*/
f->flags &= ~FIO_FILE_UNLINK;
if (file_ok(td, f)) {
- if (td->unlink)
+ if (td->o.unlink)
f->flags |= FIO_FILE_UNLINK;
err = create_file(td, f);
{
struct stat st;
- if (td->overwrite) {
+ if (td->o.overwrite) {
if (fstat(f->fd, &st) == -1) {
td_verror(td, errno, "fstat");
return 1;
return ret;
if (f->file_offset > f->real_file_size) {
- log_err("%s: offset extends end (%Lu > %Lu)\n", td->name, f->file_offset, f->real_file_size);
+ log_err("%s: offset extends end (%Lu > %Lu)\n", td->o.name, f->file_offset, f->real_file_size);
return 1;
}
{
int ret = 0;
- if (td->odirect)
+ if (td->o.odirect)
return 0;
/*
{
int flags = 0;
- if (td->odirect)
+ if (td->o.odirect)
flags |= OS_O_DIRECT;
- if (td->sync_io)
+ if (td->o.sync_io)
flags |= O_SYNC;
if (td_write(td) || td_rw(td)) {
int __e = errno;
td_verror(td, __e, "open");
- if (__e == EINVAL && td->odirect)
+ if (__e == EINVAL && td->o.odirect)
log_err("fio: destination does not support O_DIRECT\n");
if (__e == EMFILE)
- log_err("fio: try reducing/setting openfiles (failed at %u of %u)\n", td->nr_open_files, td->nr_files);
+ log_err("fio: try reducing/setting openfiles (failed at %u of %u)\n", td->nr_open_files, td->o.nr_files);
return 1;
}
if (get_file_size(td, f))
goto err;
- if (td->invalidate_cache && file_invalidate_cache(td, f))
+ if (td->o.invalidate_cache && file_invalidate_cache(td, f))
goto err;
if (!td_random(td)) {
if (err)
break;
- if (td->open_files == td->nr_open_files)
+ if (td->o.open_files == td->nr_open_files)
break;
}
/*
* Recalculate the total file size now that files are set up.
*/
- td->total_file_size = 0;
+ td->o.size = 0;
for_each_file(td, f, i)
- td->total_file_size += f->file_size;
+ td->o.size += f->file_size;
- td->io_size = td->total_file_size;
+ td->io_size = td->o.size;
if (td->io_size == 0) {
- log_err("%s: no io blocks\n", td->name);
+ log_err("%s: no io blocks\n", td->o.name);
td_verror(td, EINVAL, "total_file_size");
return 1;
}
- if (!td->zone_size)
- td->zone_size = td->io_size;
+ if (!td->o.zone_size)
+ td->o.zone_size = td->io_size;
- td->total_io_size = td->io_size * td->loops;
+ td->total_io_size = td->io_size * td->o.loops;
for_each_file(td, f, i)
td_io_close_file(td, f);
free(f->file_map);
}
- td->filename = NULL;
+ td->o.filename = NULL;
td->files = NULL;
- td->nr_files = 0;
+ td->o.nr_files = 0;
}
static void get_file_type(struct fio_file *f)
if (--f->references)
return;
- if (should_fsync(td) && td->fsync_on_close)
+ if (should_fsync(td) && td->o.fsync_on_close)
fsync(f->fd);
if (td->io_ops->close_file)
if (S_ISREG(sb.st_mode)) {
add_file(td, full_path);
- td->nr_files++;
+ td->o.nr_files++;
continue;
}
if (td->runstate < TD_RUNNING)
kill(td->pid, SIGQUIT);
td->terminate = 1;
- td->start_delay = 0;
+ td->o.start_delay = 0;
}
}
}
/*
* No minimum rate set, always ok
*/
- if (!td->ratemin && !td->rate_iops_min)
+ if (!td->o.ratemin && !td->o.rate_iops_min)
return 0;
/*
*/
if (td->rate_bytes || td->rate_blocks) {
spent = mtime_since(&td->lastrate, now);
- if (spent < td->ratecycle)
+ if (spent < td->o.ratecycle)
return 0;
- if (td->rate) {
+ if (td->o.rate) {
/*
* check bandwidth specified rate
*/
if (bytes < td->rate_bytes) {
- log_err("%s: min rate %u not met\n", td->name, td->ratemin);
+ log_err("%s: min rate %u not met\n", td->o.name, td->o.ratemin);
return 1;
} else {
rate = (bytes - td->rate_bytes) / spent;
- if (rate < td->ratemin || bytes < td->rate_bytes) {
- log_err("%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+ if (rate < td->o.ratemin || bytes < td->rate_bytes) {
+ log_err("%s: min rate %u not met, got %luKiB/sec\n", td->o.name, td->o.ratemin, rate);
return 1;
}
}
/*
* checks iops specified rate
*/
- if (iops < td->rate_iops) {
- log_err("%s: min iops rate %u not met\n", td->name, td->rate_iops);
+ if (iops < td->o.rate_iops) {
+ log_err("%s: min iops rate %u not met\n", td->o.name, td->o.rate_iops);
return 1;
} else {
rate = (iops - td->rate_blocks) / spent;
- if (rate < td->rate_iops_min || iops < td->rate_blocks) {
- log_err("%s: min iops rate %u not met, got %lu\n", td->name, td->rate_iops_min, rate);
+ if (rate < td->o.rate_iops_min || iops < td->rate_blocks) {
+ log_err("%s: min iops rate %u not met, got %lu\n", td->o.name, td->o.rate_iops_min, rate);
}
}
}
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
- if (!td->timeout)
+ if (!td->o.timeout)
return 0;
- if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
+ if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000)
return 1;
return 0;
if (queue_full(td) || ret == FIO_Q_BUSY) {
min_events = 1;
- if (td->cur_depth > td->iodepth_low)
- min_events = td->cur_depth - td->iodepth_low;
+ if (td->cur_depth > td->o.iodepth_low)
+ min_events = td->cur_depth - td->o.iodepth_low;
}
/*
if (queue_full(td) || ret == FIO_Q_BUSY) {
min_evts = 1;
- if (td->cur_depth > td->iodepth_low)
- min_evts = td->cur_depth - td->iodepth_low;
+ if (td->cur_depth > td->o.iodepth_low)
+ min_evts = td->cur_depth - td->o.iodepth_low;
}
fio_gettime(&comp_time, NULL);
break;
}
- if (td->thinktime) {
+ if (td->o.thinktime) {
unsigned long long b;
b = td->io_blocks[0] + td->io_blocks[1];
- if (!(b % td->thinktime_blocks)) {
+ if (!(b % td->o.thinktime_blocks)) {
int left;
- if (td->thinktime_spin)
- __usec_sleep(td->thinktime_spin);
+ if (td->o.thinktime_spin)
+ __usec_sleep(td->o.thinktime_spin);
- left = td->thinktime - td->thinktime_spin;
+ left = td->o.thinktime - td->o.thinktime_spin;
if (left)
usec_sleep(td, left);
}
if (i)
ret = io_u_queued_complete(td, i);
- if (should_fsync(td) && td->end_fsync) {
+ if (should_fsync(td) && td->o.end_fsync) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i) {
if (td->io_ops->flags & FIO_SYNCIO)
max_units = 1;
else
- max_units = td->iodepth;
+ max_units = td->o.iodepth;
- max_bs = max(td->max_bs[DDIR_READ], td->max_bs[DDIR_WRITE]);
+ max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
td->orig_buffer_size = max_bs * max_units;
- if (td->mem_type == MEM_SHMHUGE || td->mem_type == MEM_MMAPHUGE)
- td->orig_buffer_size = (td->orig_buffer_size + td->hugepage_size - 1) & ~(td->hugepage_size - 1);
+ if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE)
+ td->orig_buffer_size = (td->orig_buffer_size + td->o.hugepage_size - 1) & ~(td->o.hugepage_size - 1);
else
td->orig_buffer_size += page_mask;
/*
* Set io scheduler.
*/
- ret = fwrite(td->ioscheduler, strlen(td->ioscheduler), 1, f);
+ ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
if (ferror(f) || ret != 1) {
td_verror(td, errno, "fwrite");
fclose(f);
return 1;
}
- sprintf(tmp2, "[%s]", td->ioscheduler);
+ sprintf(tmp2, "[%s]", td->o.ioscheduler);
if (!strstr(tmp, tmp2)) {
- log_err("fio: io scheduler %s not found\n", td->ioscheduler);
+ log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
td_verror(td, EINVAL, "iosched_switch");
fclose(f);
return 1;
struct thread_data *td = data;
int clear_state;
- if (!td->use_thread)
+ if (!td->o.use_thread)
setsid();
td->pid = getpid();
}
}
- if (nice(td->nice) == -1) {
+ if (nice(td->o.nice) == -1) {
td_verror(td, errno, "nice");
goto err_sem;
}
- if (td->ioscheduler && switch_ioscheduler(td))
+ if (td->o.ioscheduler && switch_ioscheduler(td))
goto err_sem;
td_set_runstate(td, TD_INITIALIZED);
*/
fio_sem_remove(td->mutex);
- if (!td->create_serialize && setup_files(td))
+ if (!td->o.create_serialize && setup_files(td))
goto err;
if (td_io_init(td))
if (open_files(td))
goto err;
- if (td->exec_prerun) {
- if (system(td->exec_prerun) < 0)
+ if (td->o.exec_prerun) {
+ if (system(td->o.exec_prerun) < 0)
goto err;
}
runtime[0] = runtime[1] = 0;
clear_state = 0;
- while (td->loops--) {
+ while (td->o.loops--) {
fio_gettime(&td->start, NULL);
memcpy(&td->ts.stat_sample_time, &td->start, sizeof(td->start));
- if (td->ratemin)
+ if (td->o.ratemin)
memcpy(&td->lastrate, &td->ts.stat_sample_time, sizeof(td->lastrate));
if (clear_state && clear_io_state(td))
if (td->error || td->terminate)
break;
- if (td->verify == VERIFY_NONE)
+ if (td->o.verify == VERIFY_NONE)
continue;
if (clear_io_state(td))
finish_log(td, td->ts.slat_log, "slat");
if (td->ts.clat_log)
finish_log(td, td->ts.clat_log, "clat");
- if (td->write_iolog_file)
+ if (td->o.write_iolog_file)
write_iolog_close(td);
- if (td->exec_postrun) {
- if (system(td->exec_postrun) < 0)
- log_err("fio: postrun %s failed\n", td->exec_postrun);
+ if (td->o.exec_postrun) {
+ if (system(td->o.exec_postrun) < 0)
+ log_err("fio: postrun %s failed\n", td->o.exec_postrun);
}
if (exitall_on_terminate)
if (!td->pid || td->runstate == TD_REAPED)
continue;
- if (td->use_thread) {
+ if (td->o.use_thread) {
if (td->runstate == TD_EXITED) {
td_set_runstate(td, TD_REAPED);
goto reaped;
*/
continue;
reaped:
- if (td->use_thread) {
+ if (td->o.use_thread) {
long ret;
if (pthread_join(td->thread, (void *) &ret))
}
(*nr_running)--;
- (*m_rate) -= td->ratemin;
- (*t_rate) -= td->rate;
+ (*m_rate) -= td->o.ratemin;
+ (*t_rate) -= td->o.rate;
if (td->error)
exit_value++;
for_each_td(td, i) {
print_status_init(td->thread_number - 1);
- if (!td->create_serialize) {
+ if (!td->o.create_serialize) {
init_disk_util(td);
continue;
}
continue;
}
- if (td->start_delay) {
+ if (td->o.start_delay) {
spent = mtime_since_genesis();
- if (td->start_delay * 1000 > spent)
+ if (td->o.start_delay * 1000 > spent)
continue;
}
- if (td->stonewall && (nr_started || nr_running))
+ if (td->o.stonewall && (nr_started || nr_running))
break;
/*
map[this_jobs++] = td;
nr_started++;
- if (td->use_thread) {
+ if (td->o.use_thread) {
if (pthread_create(&td->thread, NULL, thread_main, td)) {
perror("thread_create");
nr_started--;
td_set_runstate(td, TD_RUNNING);
nr_running++;
nr_started--;
- m_rate += td->ratemin;
- t_rate += td->rate;
+ m_rate += td->o.ratemin;
+ t_rate += td->o.rate;
todo--;
fio_sem_up(td->mutex);
}
unsigned long total_run_time;
};
-/*
- * This describes a single thread/process executing a fio job.
- */
-struct thread_data {
+struct thread_options {
int pad;
char *description;
char *name;
char *directory;
char *filename;
+ char *opendir;
char *ioengine;
- char verror[128];
- pthread_t thread;
- int thread_number;
- int groupid;
- struct thread_stat ts;
- struct fio_file *files;
- unsigned int files_index;
- unsigned int nr_files;
- unsigned int nr_open_files;
- unsigned int nr_normal_files;
- union {
- unsigned int next_file;
- os_random_state_t next_file_state;
- };
- int error;
- pid_t pid;
- char *orig_buffer;
- size_t orig_buffer_size;
- volatile int terminate;
- volatile int runstate;
enum td_ddir td_ddir;
- unsigned int ioprio;
- unsigned int last_was_sync;
+ unsigned int iodepth;
+ unsigned int iodepth_low;
+ unsigned int iodepth_batch;
+
+ unsigned long long size;
+ unsigned long long file_size_low;
+ unsigned long long file_size_high;
+ unsigned long long start_offset;
+
+ unsigned int bs[2];
+ unsigned int min_bs[2];
+ unsigned int max_bs[2];
+
+ unsigned int nr_files;
+ unsigned int open_files;
unsigned int odirect;
unsigned int invalidate_cache;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
- unsigned int bs[2];
- unsigned int min_bs[2];
- unsigned int max_bs[2];
unsigned int hugepage_size;
unsigned int rw_min_bs;
unsigned int thinktime;
unsigned long long zone_size;
unsigned long long zone_skip;
enum fio_memtype mem_type;
- char *mmapfile;
- int mmapfd;
+
unsigned int stonewall;
unsigned int numjobs;
- unsigned int iodepth;
- unsigned int iodepth_low;
- unsigned int iodepth_batch;
os_cpu_mask_t cpumask;
unsigned int iolog;
unsigned int read_iolog;
unsigned int nice;
unsigned int file_service_type;
unsigned int group_reporting;
- unsigned int open_files;
- char *opendir;
char *read_iolog_file;
char *write_iolog_file;
+
+ /*
+ * Pre-run and post-run shell
+ */
+ char *exec_prerun;
+ char *exec_postrun;
+
+ unsigned int rate;
+ unsigned int ratemin;
+ unsigned int ratecycle;
+ unsigned int rate_iops;
+ unsigned int rate_iops_min;
+
+ char *ioscheduler;
+
+ /*
+ * CPU "io" cycle burner
+ */
+ unsigned int cpuload;
+ unsigned int cpucycle;
+};
+
+/*
+ * This describes a single thread/process executing a fio job.
+ */
+struct thread_data {
+ struct thread_options o;
+ char verror[128];
+ pthread_t thread;
+ int thread_number;
+ int groupid;
+ struct thread_stat ts;
+ struct fio_file *files;
+ unsigned int files_index;
+ unsigned int nr_open_files;
+ unsigned int nr_normal_files;
+ union {
+ unsigned int next_file;
+ os_random_state_t next_file_state;
+ };
+ int error;
+ pid_t pid;
+ char *orig_buffer;
+ size_t orig_buffer_size;
+ volatile int terminate;
+ volatile int runstate;
+ unsigned int ioprio;
+ unsigned int last_was_sync;
+
+ char *mmapfile;
+ int mmapfd;
+
void *iolog_buf;
FILE *iolog_f;
char *sysfs_root;
- char *ioscheduler;
os_random_state_t bsrange_state;
os_random_state_t verify_state;
/*
* Rate state
*/
- unsigned int rate;
- unsigned int ratemin;
- unsigned int ratecycle;
- unsigned int rate_iops;
- unsigned int rate_iops_min;
unsigned long rate_usec_cycle;
long rate_pending_usleep;
unsigned long rate_bytes;
struct timeval lastrate;
unsigned long long io_size;
- unsigned long long total_file_size;
- unsigned long long start_offset;
unsigned long long total_io_size;
unsigned long io_issues[2];
*/
os_random_state_t random_state;
- /*
- * CPU "io" cycle burner
- */
- unsigned int cpuload;
- unsigned int cpucycle;
-
struct timeval start; /* start of this loop */
struct timeval epoch; /* time job was started */
struct timeval rwmix_switch;
enum fio_ddir rwmix_ddir;
- /*
- * Pre-run and post-run shell
- */
- char *exec_prerun;
- char *exec_postrun;
-
/*
* IO historic logs
*/
* For generating file sizes
*/
os_random_state_t file_size_state;
- unsigned long long file_size_low;
- unsigned long long file_size_high;
};
/*
extern struct thread_data *threads;
-#define td_read(td) ((td)->td_ddir & TD_DDIR_READ)
-#define td_write(td) ((td)->td_ddir & TD_DDIR_WRITE)
-#define td_rw(td) (((td)->td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
-#define td_random(td) ((td)->td_ddir & TD_DDIR_RAND)
+#define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
+#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
+#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
+#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
#define BLOCKS_PER_MAP (8 * sizeof(long))
-#define TO_MAP_BLOCK(td, f, b) ((b) - ((f)->file_offset / (td)->rw_min_bs))
+#define TO_MAP_BLOCK(td, f, b) ((b) - ((f)->file_offset / (td)->o.rw_min_bs))
#define RAND_MAP_IDX(td, f, b) (TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
#define RAND_MAP_BIT(td, f, b) (TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
{
if (td->last_was_sync)
return 0;
- if (td->odirect)
+ if (td->o.odirect)
return 0;
- if (td_write(td) || td_rw(td) || td->override_sync)
+ if (td_write(td) || td_rw(td) || td->o.override_sync)
return 1;
return 0;
#define for_each_td(td, i) \
for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i) \
- for ((i) = 0, (f) = &(td)->files[0]; (i) < (td)->nr_files; (i)++, (f)++)
+ for ((i) = 0, (f) = &(td)->files[0]; (i) < (td)->o.nr_files; (i)++, (f)++)
#define fio_assert(td, cond) do { \
if (!(cond)) { \
unsigned long long rate;
unsigned int bs;
- if (!td->rate && !td->rate_iops)
+ if (!td->o.rate && !td->o.rate_iops)
return 0;
if (td_rw(td))
- bs = td->rw_min_bs;
+ bs = td->o.rw_min_bs;
else if (td_read(td))
- bs = td->min_bs[DDIR_READ];
+ bs = td->o.min_bs[DDIR_READ];
else
- bs = td->min_bs[DDIR_WRITE];
+ bs = td->o.min_bs[DDIR_WRITE];
- if (td->rate) {
- rate = td->rate;
+ if (td->o.rate) {
+ rate = td->o.rate;
nr_reads_per_msec = (rate * 1024 * 1000LL) / bs;
} else
- nr_reads_per_msec = td->rate_iops * 1000UL;
+ nr_reads_per_msec = td->o.rate_iops * 1000UL;
if (!nr_reads_per_msec) {
log_err("rate lower than supported\n");
*/
static int fixup_options(struct thread_data *td)
{
- if (!td->rwmixread && td->rwmixwrite)
- td->rwmixread = 100 - td->rwmixwrite;
+ struct thread_options *o = &td->o;
- if (td->write_iolog_file && td->read_iolog_file) {
+ if (!o->rwmixread && o->rwmixwrite)
+ o->rwmixread = 100 - o->rwmixwrite;
+
+ if (o->write_iolog_file && o->read_iolog_file) {
log_err("fio: read iolog overrides write_iolog\n");
- free(td->write_iolog_file);
- td->write_iolog_file = NULL;
+ free(o->write_iolog_file);
+ o->write_iolog_file = NULL;
}
if (td->io_ops->flags & FIO_SYNCIO)
- td->iodepth = 1;
+ o->iodepth = 1;
else {
- if (!td->iodepth)
- td->iodepth = td->open_files;
+ if (!o->iodepth)
+ o->iodepth = o->open_files;
}
/*
* only really works for sequential io for now, and with 1 file
*/
- if (td->zone_size && td_random(td) && td->open_files == 1)
- td->zone_size = 0;
+ if (o->zone_size && td_random(td) && o->open_files == 1)
+ o->zone_size = 0;
/*
* Reads can do overwrites, we always need to pre-create the file
*/
if (td_read(td) || td_rw(td))
- td->overwrite = 1;
+ o->overwrite = 1;
- if (!td->min_bs[DDIR_READ])
- td->min_bs[DDIR_READ]= td->bs[DDIR_READ];
- if (!td->max_bs[DDIR_READ])
- td->max_bs[DDIR_READ] = td->bs[DDIR_READ];
- if (!td->min_bs[DDIR_WRITE])
- td->min_bs[DDIR_WRITE]= td->bs[DDIR_WRITE];
- if (!td->max_bs[DDIR_WRITE])
- td->max_bs[DDIR_WRITE] = td->bs[DDIR_WRITE];
+ if (!o->min_bs[DDIR_READ])
+ o->min_bs[DDIR_READ]= o->bs[DDIR_READ];
+ if (!o->max_bs[DDIR_READ])
+ o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
+ if (!o->min_bs[DDIR_WRITE])
+ o->min_bs[DDIR_WRITE]= o->bs[DDIR_WRITE];
+ if (!o->max_bs[DDIR_WRITE])
+ o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- td->rw_min_bs = min(td->min_bs[DDIR_READ], td->min_bs[DDIR_WRITE]);
+ o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
- if (!td->file_size_high)
- td->file_size_high = td->file_size_low;
+ if (!o->file_size_high)
+ o->file_size_high = o->file_size_low;
if (td_read(td) && !td_rw(td))
- td->verify = 0;
+ o->verify = 0;
- if (td->norandommap && td->verify != VERIFY_NONE) {
+ if (o->norandommap && o->verify != VERIFY_NONE) {
log_err("fio: norandommap given, verify disabled\n");
- td->verify = VERIFY_NONE;
+ o->verify = VERIFY_NONE;
}
- if (td->bs_unaligned && (td->odirect || td->io_ops->flags & FIO_RAWIO))
+ if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
log_err("fio: bs_unaligned may not work with raw io\n");
/*
* thinktime_spin must be less than thinktime
*/
- if (td->thinktime_spin > td->thinktime)
- td->thinktime_spin = td->thinktime;
+ if (o->thinktime_spin > o->thinktime)
+ o->thinktime_spin = o->thinktime;
/*
* The low water mark cannot be bigger than the iodepth
*/
- if (td->iodepth_low > td->iodepth || !td->iodepth_low) {
+ if (o->iodepth_low > o->iodepth || !o->iodepth_low) {
/*
* syslet work around - if the workload is sequential,
* we want to let the queue drain all the way down to
* avoid seeking between async threads
*/
if (!strcmp(td->io_ops->name, "syslet-rw") && !td_random(td))
- td->iodepth_low = 1;
+ o->iodepth_low = 1;
else
- td->iodepth_low = td->iodepth;
+ o->iodepth_low = o->iodepth;
}
/*
* If batch number isn't set, default to the same as iodepth
*/
- if (td->iodepth_batch > td->iodepth || !td->iodepth_batch)
- td->iodepth_batch = td->iodepth;
+ if (o->iodepth_batch > o->iodepth || !o->iodepth_batch)
+ o->iodepth_batch = o->iodepth;
- if (td->nr_files > td->files_index)
- td->nr_files = td->files_index;
+ if (o->nr_files > td->files_index)
+ o->nr_files = td->files_index;
- if (td->open_files > td->nr_files || !td->open_files)
- td->open_files = td->nr_files;
+ if (o->open_files > o->nr_files || !o->open_files)
+ o->open_files = o->nr_files;
- if ((td->rate && td->rate_iops) || (td->ratemin && td->rate_iops_min)) {
+ if ((o->rate && o->rate_iops) || (o->ratemin && o->rate_iops_min)) {
log_err("fio: rate and rate_iops are mutually exclusive\n");
return 1;
}
- if ((td->rate < td->ratemin) || (td->rate_iops < td->rate_iops_min)) {
+ if ((o->rate < o->ratemin) || (o->rate_iops < o->rate_iops_min)) {
log_err("fio: minimum rate exceeds rate\n");
return 1;
}
os_random_seed(seeds[1], &td->verify_state);
os_random_seed(seeds[2], &td->rwmix_state);
- if (td->file_service_type == FIO_FSERVICE_RANDOM)
+ if (td->o.file_service_type == FIO_FSERVICE_RANDOM)
os_random_seed(seeds[3], &td->next_file_state);
os_random_seed(seeds[5], &td->file_size_state);
if (!td_random(td))
return 0;
- if (td->rand_repeatable)
+ if (td->o.rand_repeatable)
seeds[4] = FIO_RANDSEED * td->thread_number;
- if (!td->norandommap) {
+ if (!td->o.norandommap) {
for_each_file(td, f, i) {
- blocks = (f->real_file_size + td->rw_min_bs - 1) / td->rw_min_bs;
+ blocks = (f->real_file_size + td->o.rw_min_bs - 1) / td->o.rw_min_bs;
num_maps = (blocks + BLOCKS_PER_MAP-1)/ BLOCKS_PER_MAP;
f->file_map = malloc(num_maps * sizeof(long));
if (!f->file_map) {
if (td == &def_thread)
return 0;
- engine = get_engine_name(td->ioengine);
+ engine = get_engine_name(td->o.ioengine);
td->io_ops = load_ioengine(td, engine);
if (!td->io_ops) {
log_err("fio: failed to load engine %s\n", engine);
goto err;
}
- if (td->use_thread)
+ if (td->o.use_thread)
nr_thread++;
else
nr_process++;
- if (td->odirect)
+ if (td->o.odirect)
td->io_ops->flags |= FIO_RAWIO;
file_alloced = 0;
- if (!td->filename && !td->files_index) {
+ if (!td->o.filename && !td->files_index) {
file_alloced = 1;
- if (td->nr_files == 1 && exists_and_not_file(jobname))
+ if (td->o.nr_files == 1 && exists_and_not_file(jobname))
add_file(td, jobname);
else {
- for (i = 0; i < td->nr_files; i++) {
+ for (i = 0; i < td->o.nr_files; i++) {
sprintf(fname, "%s.%d.%d", jobname, td->thread_number, i);
add_file(td, fname);
}
goto err;
for_each_file(td, f, i) {
- if (td->directory && f->filetype == FIO_TYPE_FILE) {
- sprintf(fname, "%s/%s", td->directory, f->file_name);
+ if (td->o.directory && f->filetype == FIO_TYPE_FILE) {
+ sprintf(fname, "%s/%s", td->o.directory, f->file_name);
f->file_name = strdup(fname);
}
}
td->ts.slat_stat[0].min_val = td->ts.slat_stat[1].min_val = ULONG_MAX;
td->ts.bw_stat[0].min_val = td->ts.bw_stat[1].min_val = ULONG_MAX;
- if ((td->stonewall || td->numjobs > 1) && prev_group_jobs) {
+ if ((td->o.stonewall || td->o.numjobs > 1) && prev_group_jobs) {
prev_group_jobs = 0;
groupid++;
}
if (setup_rate(td))
goto err;
- if (td->write_lat_log) {
+ if (td->o.write_lat_log) {
setup_log(&td->ts.slat_log);
setup_log(&td->ts.clat_log);
}
- if (td->write_bw_log)
+ if (td->o.write_bw_log)
setup_log(&td->ts.bw_log);
- if (!td->name)
- td->name = strdup(jobname);
+ if (!td->o.name)
+ td->o.name = strdup(jobname);
if (!terse_output) {
if (!job_add_num) {
if (!strcmp(td->io_ops->name, "cpuio"))
- log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
+ log_info("%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->o.name, td->o.cpuload, td->o.cpucycle);
else {
char *c1, *c2, *c3, *c4;
- c1 = to_kmg(td->min_bs[DDIR_READ]);
- c2 = to_kmg(td->max_bs[DDIR_READ]);
- c3 = to_kmg(td->min_bs[DDIR_WRITE]);
- c4 = to_kmg(td->max_bs[DDIR_WRITE]);
+ c1 = to_kmg(td->o.min_bs[DDIR_READ]);
+ c2 = to_kmg(td->o.max_bs[DDIR_READ]);
+ c3 = to_kmg(td->o.min_bs[DDIR_WRITE]);
+ c4 = to_kmg(td->o.max_bs[DDIR_WRITE]);
- log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->name, td->groupid, ddir_str[td->td_ddir], c1, c2, c3, c4, td->io_ops->name, td->iodepth);
+ log_info("%s: (g=%d): rw=%s, bs=%s-%s/%s-%s, ioengine=%s, iodepth=%u\n", td->o.name, td->groupid, ddir_str[td->o.td_ddir], c1, c2, c3, c4, td->io_ops->name, td->o.iodepth);
free(c1);
free(c2);
* recurse add identical jobs, clear numjobs and stonewall options
* as they don't apply to sub-jobs
*/
- numjobs = td->numjobs;
+ numjobs = td->o.numjobs;
while (--numjobs) {
struct thread_data *td_new = get_new_job(0, td);
if (!td_new)
goto err;
- td_new->numjobs = 1;
- td_new->stonewall = 0;
+ td_new->o.numjobs = 1;
+ td_new->o.stonewall = 0;
if (file_alloced) {
- td_new->filename = NULL;
+ td_new->o.filename = NULL;
td_new->files_index = 0;
td_new->files = NULL;
}
goto err;
}
- if (td->numjobs > 1) {
+ if (td->o.numjobs > 1) {
groupid++;
prev_group_jobs = 0;
}
* Seperate multiple job files by a stonewall
*/
if (!global && stonewall) {
- td->stonewall = stonewall;
+ td->o.stonewall = stonewall;
stonewall = 0;
}
{
memset(&def_thread, 0, sizeof(def_thread));
- if (fio_getaffinity(getpid(), &def_thread.cpumask) == -1) {
+ if (fio_getaffinity(getpid(), &def_thread.o.cpumask) == -1) {
perror("sched_getaffinity");
return 1;
}
*/
fio_fill_default_options(&def_thread);
- def_thread.timeout = def_timeout;
- def_thread.write_bw_log = write_bw_log;
- def_thread.write_lat_log = write_lat_log;
+ def_thread.o.timeout = def_timeout;
+ def_thread.o.write_bw_log = write_bw_log;
+ def_thread.o.write_lat_log = write_lat_log;
#ifdef FIO_HAVE_DISK_UTIL
- def_thread.do_disk_util = 1;
+ def_thread.o.do_disk_util = 1;
#endif
return 0;
char *val = optarg;
if (!strncmp(opt, "name", 4) && td) {
- ret = add_job(td, td->name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0);
if (ret) {
put_job(td);
return 0;
if (dont_add_job)
put_job(td);
else {
- ret = add_job(td, td->name ?: "fio", 0);
+ ret = add_job(td, td->o.name ?: "fio", 0);
if (ret)
put_job(td);
}
*/
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
- unsigned int min_bs = td->rw_min_bs;
+ unsigned int min_bs = td->o.rw_min_bs;
struct fio_file *f = io_u->file;
unsigned long long block;
unsigned int blocks;
i = f->last_free_lookup;
*b = (i * BLOCKS_PER_MAP);
- while ((*b) * td->rw_min_bs < f->real_file_size) {
+ while ((*b) * td->o.rw_min_bs < f->real_file_size) {
if (f->file_map[i] != -1UL) {
*b += ffz(f->file_map[i]);
f->last_free_lookup = i;
long r;
if (td_random(td)) {
- unsigned long long max_blocks = f->file_size / td->min_bs[ddir];
+ unsigned long long max_blocks = f->file_size / td->o.min_bs[ddir];
int loops = 5;
do {
b = 0;
else
b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
- if (td->norandommap)
+ if (td->o.norandommap)
break;
- rb = b + (f->file_offset / td->min_bs[ddir]);
+ rb = b + (f->file_offset / td->o.min_bs[ddir]);
loops--;
} while (!random_map_free(td, f, rb) && loops);
if (!loops && get_next_free_block(td, f, &b))
return 1;
} else
- b = f->last_pos / td->min_bs[ddir];
+ b = f->last_pos / td->o.min_bs[ddir];
- io_u->offset = (b * td->min_bs[ddir]) + f->file_offset;
+ io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
if (io_u->offset >= f->real_file_size)
return 1;
unsigned int buflen;
long r;
- if (td->min_bs[ddir] == td->max_bs[ddir])
- buflen = td->min_bs[ddir];
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+ buflen = td->o.min_bs[ddir];
else {
r = os_random_long(&td->bsrange_state);
- buflen = (unsigned int) (1 + (double) (td->max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
- if (!td->bs_unaligned)
- buflen = (buflen + td->min_bs[ddir] - 1) & ~(td->min_bs[ddir] - 1);
+ buflen = (unsigned int) (1 + (double) (td->o.max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
+ if (!td->o.bs_unaligned)
+ buflen = (buflen + td->o.min_bs[ddir] - 1) & ~(td->o.min_bs[ddir] - 1);
}
while (buflen + io_u->offset > f->real_file_size) {
- if (buflen == td->min_bs[ddir]) {
- if (!td->odirect) {
+ if (buflen == td->o.min_bs[ddir]) {
+ if (!td->o.odirect) {
assert(io_u->offset <= f->real_file_size);
buflen = f->real_file_size - io_u->offset;
return buflen;
return 0;
}
- buflen = td->min_bs[ddir];
+ buflen = td->o.min_bs[ddir];
}
return buflen;
/*
* Check if it's time to seed a new data direction.
*/
- if (elapsed >= td->rwmixcycle) {
+ if (elapsed >= td->o.rwmixcycle) {
unsigned int v;
long r;
r = os_random_long(&td->rwmix_state);
v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
- if (v < td->rwmixread)
+ if (v < td->o.rwmixread)
td->rwmix_ddir = DDIR_READ;
else
td->rwmix_ddir = DDIR_WRITE;
/*
* If using an iolog, grab next piece if any available.
*/
- if (td->read_iolog)
+ if (td->o.read_iolog)
return read_iolog_get(td, io_u);
/*
* see if it's time to sync
*/
- if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
- && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
+ if (td->o.fsync_blocks &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
+ td->io_issues[DDIR_WRITE] && should_fsync(td)) {
io_u->ddir = DDIR_SYNC;
return 0;
}
/*
* mark entry before potentially trimming io_u
*/
- if (!td->read_iolog && td_random(td) && !td->norandommap)
+ if (!td->o.read_iolog && td_random(td) && !td->o.norandommap)
mark_random_map(td, io_u);
/*
* If using a write iolog, store this entry.
*/
- if (td->write_iolog_file)
+ if (td->o.write_iolog_file)
write_iolog_put(td, io_u);
return 0;
do {
long r = os_random_long(&td->next_file_state);
- fno = (unsigned int) ((double) td->nr_files * (r / (RAND_MAX + 1.0)));
+ fno = (unsigned int) ((double) td->o.nr_files * (r / (RAND_MAX + 1.0)));
f = &td->files[fno];
if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
f = &td->files[td->next_file];
td->next_file++;
- if (td->next_file >= td->nr_files)
+ if (td->next_file >= td->o.nr_files)
td->next_file = 0;
if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
{
struct fio_file *f;
- assert(td->nr_files <= td->files_index);
+ assert(td->o.nr_files <= td->files_index);
if (!td->nr_open_files)
return NULL;
if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--)
return f;
- if (td->file_service_type == FIO_FSERVICE_RR)
+ if (td->o.file_service_type == FIO_FSERVICE_RR)
f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
else
f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
{
struct fio_file *f;
- if (td->file_service_type == FIO_FSERVICE_RR)
+ if (td->o.file_service_type == FIO_FSERVICE_RR)
f = get_next_file_rr(td, 0, FIO_FILE_OPEN);
else
f = get_next_file_rand(td, 0, FIO_FILE_OPEN);
* probably not the right place to do this, but see
* if we need to open a new file
*/
- if (td->nr_open_files < td->open_files &&
- td->open_files != td->nr_files) {
+ if (td->nr_open_files < td->o.open_files &&
+ td->o.open_files != td->o.nr_files) {
f = find_next_new_file(td);
if (!f || (ret = td_io_open_file(td, f))) {
}
} while (1);
- if (td->zone_bytes >= td->zone_size) {
+ if (td->zone_bytes >= td->o.zone_size) {
td->zone_bytes = 0;
- f->last_pos += td->zone_skip;
+ f->last_pos += td->o.zone_skip;
}
if (io_u->buflen + io_u->offset > f->real_file_size) {
f->last_pos = io_u->offset + io_u->buflen;
- if (td->verify != VERIFY_NONE)
+ if (td->o.verify != VERIFY_NONE)
populate_verify_io_u(td, io_u);
}
int r;
td->io_u_queued++;
- if (td->io_u_queued > td->iodepth_batch) {
+ if (td->io_u_queued > td->o.iodepth_batch) {
r = td_io_commit(td);
if (r < 0)
return r;
* be laid out with the block scattered as written. it's faster to
* read them in in that order again, so don't sort
*/
- if (!td_random(td) || !td->overwrite) {
+ if (!td_random(td) || !td->o.overwrite) {
list_add_tail(&ipo->list, &td->io_hist_list);
return;
}
FILE *f;
int rw, reads, writes;
- f = fopen(td->read_iolog_file, "r");
+ f = fopen(td->o.read_iolog_file, "r");
if (!f) {
perror("fopen read iolog");
return 1;
ipo->offset = offset;
ipo->len = bytes;
ipo->ddir = (enum fio_ddir) rw;
- if (bytes > td->max_bs[rw])
- td->max_bs[rw] = bytes;
+ if (bytes > td->o.max_bs[rw])
+ td->o.max_bs[rw] = bytes;
list_add_tail(&ipo->list, &td->io_log_list);
}
if (!reads && !writes)
return 1;
else if (reads && !writes)
- td->td_ddir = TD_DDIR_READ;
+ td->o.td_ddir = TD_DDIR_READ;
else if (!reads && writes)
- td->td_ddir = TD_DDIR_READ;
+ td->o.td_ddir = TD_DDIR_WRITE;
else
- td->td_ddir = TD_DDIR_RW;
+ td->o.td_ddir = TD_DDIR_RW;
return 0;
}
{
FILE *f;
- f = fopen(td->write_iolog_file, "w+");
+ f = fopen(td->o.write_iolog_file, "w+");
if (!f) {
perror("fopen write iolog");
return 1;
if (td->io_ops->flags & FIO_DISKLESSIO)
return 0;
- if (td->read_iolog_file)
+ if (td->o.read_iolog_file)
ret = init_iolog_read(td);
- else if (td->write_iolog_file)
+ else if (td->o.write_iolog_file)
ret = init_iolog_write(td);
return ret;
*/
int allocate_io_mem(struct thread_data *td)
{
- if (td->mem_type == MEM_MALLOC)
+ if (td->o.mem_type == MEM_MALLOC)
td->orig_buffer = malloc(td->orig_buffer_size);
- else if (td->mem_type == MEM_SHM || td->mem_type == MEM_SHMHUGE) {
+ else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) {
int flags = IPC_CREAT | SHM_R | SHM_W;
- if (td->mem_type == MEM_SHMHUGE)
+ if (td->o.mem_type == MEM_SHMHUGE)
flags |= SHM_HUGETLB;
td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, flags);
td->orig_buffer = NULL;
return 1;
}
- } else if (td->mem_type == MEM_MMAP || td->mem_type == MEM_MMAPHUGE) {
+ } else if (td->o.mem_type == MEM_MMAP ||
+ td->o.mem_type == MEM_MMAPHUGE) {
int flags = MAP_PRIVATE;
td->mmapfd = 0;
void free_io_mem(struct thread_data *td)
{
- if (td->mem_type == MEM_MALLOC)
+ if (td->o.mem_type == MEM_MALLOC)
free(td->orig_buffer);
- else if (td->mem_type == MEM_SHM || td->mem_type == MEM_SHMHUGE) {
+ else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) {
struct shmid_ds sbuf;
shmdt(td->orig_buffer);
shmctl(td->shm_id, IPC_RMID, &sbuf);
- } else if (td->mem_type == MEM_MMAP || td->mem_type == MEM_MMAPHUGE) {
+ } else if (td->o.mem_type == MEM_MMAP ||
+ td->o.mem_type == MEM_MMAPHUGE) {
munmap(td->orig_buffer, td->orig_buffer_size);
if (td->mmapfile) {
close(td->mmapfd);
free(td->mmapfile);
}
} else
- log_err("Bad memory type %u\n", td->mem_type);
+ log_err("Bad memory type %u\n", td->o.mem_type);
td->orig_buffer = NULL;
}
#include "fio.h"
#include "parse.h"
-#define td_var_offset(var) ((size_t) &((struct thread_data *)0)->var)
+#define td_var_offset(var) ((size_t) &((struct thread_options *)0)->var)
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
{
struct thread_data *td = data;
- if (td->mem_type == MEM_MMAPHUGE || td->mem_type == MEM_MMAP) {
+ if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP) {
td->mmapfile = get_opt_postfix(mem);
- if (td->mem_type == MEM_MMAPHUGE && !td->mmapfile) {
+ if (td->o.mem_type == MEM_MMAPHUGE && !td->mmapfile) {
log_err("fio: mmaphuge:/path/to/file\n");
return 1;
}
{
struct thread_data *td = data;
- fill_cpu_mask(td->cpumask, *val);
+ fill_cpu_mask(td->o.cpumask, *val);
return 0;
}
strip_blank_end(str);
if (!td->files_index)
- td->nr_files = 0;
+ td->o.nr_files = 0;
while ((fname = strsep(&str, ":")) != NULL) {
if (!strlen(fname))
break;
add_file(td, fname);
- td->nr_files++;
+ td->o.nr_files++;
}
free(p);
struct thread_data *td = data;
struct stat sb;
- if (lstat(td->directory, &sb) < 0) {
- log_err("fio: %s is not a directory\n", td->directory);
+ if (lstat(td->o.directory, &sb) < 0) {
+ log_err("fio: %s is not a directory\n", td->o.directory);
td_verror(td, errno, "lstat");
return 1;
}
if (!S_ISDIR(sb.st_mode)) {
- log_err("fio: %s is not a directory\n", td->directory);
+ log_err("fio: %s is not a directory\n", td->o.directory);
return 1;
}
struct thread_data *td = data;
if (!td->files_index)
- td->nr_files = 0;
+ td->o.nr_files = 0;
- return add_dir_files(td, td->opendir);
+ return add_dir_files(td, td->o.opendir);
}
{
.name = "size",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(total_file_size),
+ .off1 = td_var_offset(size),
.help = "Total size of device or files",
},
{
posix_fadvise((fd), (off_t)(off), (len), (advice))
#define fio_setaffinity(td) \
- sched_setaffinity((td)->pid, sizeof((td)->cpumask), &(td)->cpumask)
+ sched_setaffinity((td)->pid, sizeof((td)->o.cpumask), &(td)->o.cpumask)
#define fio_getaffinity(pid, ptr) \
sched_getaffinity((pid), sizeof(cpu_set_t), (ptr))
sprintf(foo, "%s", tmp);
}
- if (td->ioscheduler && !td->sysfs_root)
+ if (td->o.ioscheduler && !td->sysfs_root)
td->sysfs_root = strdup(foo);
disk_util_add(dev, foo);
struct fio_file *f;
unsigned int i;
- if (!td->do_disk_util ||
+ if (!td->o.do_disk_util ||
(td->io_ops->flags & (FIO_DISKLESSIO | FIO_NODISKUTIL)))
return;
nr_ts = 0;
last_ts = -1;
for_each_td(td, i) {
- if (!td->group_reporting) {
+ if (!td->o.group_reporting) {
nr_ts++;
continue;
}
last_ts = -1;
idx = 0;
for_each_td(td, i) {
- if (idx && (!td->group_reporting ||
- (td->group_reporting && last_ts != td->groupid))) {
+ if (idx && (!td->o.group_reporting ||
+ (td->o.group_reporting && last_ts != td->groupid))) {
idx = 0;
j++;
}
/*
* These are per-group shared already
*/
- ts->name = td->name;
- ts->description = td->description;
+ ts->name = td->o.name;
+ ts->description = td->o.description;
ts->groupid = td->groupid;
/*
unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
unsigned long rate;
- if (spent < td->bw_avg_time)
+ if (spent < td->o.bw_avg_time)
return;
rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
unsigned long usec_cycle;
unsigned int bs;
- if (!td->rate && !td->rate_iops)
+ if (!td->o.rate && !td->o.rate_iops)
return;
if (td_rw(td))
- bs = td->rw_min_bs;
+ bs = td->o.rw_min_bs;
else if (td_read(td))
- bs = td->min_bs[DDIR_READ];
+ bs = td->o.min_bs[DDIR_READ];
else
- bs = td->min_bs[DDIR_WRITE];
+ bs = td->o.min_bs[DDIR_WRITE];
usec_cycle = td->rate_usec_cycle * (bytes / bs);
p += sizeof(hdr);
fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));
- if (td->verify == VERIFY_MD5) {
+ if (td->o.verify == VERIFY_MD5) {
fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
hdr.verify_type = VERIFY_MD5;
} else {