Previously we did not account for the bytes already completed in each
data direction, which skewed the read/write mix for buffered IO.
Fix that up.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
unsigned int iolog;
unsigned int read_iolog;
unsigned int rwmixcycle;
unsigned int iolog;
unsigned int read_iolog;
unsigned int rwmixcycle;
- unsigned int rwmixread;
- unsigned int rwmixwrite;
unsigned int nice;
unsigned int file_service_type;
unsigned int group_reporting;
unsigned int nice;
unsigned int file_service_type;
unsigned int group_reporting;
{
struct thread_options *o = &td->o;
{
struct thread_options *o = &td->o;
- if (!o->rwmixread && o->rwmixwrite)
- o->rwmixread = 100 - o->rwmixwrite;
+ if (o->rwmix[DDIR_READ] + o->rwmix[DDIR_WRITE] > 100)
+ o->rwmix[DDIR_WRITE] = 100 - o->rwmix[DDIR_READ];
if (o->write_iolog_file && o->read_iolog_file) {
log_err("fio: read iolog overrides write_iolog\n");
if (o->write_iolog_file && o->read_iolog_file) {
log_err("fio: read iolog overrides write_iolog\n");
* whereas reads do not.
*/
rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
* whereas reads do not.
*/
rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
- diff = td->o.rwmixread;
- if (td->rwmix_ddir == DDIR_WRITE)
- diff = 100 - diff;
+ diff = td->o.rwmix[td->rwmix_ddir ^ 1];
- td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * (100 - diff)) / diff;
+ td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+ unsigned int v;
+ long r;
+
+ r = os_random_long(&td->rwmix_state);
+ v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+ if (v < td->o.rwmix[DDIR_READ])
+ return DDIR_READ;
+
+ return DDIR_WRITE;
fio_gettime(&now, NULL);
elapsed = mtime_since_now(&td->rwmix_switch);
fio_gettime(&now, NULL);
elapsed = mtime_since_now(&td->rwmix_switch);
+ /*
+ * if this is the first cycle, make it shorter
+ */
cycle = td->o.rwmixcycle;
if (!td->rwmix_bytes)
cycle /= 10;
cycle = td->o.rwmixcycle;
if (!td->rwmix_bytes)
cycle /= 10;
/*
* Check if it's time to seed a new data direction.
*/
/*
* Check if it's time to seed a new data direction.
*/
- if (elapsed >= cycle &&
+ if (elapsed >= cycle ||
td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
- unsigned int v;
- long r;
-
- r = os_random_long(&td->rwmix_state);
- v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
- if (v < td->o.rwmixread) {
- if (td->rwmix_ddir != DDIR_READ)
- set_rwmix_bytes(td);
- td->rwmix_ddir = DDIR_READ;
- } else {
- if (td->rwmix_ddir != DDIR_WRITE)
- set_rwmix_bytes(td);
- td->rwmix_ddir = DDIR_WRITE;
- }
+ unsigned long long max_bytes;
+ enum fio_ddir ddir;
+
+ /*
+ * Put a top limit on how many bytes we do for
+ * one data direction, to avoid overflowing the
+ * ranges too much
+ */
+ ddir = get_rand_ddir(td);
+ max_bytes = td->this_io_bytes[ddir];
+ if (max_bytes >= (td->io_size * td->o.rwmix[ddir] / 100))
+ ddir ^= 1;
+
+ if (ddir != td->rwmix_ddir)
+ set_rwmix_bytes(td);
+
+ td->rwmix_ddir = ddir;
memcpy(&td->rwmix_switch, &now, sizeof(now));
}
return td->rwmix_ddir;
memcpy(&td->rwmix_switch, &now, sizeof(now));
}
return td->rwmix_ddir;
{
.name = "rwmixread",
.type = FIO_OPT_INT,
{
.name = "rwmixread",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixread),
+ .off1 = td_var_offset(rwmix[DDIR_READ]),
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
{
.name = "rwmixwrite",
.type = FIO_OPT_INT,
{
.name = "rwmixwrite",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixwrite),
+ .off1 = td_var_offset(rwmix[DDIR_WRITE]),
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",