+static void set_rwmix_bytes(struct thread_data *td)
+{
+	unsigned int opposite_pct;
+
+	/*
+	 * Base the mix switch on issue counts (time/byte based), because
+	 * buffered writes can be issued far faster than they complete,
+	 * while reads cannot.
+	 */
+	opposite_pct = td->o.rwmix[td->rwmix_ddir ^ 1];
+	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * opposite_pct) / 100;
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+	long rnd;
+	unsigned int roll;
+
+	/*
+	 * Draw a value in [1, 100] and compare it against the configured
+	 * read percentage to choose the data direction.
+	 */
+	rnd = os_random_long(&td->rwmix_state);
+	roll = 1 + (int) (100.0 * (rnd / (OS_RAND_MAX + 1.0)));
+
+	return roll <= td->o.rwmix[DDIR_READ] ? DDIR_READ : DDIR_WRITE;
+}
+
+/*
+ * Rate-limit the given direction. If @ddir has accumulated pending sleep
+ * time, either switch to the other direction (rw workloads only) or sleep
+ * off the smaller debt, then deduct the time actually slept. Returns the
+ * direction the caller should issue next (may differ from @ddir).
+ */
+static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
+{
+ enum fio_ddir odir = ddir ^ 1;
+ struct timeval t;
+ long usec;
+
+ /*
+ * No sleep owed in this direction, issue as-is.
+ */
+ if (td->rate_pending_usleep[ddir] <= 0)
+ return ddir;
+
+ /*
+ * We have too much pending sleep in this direction. See if we
+ * should switch.
+ */
+ if (td_rw(td)) {
+ /*
+ * Other direction does not have too much pending, switch
+ */
+ if (td->rate_pending_usleep[odir] < 100000)
+ return odir;
+
+ /*
+ * Both directions have pending sleep. Sleep the minimum time
+ * and deduct from both.
+ */
+ if (td->rate_pending_usleep[ddir] <=
+ td->rate_pending_usleep[odir]) {
+ usec = td->rate_pending_usleep[ddir];
+ } else {
+ usec = td->rate_pending_usleep[odir];
+ /* Issue in the less-indebted direction after sleeping */
+ ddir = odir;
+ }
+ } else
+ usec = td->rate_pending_usleep[ddir];
+
+ /*
+ * Deduct the time we actually slept, not what we asked for --
+ * usec_sleep() may under- or overshoot.
+ */
+ fio_gettime(&t, NULL);
+ usec_sleep(td, usec);
+ usec = utime_since_now(&t);
+
+ td->rate_pending_usleep[ddir] -= usec;
+
+ /*
+ * Recompute the opposite direction: ddir may have been switched
+ * above. Credit the sleep to it too if it is also rate limited.
+ */
+ odir = ddir ^ 1;
+ if (td_rw(td) && __should_check_rate(td, odir))
+ td->rate_pending_usleep[odir] -= usec;
+
+ return ddir;
+}
+