if (td->o.zone_range)
max_size = td->o.zone_range;
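+ /*
+  * If the minimum block size is larger than the block alignment, trim
+  * max_size so the last aligned offset still has room for a full
+  * min_bs-sized I/O.
+  */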
+ if (td->o.min_bs[ddir] > td->o.ba[ddir])
+ max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];
+
max_blocks = max_size / (uint64_t) td->o.ba[ddir];
if (!max_blocks)
return 0;
* I/Os that have actually been submitted to an async engine,
* and cur_depth is meaningless for sync engines.
*/
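+ /*
+  * Commit anything still queued locally, so the in-flight count below
+  * covers all outstanding I/O before we wait for it to complete.
+  */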
+ if (td->io_u_queued || td->cur_depth) {
+ int fio_unused ret;
+
+ ret = td_io_commit(td);
+ }
+
while (td->io_u_in_flight) {
int fio_unused ret;
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
- struct timeval t;
long usec;
assert(ddir_rw(ddir));
io_u_quiesce(td);
- fio_gettime(&t, NULL);
- usec_sleep(td, usec);
- usec = utime_since_now(&t);
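+ /* usec_sleep() returns the number of microseconds actually slept */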
+ usec = usec_sleep(td, usec);
td->rate_pending_usleep[ddir] -= usec;
if (td_rw(td) && __should_check_rate(td, odir))
td->rate_pending_usleep[odir] -= usec;
- if (ddir == DDIR_TRIM)
- return DDIR_TRIM;
-
return ddir;
}
{
struct thread_options *o = &td->o;
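+ /*
+  * Buffers that must be dedupe'able take the same stateful fill path
+  * as compressible buffers.
+  */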
- if (o->compress_percentage) {
+ if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
struct frand_state *rs;
unsigned int left = max_bs;
} while (left);
} else if (o->buffer_pattern_bytes)
fill_buffer_pattern(td, buf, max_bs);
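+ /* zero the buffer only when explicitly requested; otherwise default to random data */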
- else
+ else if (o->zero_buffers)
memset(buf, 0, max_bs);
+ else
+ fill_random_buf(get_buf_state(td), buf, max_bs);
}
/*