else
mask = ((1U << this_blocks) - 1) << bit;
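/*
 * ((1U << this_blocks) - 1) above builds a run of this_blocks set
 * bits; shifting it by 'bit' places the run at its spot in this
 * file_map word.
 */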
- fio_assert(td, !(f->file_map[idx] & mask));
f->file_map[idx] |= mask;
nr_blocks -= this_blocks;
blocks += this_blocks;
r = os_random_long(&td->random_state);
dprint(FD_RANDOM, "off rand %llu\n", r);
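/*
 * r / (OS_RAND_MAX + 1.0) is always in [0, 1.0), so the product
 * below stays under last_block - 1: a uniformly distributed block
 * offset inside the file.
 */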
*b = (last_block(td, f, ddir) - 1)
- * (r / ((unsigned long long) RAND_MAX + 1.0));
+ * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
/*
* if we are not maintaining a random map, we are done.
*/
loops = 10;
do {
- f->last_free_lookup = (f->num_maps - 1) * (r / (RAND_MAX+1.0));
+ f->last_free_lookup = (f->num_maps - 1) *
+ (r / (OS_RAND_MAX + 1.0));
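/*
 * Seed the free-block scan at a random map word; get_next_free_block()
 * then returns the first free block from that point on.
 */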
if (!get_next_free_block(td, f, ddir, b))
return 0;
return 0;
}
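+/*
+ * val & (val - 1) clears the lowest set bit, so it is zero only when
+ * at most one bit is set; the val != 0 test rules out zero itself.
+ */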
+static inline int is_power_of_2(unsigned int val)
+{
+ return (val != 0 && ((val & (val - 1)) == 0));
+}
+
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
const int ddir = io_u->ddir;
- unsigned int buflen = buflen; /* silence dumb gcc warning */
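+ /*
+ * uninitialized_var() expands to a self-assignment, silencing the
+ * bogus "may be used uninitialized" warning without a real
+ * initializer.
+ */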
+ unsigned int uninitialized_var(buflen);
+ unsigned int minbs, maxbs;
long r;
- if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
- buflen = td->o.min_bs[ddir];
+ minbs = td->o.min_bs[ddir];
+ maxbs = td->o.max_bs[ddir];
+
+ if (minbs == maxbs)
+ buflen = minbs;
else {
r = os_random_long(&td->bsrange_state);
if (!td->o.bssplit_nr) {
- buflen = (unsigned int)
- (1 + (double) (td->o.max_bs[ddir] - 1)
- * r / (RAND_MAX + 1.0));
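+ /*
+ * Map r onto [1, maxbs]: the ratio is below 1.0 and the cast
+ * truncates, so the result never exceeds maxbs. Anything under
+ * minbs is clamped just below.
+ */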
+ buflen = 1 + (unsigned int) ((double) maxbs *
+ (r / (OS_RAND_MAX + 1.0)));
+ if (buflen < minbs)
+ buflen = minbs;
} else {
long perc = 0;
unsigned int i;
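/*
 * Walk the split table accumulating percentages; r then picks a
 * block size with probability proportional to its configured share.
 */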
for (i = 0; i < td->o.bssplit_nr; i++) {
struct bssplit *bsp = &td->o.bssplit[i];
buflen = bsp->bs;
perc += bsp->perc;
- if (r <= ((LONG_MAX / 100L) * perc))
+ if (r <= ((OS_RAND_MAX / 100L) * perc))
break;
}
}
- if (!td->o.bs_unaligned) {
- buflen = (buflen + td->o.min_bs[ddir] - 1)
- & ~(td->o.min_bs[ddir] - 1);
- }
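+ /*
+ * Round buflen up to a multiple of minbs. The mask trick is only
+ * valid for power-of-two sizes, hence the is_power_of_2() guard.
+ */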
+ if (!td->o.bs_unaligned && is_power_of_2(minbs))
+ buflen = (buflen + minbs - 1) & ~(minbs - 1);
}
if (io_u->offset + buflen > io_u->file->real_file_size) {
dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
- td->o.min_bs[ddir], ddir);
- buflen = td->o.min_bs[ddir];
+ minbs, ddir);
+ buflen = minbs;
}
return buflen;
long r;
r = os_random_long(&td->rwmix_state);
- v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+ v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
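/*
 * v is uniform over 1..100, so taking DDIR_READ whenever v falls at
 * or below the configured read percentage yields the requested mix.
 */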
if (v <= td->o.rwmix[DDIR_READ])
return DDIR_READ;
long r = os_random_long(&td->next_file_state);
fno = (unsigned int) ((double) td->o.nr_files
- * (r / (RAND_MAX + 1.0)));
+ * (r / (OS_RAND_MAX + 1.0)));
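/*
 * The same scaling trick makes fno uniform over 0..nr_files - 1.
 */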
f = td->files[fno];
if (f->flags & FIO_FILE_DONE)
continue;
td->io_bytes[idx] += bytes;
td->this_io_bytes[idx] += bytes;
- usec = utime_since(&io_u->issue_time, &icd->time);
- add_clat_sample(td, idx, usec);
- add_bw_sample(td, idx, &icd->time);
- io_u_mark_latency(td, usec);
+ if (ramp_time_over(td)) {
+ usec = utime_since(&io_u->issue_time, &icd->time);
+ add_clat_sample(td, idx, usec);
+ add_bw_sample(td, idx, &icd->time);
+ io_u_mark_latency(td, usec);
+ }
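+ /*
+ * Gating on ramp_time_over() keeps warm-up I/O out of the latency
+ * and bandwidth statistics.
+ */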
if (td_write(td) && idx == DDIR_WRITE &&
td->o.do_verify &&
/*
* Called to complete a minimum number of I/Os for the async engines.
*/
-long io_u_queued_complete(struct thread_data *td, int min_events)
+long io_u_queued_complete(struct thread_data *td, int min_evts)
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
int ret;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_events);
+ dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
- if (!min_events)
+ if (!min_evts)
tvp = &ts;
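/*
 * A zero min_evts turns the reap into a poll: the zeroed timespec
 * above makes td_io_getevents() return immediately. Otherwise it
 * blocks until at least min_evts completions have arrived, reaping
 * no more than iodepth_batch_complete events in one call.
 */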
- ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
+ ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
if (ret < 0) {
td_verror(td, -ret, "td_io_getevents");
return ret;