/*
* Hmm, should we make sure that ->io_size <= ->real_file_size?
+ * -> not for now since there is code assuming it could go either way.
*/
max_size = f->io_size;
if (max_size > f->real_file_size)
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
- if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
- f->last_pos[ddir] = f->real_file_size;
+ if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+ if (f->real_file_size > f->io_size)
+ f->last_pos[ddir] = f->io_size;
+ else
+ f->last_pos[ddir] = f->real_file_size;
+ }
pos = f->last_pos[ddir] - f->file_offset;
if (pos && o->ddir_seq_add) {
if (pos >= f->real_file_size) {
if (o->ddir_seq_add > 0)
pos = f->file_offset;
- else
- pos = f->real_file_size + o->ddir_seq_add;
+ else {
+ if (f->real_file_size > f->io_size)
+ pos = f->io_size;
+ else
+ pos = f->real_file_size;
+
+ pos += o->ddir_seq_add;
+ }
}
}
int ddir = io_u->ddir;
unsigned int buflen = 0;
unsigned int minbs, maxbs;
- uint64_t frand_max;
- unsigned long r;
+ uint64_t frand_max, r;
assert(ddir_rw(ddir));
if (buflen < minbs)
buflen = minbs;
} else {
- long perc = 0;
+ long long perc = 0;
unsigned int i;
for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
buflen = bsp->bs;
perc += bsp->perc;
- if ((r * 100UL <= frand_max * perc) &&
+ if (!perc)
+ break;
+ if ((r / perc <= frand_max / 100ULL) &&
io_u_fits(td, io_u, buflen))
break;
}
}
- if (td->o.verify != VERIFY_NONE)
- buflen = (buflen + td->o.verify_interval - 1) &
- ~(td->o.verify_interval - 1);
-
if (!td->o.bs_unaligned && is_power_of_2(minbs))
buflen &= ~(minbs - 1);
}
while (td->io_u_in_flight) {
- int fio_unused ret;
+ int ret;
ret = io_u_queued_complete(td, 1);
if (ret > 0)
completed += ret;
}
+ if (td->flags & TD_F_REGROW_LOGS)
+ regrow_logs(td);
+
return completed;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
- long usec, now;
+ long usec;
+ uint64_t now;
assert(ddir_rw(ddir));
now = utime_since_now(&td->start);
ddir = DDIR_READ;
else if (td_write(td))
ddir = DDIR_WRITE;
- else
+ else if (td_trim(td))
ddir = DDIR_TRIM;
+ else
+ ddir = DDIR_INVAL;
td->rwmix_ddir = rate_ddir(td, ddir);
return td->rwmix_ddir;
get_trim = 1;
}
- if (get_trim && !get_next_trim(td, io_u))
+ if (get_trim && get_next_trim(td, io_u))
return true;
}
icd->nr = nr;
icd->error = 0;
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
icd->bytes_done[ddir] = 0;
}
return -1;
}
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
td->bytes_done[ddir] += icd.bytes_done[ddir];
return 0;
int ret, ddir;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
- dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
+ dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
if (!min_evts)
tvp = &ts;
return -1;
}
- for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
td->bytes_done[ddir] += icd.bytes_done[ddir];
return ret;
*/
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
- if (!td->o.disable_slat) {
+ if (!td->o.disable_slat && ramp_time_over(td)) {
unsigned long slat_time;
slat_time = utime_since(&io_u->start_time, &io_u->issue_time);