unsigned int buflen = 0;
unsigned int minbs, maxbs;
uint64_t frand_max, r;
+ bool power_2;
assert(ddir_rw(ddir));
}
}
- if (!td->o.bs_unaligned && is_power_of_2(minbs))
+ power_2 = is_power_of_2(minbs);
+ if (!td->o.bs_unaligned && power_2)
buflen &= ~(minbs - 1);
-
+ else if (!td->o.bs_unaligned && !power_2)
+ buflen -= buflen % minbs;
} while (!io_u_fits(td, io_u, buflen));
return buflen;
enum fio_ddir ddir;
/*
- * see if it's time to fsync
- */
- if (td->o.fsync_blocks &&
- !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_SYNC;
-
- /*
- * see if it's time to fdatasync
- */
- if (td->o.fdatasync_blocks &&
- !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_DATASYNC;
-
- /*
- * see if it's time to sync_file_range
+ * See if it's time to fsync/fdatasync/sync_file_range first,
+ * and if not then move on to check regular I/Os.
*/
- if (td->sync_file_range_nr &&
- !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td))
- return DDIR_SYNC_FILE_RANGE;
+ if (should_fsync(td)) {
+ if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
+ return DDIR_SYNC;
+
+ if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
+ return DDIR_DATASYNC;
+
+ if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
+ !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
+ return DDIR_SYNC_FILE_RANGE;
+ }
if (td_rw(td)) {
/*
}
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
- dprint(FD_IO, "io_u %p, offset too large\n", io_u);
- dprint(FD_IO, " off=%llu/%lu > %llu\n",
+ dprint(FD_IO, "io_u %p, offset + buflen exceeds file size\n",
+ io_u);
+ dprint(FD_IO, " offset=%llu/buflen=%lu > %llu\n",
(unsigned long long) io_u->offset, io_u->buflen,
(unsigned long long) io_u->file->real_file_size);
return 1;
if (!td_io_prep(td, io_u)) {
if (!td->o.disable_lat)
fio_gettime(&io_u->start_time, NULL);
+
if (do_scramble)
small_content_scramble(io_u);
+
return io_u;
}
err_put:
if (td->parent)
td = td->parent;
+ if (!td->o.stats)
+ return;
+
if (no_reduce)
lusec = utime_since(&io_u->issue_time, &icd->time);
*/
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
- if (!td->o.disable_slat && ramp_time_over(td)) {
+ if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
unsigned long slat_time;
slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
{
struct thread_options *o = &td->o;
+ if (o->mem_type == MEM_CUDA_MALLOC)
+ return;
+
if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
struct frand_state *rs;
io_u->buf_filled_len = 0;
fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
+
+
+/*
+ * Issue sync_file_range(2) over the region of 'f' written since the last
+ * sync, i.e. [f->first_write, f->last_write), using the flags configured
+ * in td->o.sync_file_range.
+ *
+ * Returns 0 when nothing has been written (no-op), otherwise the return
+ * value of sync_file_range() (0 on success, -1 with errno set on failure).
+ */
+static int do_sync_file_range(const struct thread_data *td,
+ struct fio_file *f)
+{
+ off64_t offset, nbytes;
+
+ offset = f->first_write;
+ nbytes = f->last_write - f->first_write;
+
+ /* No writes since the last sync -> nothing to flush */
+ if (!nbytes)
+ return 0;
+
+ return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
+}
+
+/*
+ * Execute a sync-type io_u (DDIR_SYNC, DDIR_DATASYNC or
+ * DDIR_SYNC_FILE_RANGE) against its file.
+ *
+ * On success returns the underlying syscall's result; on failure the
+ * negative return causes errno to be captured into io_u->error. A ddir
+ * that is not one of the sync types, or DDIR_DATASYNC on a platform
+ * without fdatasync support, sets io_u->error = EINVAL and returns the
+ * transfer length (so the caller treats it as "completed").
+ */
+int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
+{
+ int ret;
+
+ if (io_u->ddir == DDIR_SYNC) {
+ ret = fsync(io_u->file->fd);
+ } else if (io_u->ddir == DDIR_DATASYNC) {
+#ifdef CONFIG_FDATASYNC
+ ret = fdatasync(io_u->file->fd);
+#else
+ /* fdatasync(2) unavailable at build time: flag as invalid */
+ ret = io_u->xfer_buflen;
+ io_u->error = EINVAL;
+#endif
+ } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
+ ret = do_sync_file_range(td, io_u->file);
+ else {
+ /* Not a sync ddir: reject with EINVAL */
+ ret = io_u->xfer_buflen;
+ io_u->error = EINVAL;
+ }
+
+ /* Syscall failed: record errno on the io_u */
+ if (ret < 0)
+ io_u->error = errno;
+
+ return ret;
+}
+
+/*
+ * Execute a trim/discard io_u over [io_u->offset, io_u->offset +
+ * io_u->xfer_buflen) of its file via the platform os_trim() helper.
+ *
+ * Returns the number of bytes trimmed on success. On failure, or when
+ * the build lacks trim support (FIO_HAVE_TRIM undefined), stores the
+ * error in io_u->error and returns 0.
+ */
+int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+{
+#ifndef FIO_HAVE_TRIM
+ /* Trim not supported on this platform/build */
+ io_u->error = EINVAL;
+ return 0;
+#else
+ struct fio_file *f = io_u->file;
+ int ret;
+
+ ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
+ if (!ret)
+ return io_u->xfer_buflen;
+
+ /* os_trim() returns a positive error code on failure */
+ io_u->error = ret;
+ return 0;
+#endif
+}