summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
2fc2698)
We need this for requeuing support; the network engine makes this
pretty apparent (it's not unusual to see short transfers there).
Basically we add an xfer_buf and xfer_buflen member to the io_u,
and these are the fields that the io engine MUST use. That allows
fio to increment and reset these appropriately, and simply requeue
the io_u for service of the next part of it.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
struct fio_file *f = io_u->file;
if (io_u->ddir == DDIR_READ)
struct fio_file *f = io_u->file;
if (io_u->ddir == DDIR_READ)
- io_prep_pread(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
+ io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
else if (io_u->ddir == DDIR_WRITE)
else if (io_u->ddir == DDIR_WRITE)
- io_prep_pwrite(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
+ io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
else if (io_u->ddir == DDIR_SYNC)
io_prep_fsync(&io_u->iocb, f->fd);
else
else if (io_u->ddir == DDIR_SYNC)
io_prep_fsync(&io_u->iocb, f->fd);
else
} while (1);
if (ret <= 0) {
} while (1);
if (ret <= 0) {
- io_u->resid = io_u->buflen;
+ io_u->resid = io_u->xfer_buflen;
io_u->error = -ret;
return 1;
}
io_u->error = -ret;
return 1;
}
struct mmapio_data *sd = td->io_ops->data;
if (io_u->ddir == DDIR_READ)
struct mmapio_data *sd = td->io_ops->data;
if (io_u->ddir == DDIR_READ)
- memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
+ memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_WRITE)
else if (io_u->ddir == DDIR_WRITE)
- memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
+ memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_SYNC) {
if (msync(f->mmap, f->file_size, MS_SYNC))
io_u->error = errno;
else if (io_u->ddir == DDIR_SYNC) {
if (msync(f->mmap, f->file_size, MS_SYNC))
io_u->error = errno;
* not really direct, but should drop the pages from the cache
*/
if (td->odirect && io_u->ddir != DDIR_SYNC) {
* not really direct, but should drop the pages from the cache
*/
if (td->odirect && io_u->ddir != DDIR_SYNC) {
- if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
+ if (msync(f->mmap + real_off, io_u->xfer_buflen, MS_SYNC) < 0)
- if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
+ if (madvise(f->mmap + real_off, io_u->xfer_buflen, MADV_DONTNEED) < 0)
{
struct net_data *nd = td->io_ops->data;
struct fio_file *f = io_u->file;
{
struct net_data *nd = td->io_ops->data;
struct fio_file *f = io_u->file;
if (io_u->ddir == DDIR_WRITE)
if (io_u->ddir == DDIR_WRITE)
- ret = write(f->fd, io_u->buf, io_u->buflen);
+ ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_READ)
else if (io_u->ddir == DDIR_READ)
- ret = read(f->fd, io_u->buf, io_u->buflen);
+ ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
- if (ret != io_u->buflen) {
+ if (ret != (int) io_u->xfer_buflen) {
- io_u->resid = io_u->buflen - ret;
- io_u->error = EIO;
+ io_u->resid = io_u->xfer_buflen - ret;
+ io_u->error = 0;
+ return ret;
} else
io_u->error = errno;
}
} else
io_u->error = errno;
}
struct fio_file *f = io_u->file;
aiocb->aio_fildes = f->fd;
struct fio_file *f = io_u->file;
aiocb->aio_fildes = f->fd;
- aiocb->aio_buf = io_u->buf;
- aiocb->aio_nbytes = io_u->buflen;
+ aiocb->aio_buf = io_u->xfer_buf;
+ aiocb->aio_nbytes = io_u->xfer_buflen;
aiocb->aio_offset = io_u->offset;
io_u->seen = 0;
aiocb->aio_offset = io_u->offset;
io_u->seen = 0;
hdr->usr_ptr = io_u;
if (fs) {
hdr->usr_ptr = io_u;
if (fs) {
- hdr->dxferp = io_u->buf;
- hdr->dxfer_len = io_u->buflen;
+ hdr->dxferp = io_u->xfer_buf;
+ hdr->dxfer_len = io_u->xfer_buflen;
struct sgio_data *sd = td->io_ops->data;
int nr_blocks, lba;
struct sgio_data *sd = td->io_ops->data;
int nr_blocks, lba;
- if (io_u->buflen & (sd->bs - 1)) {
+ if (io_u->xfer_buflen & (sd->bs - 1)) {
log_err("read/write not sector aligned\n");
return EINVAL;
}
log_err("read/write not sector aligned\n");
return EINVAL;
}
}
if (hdr->dxfer_direction != SG_DXFER_NONE) {
}
if (hdr->dxfer_direction != SG_DXFER_NONE) {
- nr_blocks = io_u->buflen / sd->bs;
+ nr_blocks = io_u->xfer_buflen / sd->bs;
lba = io_u->offset / sd->bs;
hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
lba = io_u->offset / sd->bs;
hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
void *p;
offset = io_u->offset;
void *p;
offset = io_u->offset;
- buflen = io_u->buflen;
- p = io_u->buf;
+ buflen = io_u->xfer_buflen;
+ p = io_u->xfer_buf;
while (buflen) {
int this_len = buflen;
while (buflen) {
int this_len = buflen;
+ return io_u->xfer_buflen;
struct spliceio_data *sd = td->io_ops->data;
struct iovec iov[1] = {
{
struct spliceio_data *sd = td->io_ops->data;
struct iovec iov[1] = {
{
- .iov_base = io_u->buf,
- .iov_len = io_u->buflen,
+ .iov_base = io_u->xfer_buf,
+ .iov_len = io_u->xfer_buflen,
}
};
struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
}
};
struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
+ return io_u->xfer_buflen;
}
static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
{
struct spliceio_data *sd = td->io_ops->data;
}
static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
{
struct spliceio_data *sd = td->io_ops->data;
if (io_u->ddir == DDIR_READ)
ret = fio_splice_read(td, io_u);
if (io_u->ddir == DDIR_READ)
ret = fio_splice_read(td, io_u);
else
ret = fsync(io_u->file->fd);
else
ret = fsync(io_u->file->fd);
- if (ret != io_u->buflen) {
+ if (ret != (int) io_u->xfer_buflen) {
- io_u->resid = io_u->buflen - ret;
- io_u->error = ENODATA;
+ io_u->resid = io_u->xfer_buflen - ret;
+ io_u->error = 0;
+ return ret;
} else
io_u->error = errno;
}
} else
io_u->error = errno;
}
{
struct syncio_data *sd = td->io_ops->data;
struct fio_file *f = io_u->file;
{
struct syncio_data *sd = td->io_ops->data;
struct fio_file *f = io_u->file;
if (io_u->ddir == DDIR_READ)
if (io_u->ddir == DDIR_READ)
- ret = read(f->fd, io_u->buf, io_u->buflen);
+ ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_WRITE)
else if (io_u->ddir == DDIR_WRITE)
- ret = write(f->fd, io_u->buf, io_u->buflen);
+ ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
- if (ret != io_u->buflen) {
+ if (ret != (int) io_u->xfer_buflen) {
- io_u->resid = io_u->buflen - ret;
- io_u->error = EIO;
+ io_u->resid = io_u->xfer_buflen - ret;
+ io_u->error = 0;
+ return ret;
} else
io_u->error = errno;
}
} else
io_u->error = errno;
}
memcpy(&s, &io_u->start_time, sizeof(s));
memcpy(&s, &io_u->start_time, sizeof(s));
ret = td_io_queue(td, io_u);
if (ret) {
ret = td_io_queue(td, io_u);
if (ret) {
- td_verror(td, io_u->error);
- put_io_u(td, io_u);
- break;
+ if (ret > 0 && (io_u->xfer_buflen != io_u->resid) &&
+ io_u->resid) {
+ /*
+ * short read/write. requeue.
+ */
+ io_u->xfer_buflen = io_u->resid;
+ io_u->xfer_buf += ret;
+ goto requeue;
+ } else {
+ td_verror(td, io_u->error);
+ put_io_u(td, io_u);
+ break;
+ }
}
add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
}
add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));
unsigned int buflen;
unsigned long long offset;
unsigned int buflen;
unsigned long long offset;
+ void *xfer_buf;
+ unsigned int xfer_buflen;
+
unsigned int resid;
unsigned int error;
unsigned int resid;
unsigned int error;
+ io_u->xfer_buf = io_u->buf;
+ io_u->xfer_buflen = io_u->buflen;
fio_gettime(&io_u->start_time, NULL);
return io_u;
}
fio_gettime(&io_u->start_time, NULL);
return io_u;
}