#include <sys/mman.h>
#include "../fio.h"
#include "../verify.h"

/*
 * Limits us to 1GB of mapped files in total
 */
#define MMAP_TOTAL_SZ (1 * 1024 * 1024 * 1024UL)

static unsigned long mmap_map_size;
static unsigned long mmap_map_mask;
			 size_t length, off_t off)
{
	int flags = 0;

	if (td_rw(td))
		flags = PROT_READ | PROT_WRITE;

	f->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
	if (f->mmap_ptr == MAP_FAILED) {
		f->mmap_ptr = NULL;
		td_verror(td, errno, "mmap");
		goto err;
	}
}

err:
	if (td->error && f->mmap_ptr)
		munmap(f->mmap_ptr, length);

	return td->error;
}
-static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
+/*
+ * Just mmap an appropriate portion, we cannot mmap the full extent
+ */
+static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
- int ret = 0;
if (io_u->buflen > mmap_map_size) {
log_err("fio: bs too big for mmap engine\n");
- ret = EIO;
- goto err;
+ return EIO;
}
+ f->mmap_sz = mmap_map_size;
+ if (f->mmap_sz > f->io_size)
+ f->mmap_sz = f->io_size;
+
+ f->mmap_off = io_u->offset;
+
+ return fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
+}
+
+/*
+ * Attempt to mmap the entire file
+ */
+static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ int ret;
+
+ if (fio_file_partial_mmap(f))
+ return EINVAL;
+
+ f->mmap_sz = f->io_size;
+ f->mmap_off = 0;
+
+ ret = fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
+ if (ret)
+ fio_file_set_partial_mmap(f);
+
+ return ret;
+}
+
+static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ int ret;
+
+ /*
+ * It fits within existing mapping, use it
+ */
if (io_u->offset >= f->mmap_off &&
io_u->offset + io_u->buflen < f->mmap_off + f->mmap_sz)
goto done;
+ /*
+ * unmap any existing mapping
+ */
if (f->mmap_ptr) {
- if (munmap(f->mmap_ptr, f->mmap_sz) < 0) {
- ret = errno;
- goto err;
- }
+ if (munmap(f->mmap_ptr, f->mmap_sz) < 0)
+ return errno;
f->mmap_ptr = NULL;
}
- f->mmap_sz = mmap_map_size;
- if (f->mmap_sz > f->io_size)
- f->mmap_sz = f->io_size;
-
- f->mmap_off = io_u->offset & ~mmap_map_mask;
- if (io_u->offset + io_u->buflen >= f->mmap_off + f->mmap_sz)
- f->mmap_off -= io_u->buflen;
+ if (fio_mmapio_prep_full(td, io_u)) {
+ td_clear_error(td);
+ ret = fio_mmapio_prep_limited(td, io_u);
+ if (ret)
+ return ret;
+ }
- ret = fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
done:
- if (!ret)
- io_u->mmap_data = f->mmap_ptr + io_u->offset - f->mmap_off -
- f->file_offset;
-err:
- return ret;
+ io_u->mmap_data = f->mmap_ptr + io_u->offset - f->mmap_off -
+ f->file_offset;
+ return 0;
}
static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (ddir_sync(io_u->ddir)) {
		if (msync(f->mmap_ptr, f->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && !ddir_sync(io_u->ddir)) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");