X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=engines%2Fmmap.c;h=bc038f4febac88d68167f30e09d212942bbce582;hb=be391e20c4db6dc8d3be3af0811419741fe5fd6e;hp=fde68f1d9b0b5b6a7e3e6f404a003a02f1b4bf45;hpb=4f5af7b2370a6d3e64bc5128905c1aa8b0dc51b0;p=fio.git

diff --git a/engines/mmap.c b/engines/mmap.c
index fde68f1d..bc038f4f 100644
--- a/engines/mmap.c
+++ b/engines/mmap.c
@@ -15,18 +15,23 @@
 #include "../verify.h"
 
 /*
- * Limits us to 2GB of mapped files in total
+ * Limits us to 1GiB of mapped files in total
  */
-#define MMAP_TOTAL_SZ	(2 * 1024 * 1024 * 1024UL)
+#define MMAP_TOTAL_SZ	(1 * 1024 * 1024 * 1024UL)
 
 static unsigned long mmap_map_size;
-static unsigned long mmap_map_mask;
+
+struct fio_mmap_data {
+	void *mmap_ptr;
+	size_t mmap_sz;
+	off_t mmap_off;
+};
 
 static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
 			 size_t length, off_t off)
 {
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
 	int flags = 0;
-	int ret = 0;
 
 	if (td_rw(td))
 		flags = PROT_READ | PROT_WRITE;
@@ -38,74 +43,129 @@ static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
 	} else
 		flags = PROT_READ;
 
-	f->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
-	if (f->mmap_ptr == MAP_FAILED) {
-		int err = errno;
-
-		f->mmap_ptr = NULL;
-		td_verror(td, err, "mmap");
+	fmd->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
+	if (fmd->mmap_ptr == MAP_FAILED) {
+		fmd->mmap_ptr = NULL;
+		td_verror(td, errno, "mmap");
 		goto err;
 	}
 
 	if (!td_random(td)) {
-		if (madvise(f->mmap_ptr, length, MADV_SEQUENTIAL) < 0) {
+		if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) {
 			td_verror(td, errno, "madvise");
 			goto err;
 		}
 	} else {
-		if (madvise(f->mmap_ptr, length, MADV_RANDOM) < 0) {
+		if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) {
 			td_verror(td, errno, "madvise");
 			goto err;
 		}
 	}
+	if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_DONTNEED) < 0) {
+		td_verror(td, errno, "madvise");
+		goto err;
+	}
+
+#ifdef FIO_MADV_FREE
+	if (f->filetype == FIO_TYPE_BLOCK)
+		(void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE);
+#endif
 
 err:
-	return ret;
+	if (td->error && fmd->mmap_ptr)
+		munmap(fmd->mmap_ptr, length);
+
+	return td->error;
 }
 
-static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
+/*
+ * Just mmap an appropriate portion, we cannot mmap the full extent
+ */
+static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
-	int ret = 0;
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
 
 	if (io_u->buflen > mmap_map_size) {
 		log_err("fio: bs too big for mmap engine\n");
-		ret = EIO;
-		goto err;
+		return EIO;
 	}
 
-	if (io_u->offset >= f->mmap_off &&
-	    io_u->offset + io_u->buflen < f->mmap_off + f->mmap_sz)
-		goto done;
+	fmd->mmap_sz = mmap_map_size;
+	if (fmd->mmap_sz > f->io_size)
+		fmd->mmap_sz = f->io_size;
 
-	if (f->mmap_ptr) {
-		if (munmap(f->mmap_ptr, f->mmap_sz) < 0) {
-			ret = errno;
-			goto err;
-		}
-		f->mmap_ptr = NULL;
+	fmd->mmap_off = io_u->offset;
+
+	return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
+}
+
+/*
+ * Attempt to mmap the entire file
+ */
+static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
+{
+	struct fio_file *f = io_u->file;
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+	int ret;
+
+	if (fio_file_partial_mmap(f))
+		return EINVAL;
+	if (io_u->offset != (size_t) io_u->offset ||
+	    f->io_size != (size_t) f->io_size) {
+		fio_file_set_partial_mmap(f);
+		return EINVAL;
 	}
 
-	f->mmap_sz = mmap_map_size;
-	if (f->mmap_sz > f->io_size)
-		f->mmap_sz = f->io_size;
+	fmd->mmap_sz = f->io_size;
+	fmd->mmap_off = 0;
 
-	f->mmap_off = io_u->offset & ~mmap_map_mask;
-	if (io_u->offset + io_u->buflen >= f->mmap_off + f->mmap_sz)
-		f->mmap_off -= io_u->buflen;
+	ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
+	if (ret)
+		fio_file_set_partial_mmap(f);
 
-	ret = fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
-done:
-	if (!ret)
-		io_u->mmap_data = f->mmap_ptr + io_u->offset - f->mmap_off -
-					f->file_offset;
-err:
 	return ret;
 }
 
+static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
+{
+	struct fio_file *f = io_u->file;
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+	int ret;
+
+	/*
+	 * It fits within existing mapping, use it
+	 */
+	if (io_u->offset >= fmd->mmap_off &&
+	    io_u->offset + io_u->buflen < fmd->mmap_off + fmd->mmap_sz)
+		goto done;
+
+	/*
+	 * unmap any existing mapping
+	 */
+	if (fmd->mmap_ptr) {
+		if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
+			return errno;
+		fmd->mmap_ptr = NULL;
+	}
+
+	if (fio_mmapio_prep_full(td, io_u)) {
+		td_clear_error(td);
+		ret = fio_mmapio_prep_limited(td, io_u);
+		if (ret)
+			return ret;
+	}
+
+done:
+	io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
+				f->file_offset;
+	return 0;
+}
+
 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
 
 	fio_ro_check(td, io_u);
 
@@ -113,22 +173,28 @@ static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
 		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
 	else if (io_u->ddir == DDIR_WRITE)
 		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
-	else if (io_u->ddir == DDIR_SYNC) {
-		if (msync(f->mmap_ptr, f->mmap_sz, MS_SYNC)) {
+	else if (ddir_sync(io_u->ddir)) {
+		if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
 			io_u->error = errno;
 			td_verror(td, io_u->error, "msync");
 		}
+	} else if (io_u->ddir == DDIR_TRIM) {
+		int ret = do_io_u_trim(td, io_u);
+
+		if (!ret)
+			td_verror(td, io_u->error, "trim");
 	}
+
 	/*
 	 * not really direct, but should drop the pages from the cache
 	 */
-	if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
+	if (td->o.odirect && ddir_rw(io_u->ddir)) {
 		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
 			io_u->error = errno;
 			td_verror(td, io_u->error, "msync");
 		}
-		if (madvise(io_u->mmap_data, io_u->xfer_buflen, MADV_DONTNEED) < 0) {
+		if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
 			io_u->error = errno;
 			td_verror(td, io_u->error, "madvise");
 		}
 	}
@@ -139,30 +205,58 @@ static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
 
 static int fio_mmapio_init(struct thread_data *td)
 {
-	unsigned long shift, mask;
-
-	mmap_map_size = MMAP_TOTAL_SZ / td->o.nr_files;
-	mask = mmap_map_size;
-	shift = 0;
-	do {
-		mask >>= 1;
-		if (!mask)
-			break;
-		shift++;
-	} while (1);
-
-	mmap_map_mask = 1UL << shift;
+	struct thread_options *o = &td->o;
+
+	if ((o->rw_min_bs & page_mask) &&
+	    (o->odirect || o->fsync_blocks || o->fdatasync_blocks)) {
+		log_err("fio: mmap options dictate a minimum block size of "
+			"%llu bytes\n", (unsigned long long) page_size);
+		return 1;
+	}
+
+	mmap_map_size = MMAP_TOTAL_SZ / o->nr_files;
+	return 0;
+}
+
+static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
+{
+	struct fio_mmap_data *fmd;
+	int ret;
+
+	ret = generic_open_file(td, f);
+	if (ret)
+		return ret;
+
+	fmd = calloc(1, sizeof(*fmd));
+	if (!fmd) {
+		int fio_unused __ret;
+		__ret = generic_close_file(td, f);
+		return 1;
+	}
+
+	FILE_SET_ENG_DATA(f, fmd);
 	return 0;
 }
 
+static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
+{
+	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
+
+	FILE_SET_ENG_DATA(f, NULL);
+	free(fmd);
+	fio_file_clear_partial_mmap(f);
+
+	return generic_close_file(td, f);
+}
+
 static struct ioengine_ops ioengine = {
 	.name		= "mmap",
 	.version	= FIO_IOOPS_VERSION,
 	.init		= fio_mmapio_init,
 	.prep		= fio_mmapio_prep,
 	.queue		= fio_mmapio_queue,
-	.open_file	= generic_open_file,
-	.close_file	= generic_close_file,
+	.open_file	= fio_mmapio_open_file,
+	.close_file	= fio_mmapio_close_file,
 	.get_file_size	= generic_get_file_size,
 	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
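
Below is a small, self-contained sketch of the posix_madvise() calling pattern the patch switches to (POSIX_MADV_SEQUENTIAL or POSIX_MADV_RANDOM, then POSIX_MADV_DONTNEED). It is not fio code: the file path argument and the read-only mapping are assumptions made for illustration, and unlike madvise(), posix_madvise() reports failure via its return value rather than errno.

/* Illustration only: map a file read-only and apply the same advice
 * sequence the patched engine uses for its per-file mapping. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void advise(void *p, size_t len, int advice)
{
	/* posix_madvise() returns an error number directly, 0 on success */
	int err = posix_madvise(p, len, advice);

	if (err)
		fprintf(stderr, "posix_madvise: %s\n", strerror(err));
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";	/* assumed input */
	int sequential = 1;	/* the engine picks this based on td_random() */
	struct stat st;
	void *p;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fstat(fd, &st) < 0) {
		perror("fstat");
		close(fd);
		return 1;
	}

	p = mmap(NULL, (size_t) st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	advise(p, (size_t) st.st_size,
	       sequential ? POSIX_MADV_SEQUENTIAL : POSIX_MADV_RANDOM);
	advise(p, (size_t) st.st_size, POSIX_MADV_DONTNEED);

	munmap(p, (size_t) st.st_size);
	close(fd);
	return 0;
}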