/*
 * libpmem: IO engine that uses NVML libpmem to read and write data
 *
 * Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License,
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * libpmem engine
 *
 * IO engine that uses libpmem to read and write data
 *
 * To use:
 *   ioengine=libpmem
 *
 * Other relevant settings:
 *   directory=/mnt/pmem0/
 *
 *   direct=1 means that pmem_drain() is executed for each write operation.
 *   In contrast, direct=0 means that pmem_drain() is not executed.
 *
 *   The pmem device must have a DAX-capable filesystem and be mounted
 *   with DAX enabled.  directory must point to a mount point of DAX FS.
 *
 *   Example:
 *     mount -o dax /dev/pmem0 /mnt/pmem0
 *
 * See examples/libpmem.fio for more.
 *
 * libpmem.so
 *   By default, the libpmem engine will let the system find the libpmem.so
 *   that it uses.  You can use an alternative libpmem by setting the
 *   FIO_PMEM_LIB environment variable to the full path to the desired
 *   libpmem.so.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <libpmem.h>

#include "../fio.h"
#include "../verify.h"
/*
 * Limits us to 1GiB of mapped files in total to model after
 * libpmem engine behavior
 */
71 #define MMAP_TOTAL_SZ (1 * 1024 * 1024 * 1024UL)
/*
 * Per-file engine state: the current mmap()ed window into the file.
 * (Reconstructed: the original member lines were lost; the field names and
 * types follow their uses throughout this file — munmap(ptr, sz), off_t
 * offsets passed to fio_libpmem_file().)
 */
struct fio_libpmem_data {
	void *libpmem_ptr;	/* base address of the mmap()ed region, NULL if unmapped */
	size_t libpmem_sz;	/* length of the mapped region */
	off_t libpmem_off;	/* file offset at which the mapping starts */
};
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
#define PROCMAXLEN 2048 /* maximum expected line length in /proc files */
/* round x up to the nearest multiple of y (y must be non-zero) */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* non-zero once PMEM_MMAP_HINT disables address randomization */
static int Mmap_no_random;
/* user-supplied mmap() hint address parsed from PMEM_MMAP_HINT */
static void *Mmap_hint;
/* base mapping alignment; lazily initialized in util_map_hint_align() */
static unsigned long long Mmap_align;
/* system page size; set via sysconf() in util_map_hint_unused() */
static unsigned long long Pagesize = 0;
90 * util_map_hint_align -- choose the desired mapping alignment
92 * Use 2MB/1GB page alignment only if the mapping length is at least
93 * twice as big as the page size.
96 util_map_hint_align(size_t len, size_t req_align)
100 dprint(FD_IO, "DEBUG util_map_hint_align\n" );
102 Mmap_align = Pagesize;
104 if (Mmap_align == 0) {
107 Mmap_align = si.dwAllocationGranularity;
115 else if (len >= 2 * GIGABYTE)
117 else if (len >= 4 * MEGABYTE)
118 align = 2 * MEGABYTE;
120 dprint(FD_IO, "align=%d\n", (int)align);
/*
 * OS-specific format of a range line in the process map file.
 * FreeBSD's /proc/curproc/map separates addresses with a space;
 * Linux's /proc/self/maps uses a dash.
 */
#ifdef __FreeBSD__
static const char *sscanf_os = "%p %p";
#define MAP_NORESERVE 0		/* FreeBSD has no MAP_NORESERVE */
#define OS_MAPFILE "/proc/curproc/map"
#else
static const char *sscanf_os = "%p-%p";
#define OS_MAPFILE "/proc/self/maps"
#endif
134 * util_map_hint_unused -- use /proc to determine a hint address for mmap()
136 * This is a helper function for util_map_hint().
137 * It opens up /proc/self/maps and looks for the first unused address
138 * in the process address space that is:
139 * - greater or equal 'minaddr' argument,
140 * - large enough to hold range of given length,
141 * - aligned to the specified unit.
143 * Asking for aligned address like this will allow the DAX code to use large
144 * mappings. It is not an error if mmap() ignores the hint and chooses
147 static char * util_map_hint_unused(void *minaddr, size_t len, size_t align)
149 char *lo = NULL; /* beginning of current range in maps file */
150 char *hi = NULL; /* end of current range in maps file */
151 char *raddr = minaddr; /* ignore regions below 'minaddr' */
154 MEMORY_BASIC_INFORMATION mi;
157 char line[PROCMAXLEN]; /* for fgets() */
160 dprint(FD_IO, "DEBUG util_map_hint_unused\n");
163 /* XXX - replace sysconf() with util_get_sys_xxx() */
164 Pagesize = (unsigned long) sysconf(_SC_PAGESIZE);
169 raddr = (char *)roundup((uintptr_t)raddr, align);
172 while ((uintptr_t)raddr < UINTPTR_MAX - len) {
173 size_t ret = VirtualQuery(raddr, &mi, sizeof(mi));
175 ERR("VirtualQuery %p", raddr);
178 dprint(FD_IO, "addr %p len %zu state %d",
179 mi.BaseAddress, mi.RegionSize, mi.State);
181 if ((mi.State != MEM_FREE) || (mi.RegionSize < len)) {
182 raddr = (char *)mi.BaseAddress + mi.RegionSize;
183 raddr = (char *)roundup((uintptr_t)raddr, align);
184 dprint(FD_IO, "nearest aligned addr %p", raddr);
186 dprint(FD_IO, "unused region of size %zu found at %p",
187 mi.RegionSize, mi.BaseAddress);
188 return mi.BaseAddress;
192 dprint(FD_IO, "end of address space reached");
195 if ((fp = fopen(OS_MAPFILE, "r")) == NULL) {
196 log_err("!%s\n", OS_MAPFILE);
200 while (fgets(line, PROCMAXLEN, fp) != NULL) {
201 /* check for range line */
202 if (sscanf(line, sscanf_os, &lo, &hi) == 2) {
203 dprint(FD_IO, "%p-%p\n", lo, hi);
205 if ((uintptr_t)(lo - raddr) >= len) {
206 dprint(FD_IO, "unused region of size "
211 dprint(FD_IO, "region is too small: "
218 raddr = (char *)roundup((uintptr_t)hi, align);
219 dprint(FD_IO, "nearest aligned addr %p\n",
224 dprint(FD_IO, "end of address space reached\n");
231 * Check for a case when this is the last unused range in the address
232 * space, but is not large enough. (very unlikely)
234 if ((raddr != NULL) && (UINTPTR_MAX - (uintptr_t)raddr < len)) {
235 dprint(FD_IO, "end of address space reached");
241 dprint(FD_IO, "returning %p", raddr);
247 * util_map_hint -- determine hint address for mmap()
249 * If PMEM_MMAP_HINT environment variable is not set, we let the system to pick
250 * the randomized mapping address. Otherwise, a user-defined hint address
253 * Windows Environment:
254 * XXX - Windows doesn't support large DAX pages yet, so there is
255 * no point in aligning for the same.
257 * Except for Windows Environment:
258 * ALSR in 64-bit Linux kernel uses 28-bit of randomness for mmap
259 * (bit positions 12-39), which means the base mapping address is randomized
260 * within [0..1024GB] range, with 4KB granularity. Assuming additional
261 * 1GB alignment, it results in 1024 possible locations.
263 * Configuring the hint address via PMEM_MMAP_HINT environment variable
264 * disables address randomization. In such case, the function will search for
265 * the first unused, properly aligned region of given size, above the
268 static char * util_map_hint(size_t len, size_t req_align)
274 dprint(FD_IO, "DEBUG util_map_hint\n");
275 dprint(FD_IO, "len %zu req_align %zu\n", len, req_align);
277 /* choose the desired alignment based on the requested length */
278 align = util_map_hint_align(len, req_align);
280 e = getenv("PMEM_MMAP_HINT");
283 unsigned long long val = 0;
287 val = strtoull(e, &endp, 16);
288 if (errno || endp == e) {
289 dprint(FD_IO, "Invalid PMEM_MMAP_HINT\n");
291 Mmap_hint = (void *)val;
293 dprint(FD_IO, "PMEM_MMAP_HINT set to %p\n", Mmap_hint);
297 if (Mmap_no_random) {
298 dprint(FD_IO, "user-defined hint %p\n", (void *)Mmap_hint);
299 addr = util_map_hint_unused((void *)Mmap_hint, len, align);
302 * Create dummy mapping to find an unused region of given size.
303 * * Request for increased size for later address alignment.
305 * Windows Environment:
306 * Use MAP_NORESERVE flag to only reserve the range of pages
307 * rather than commit. We don't want the pages to be actually
308 * backed by the operating system paging file, as the swap
309 * file is usually too small to handle terabyte pools.
311 * Except for Windows Environment:
312 * Use MAP_PRIVATE with read-only access to simulate
313 * zero cost for overcommit accounting. Note: MAP_NORESERVE
314 * flag is ignored if overcommit is disabled (mode 2).
317 addr = mmap(NULL, len + align, PROT_READ,
318 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
320 addr = mmap(NULL, len + align, PROT_READ,
321 MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
323 if (addr != MAP_FAILED) {
324 dprint(FD_IO, "system choice %p\n", addr);
325 munmap(addr, len + align);
326 addr = (char *)roundup((uintptr_t)addr, align);
330 dprint(FD_IO, "hint %p\n", addr);
336 * This is the mmap execution function
338 static int fio_libpmem_file(struct thread_data *td, struct fio_file *f,
339 size_t length, off_t off)
341 struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
345 dprint(FD_IO, "DEBUG fio_libpmem_file\n");
348 flags = PROT_READ | PROT_WRITE;
349 else if (td_write(td)) {
352 if (td->o.verify != VERIFY_NONE)
357 dprint(FD_IO, "f->file_name = %s td->o.verify = %d \n", f->file_name,
359 dprint(FD_IO, "length = %ld flags = %d f->fd = %d off = %ld \n",
360 length, flags, f->fd,off);
362 addr = util_map_hint(length, 0);
364 fdd->libpmem_ptr = mmap(addr, length, flags, MAP_SHARED, f->fd, off);
365 if (fdd->libpmem_ptr == MAP_FAILED) {
366 fdd->libpmem_ptr = NULL;
367 td_verror(td, errno, "mmap");
370 if (td->error && fdd->libpmem_ptr)
371 munmap(fdd->libpmem_ptr, length);
377 * XXX Just mmap an appropriate portion, we cannot mmap the full extent
379 static int fio_libpmem_prep_limited(struct thread_data *td, struct io_u *io_u)
381 struct fio_file *f = io_u->file;
382 struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
384 dprint(FD_IO, "DEBUG fio_libpmem_prep_limited\n" );
386 if (io_u->buflen > f->real_file_size) {
387 log_err("libpmem: bs too big for libpmem engine\n");
391 fdd->libpmem_sz = min(MMAP_TOTAL_SZ, f->real_file_size);
392 if (fdd->libpmem_sz > f->io_size)
393 fdd->libpmem_sz = f->io_size;
395 fdd->libpmem_off = io_u->offset;
397 return fio_libpmem_file(td, f, fdd->libpmem_sz, fdd->libpmem_off);
401 * Attempt to mmap the entire file
403 static int fio_libpmem_prep_full(struct thread_data *td, struct io_u *io_u)
405 struct fio_file *f = io_u->file;
406 struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
409 dprint(FD_IO, "DEBUG fio_libpmem_prep_full\n" );
411 if (fio_file_partial_mmap(f))
414 dprint(FD_IO," f->io_size %ld : io_u->offset %lld \n",
415 f->io_size, io_u->offset);
417 if (io_u->offset != (size_t) io_u->offset ||
418 f->io_size != (size_t) f->io_size) {
419 fio_file_set_partial_mmap(f);
422 fdd->libpmem_sz = f->io_size;
423 fdd->libpmem_off = 0;
425 ret = fio_libpmem_file(td, f, fdd->libpmem_sz, fdd->libpmem_off);
427 fio_file_set_partial_mmap(f);
432 static int fio_libpmem_prep(struct thread_data *td, struct io_u *io_u)
434 struct fio_file *f = io_u->file;
435 struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
438 dprint(FD_IO, "DEBUG fio_libpmem_prep\n" );
440 * It fits within existing mapping, use it
442 dprint(FD_IO," io_u->offset %lld : fdd->libpmem_off %ld : "
443 "io_u->buflen %ld : fdd->libpmem_sz %ld\n",
444 io_u->offset, fdd->libpmem_off,
445 io_u->buflen, fdd->libpmem_sz);
447 if (io_u->offset >= fdd->libpmem_off &&
448 io_u->offset + io_u->buflen <
449 fdd->libpmem_off + fdd->libpmem_sz)
453 * unmap any existing mapping
455 if (fdd->libpmem_ptr) {
456 dprint(FD_IO,"munmap \n");
457 if (munmap(fdd->libpmem_ptr, fdd->libpmem_sz) < 0)
459 fdd->libpmem_ptr = NULL;
462 if (fio_libpmem_prep_full(td, io_u)) {
464 ret = fio_libpmem_prep_limited(td, io_u);
470 io_u->mmap_data = fdd->libpmem_ptr + io_u->offset - fdd->libpmem_off
475 static int fio_libpmem_queue(struct thread_data *td, struct io_u *io_u)
477 fio_ro_check(td, io_u);
480 dprint(FD_IO, "DEBUG fio_libpmem_queue\n");
482 switch (io_u->ddir) {
484 memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
487 dprint(FD_IO, "DEBUG mmap_data=%p, xfer_buf=%p\n",
488 io_u->mmap_data, io_u->xfer_buf );
489 dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
490 if(td->o.odirect == 1){
491 pmem_memcpy_persist(io_u->mmap_data,
495 pmem_memcpy_nodrain(io_u->mmap_data,
502 case DDIR_SYNC_FILE_RANGE:
505 io_u->error = EINVAL;
509 return FIO_Q_COMPLETED;
512 static int fio_libpmem_init(struct thread_data *td)
514 struct thread_options *o = &td->o;
516 dprint(FD_IO,"o->rw_min_bs %d \n o->fsync_blocks %d \n o->fdatasync_blocks %d \n",
517 o->rw_min_bs,o->fsync_blocks,o->fdatasync_blocks);
518 dprint(FD_IO, "DEBUG fio_libpmem_init\n");
520 if ((o->rw_min_bs & page_mask) &&
521 (o->fsync_blocks || o->fdatasync_blocks)) {
522 log_err("libpmem: mmap options dictate a minimum block size of "
523 "%llu bytes\n", (unsigned long long) page_size);
529 static int fio_libpmem_open_file(struct thread_data *td, struct fio_file *f)
531 struct fio_libpmem_data *fdd;
534 dprint(FD_IO,"DEBUG fio_libpmem_open_file\n");
535 dprint(FD_IO,"f->io_size=%ld \n",f->io_size);
536 dprint(FD_IO,"td->o.size=%lld \n",td->o.size);
537 dprint(FD_IO,"td->o.iodepth=%d\n",td->o.iodepth);
538 dprint(FD_IO,"td->o.iodepth_batch=%d \n",td->o.iodepth_batch);
540 ret = generic_open_file(td, f);
544 fdd = calloc(1, sizeof(*fdd));
546 int fio_unused __ret;
547 __ret = generic_close_file(td, f);
551 FILE_SET_ENG_DATA(f, fdd);
556 static int fio_libpmem_close_file(struct thread_data *td, struct fio_file *f)
558 struct fio_libpmem_data *fdd = FILE_ENG_DATA(f);
560 dprint(FD_IO,"DEBUG fio_libpmem_close_file\n");
561 dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);
563 if (td->o.odirect != 1) {
564 dprint(FD_IO,"pmem_drain\n");
568 FILE_SET_ENG_DATA(f, NULL);
570 fio_file_clear_partial_mmap(f);
572 return generic_close_file(td, f);
575 static struct ioengine_ops ioengine = {
577 .version = FIO_IOOPS_VERSION,
578 .init = fio_libpmem_init,
579 .prep = fio_libpmem_prep,
580 .queue = fio_libpmem_queue,
581 .open_file = fio_libpmem_open_file,
582 .close_file = fio_libpmem_close_file,
583 .get_file_size = generic_get_file_size,
584 .flags = FIO_SYNCIO |FIO_NOEXTEND,
587 static void fio_init fio_libpmem_register(void)
589 register_ioengine(&ioengine);
592 static void fio_exit fio_libpmem_unregister(void)
594 unregister_ioengine(&ioengine);