#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <nfsc/libnfs.h>
#include <nfsc/libnfs-raw.h>
#include <nfsc/libnfs-raw-mount.h>

#include "../optgroup.h"
/* Engine-private state, one per thread (reached via td->eo). */
struct fio_libnfs_options {
	struct nfs_context *context;	/* libnfs connection; NULL until do_mount() runs */
	char *nfs_url;			/* user-supplied nfs:// URL (option storage) */
	unsigned int queue_depth; /* nfs_callback needs this info, but doesn't have fio td structure to pull it from */
	/* the following implement a circular queue of outstanding IOs */
	int outstanding_events; /* IOs issued to libnfs, that have not returned yet */
	int prev_requested_event_index; /* event last returned via fio_libnfs_event */
	int next_buffered_event; /* round robin-pointer within events[] */
	int buffered_event_count; /* IOs completed by libnfs, waiting for FIO */
	int free_event_buffer_index; /* next free buffer */
	struct io_u **events;	/* completion ring, sized to iodepth in do_mount() */
};

/* Per-file handle: the open libnfs fh plus a back-pointer to the options. */
struct nfs_data {
	struct nfsfh *nfsfh;
	struct fio_libnfs_options *options;
};
34 static struct fio_option options[] = {
38 .type = FIO_OPT_STR_STORE,
39 .help = "URL in libnfs format, eg nfs://<server|ipv4|ipv6>/path[?arg=val[&arg=val]*]",
40 .off1 = offsetof(struct fio_libnfs_options, nfs_url),
41 .category = FIO_OPT_C_ENGINE,
42 .group = __FIO_OPT_G_NFS,
49 static struct io_u *fio_libnfs_event(struct thread_data *td, int event)
51 struct fio_libnfs_options *o = td->eo;
52 struct io_u *io_u = o->events[o->next_buffered_event];
53 assert(o->events[o->next_buffered_event]);
54 o->events[o->next_buffered_event] = NULL;
55 o->next_buffered_event = (o->next_buffered_event + 1) % td->o.iodepth;
56 /* validate our state machine */
57 assert(o->buffered_event_count);
58 o->buffered_event_count--;
60 /* assert that fio_libnfs_event is being called in sequential fashion */
61 assert(event == 0 || o->prev_requested_event_index + 1 == event);
62 if (o->buffered_event_count == 0) {
63 o->prev_requested_event_index = -1;
65 o->prev_requested_event_index = event;
70 static int nfs_event_loop(struct thread_data *td, bool flush) {
71 struct fio_libnfs_options *o = td->eo;
72 struct pollfd pfds[1]; /* nfs:0 */
73 /* we already have stuff queued for fio, no need to waste cpu on poll() */
74 if (o->buffered_event_count)
75 return o->buffered_event_count;
76 /* fio core logic seems to stop calling this event-loop if we ever return with 0 events */
77 #define SHOULD_WAIT() (o->outstanding_events == td->o.iodepth || (flush && o->outstanding_events))
80 int timeout = SHOULD_WAIT() ? -1 : 0;
82 pfds[0].fd = nfs_get_fd(o->context);
83 pfds[0].events = nfs_which_events(o->context);
84 ret = poll(&pfds[0], 1, timeout);
86 if (errno == EINTR || errno == EAGAIN) {
89 log_err("nfs: failed to poll events: %s.\n",
94 ret = nfs_service(o->context, pfds[0].revents);
96 log_err("nfs: socket is in an unrecoverable error state.\n");
99 } while (SHOULD_WAIT());
100 return o->buffered_event_count;
/*
 * fio getevents hook.  min/max/t are intentionally ignored: the event
 * loop simply returns however many IOs have completed (flush=false).
 */
static int fio_libnfs_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	return nfs_event_loop(td, false);
}
110 static void nfs_callback(int res, struct nfs_context *nfs, void *data,
113 struct io_u *io_u = private_data;
114 struct nfs_data *nfs_data = io_u->file->engine_data;
115 struct fio_libnfs_options *o = nfs_data->options;
117 log_err("Failed NFS operation(code:%d): %s\n", res, nfs_get_error(o->context));
119 /* res is used for read math below, don't wanna pass negative there */
121 } else if (io_u->ddir == DDIR_READ) {
122 memcpy(io_u->buf, data, res);
124 log_err("Got NFS EOF, this is probably not expected\n");
126 /* fio uses resid to track remaining data */
127 io_u->resid = io_u->xfer_buflen - res;
129 assert(!o->events[o->free_event_buffer_index]);
130 o->events[o->free_event_buffer_index] = io_u;
131 o->free_event_buffer_index = (o->free_event_buffer_index + 1) % o->queue_depth;
132 o->outstanding_events--;
133 o->buffered_event_count++;
136 static int queue_write(struct fio_libnfs_options *o, struct io_u *io_u) {
137 struct nfs_data *nfs_data = io_u->engine_data;
138 return nfs_pwrite_async(o->context, nfs_data->nfsfh,
139 io_u->offset, io_u->buflen, io_u->buf, nfs_callback,
143 static int queue_read(struct fio_libnfs_options *o, struct io_u *io_u) {
144 struct nfs_data *nfs_data = io_u->engine_data;
145 return nfs_pread_async(o->context, nfs_data->nfsfh, io_u->offset, io_u->buflen, nfs_callback, io_u);
148 static enum fio_q_status fio_libnfs_queue(struct thread_data *td,
151 struct nfs_data *nfs_data = io_u->file->engine_data;
152 struct fio_libnfs_options *o = nfs_data->options;
153 struct nfs_context *nfs = o->context;
155 enum fio_q_status ret = FIO_Q_QUEUED;
157 io_u->engine_data = nfs_data;
160 err = queue_write(o, io_u);
163 err = queue_read(o, io_u);
166 log_err("nfs: trim is not supported");
170 log_err("nfs: unhandled io %d\n", io_u->ddir);
174 log_err("nfs: Failed to queue nfs op: %s\n", nfs_get_error(nfs));
176 return FIO_Q_COMPLETED;
178 o->outstanding_events++;
183 * Do a mount if one has not been done before
185 static int do_mount(struct thread_data *td, const char *url)
187 size_t event_size = sizeof(struct io_u **) * td->o.iodepth;
188 struct fio_libnfs_options *options = td->eo;
189 struct nfs_url *nfs_url = NULL;
192 char *mnt_dir = NULL;
194 if (options->context)
197 options->context = nfs_init_context();
198 if (options->context == NULL) {
199 log_err("nfs: failed to init nfs context\n");
203 options->events = malloc(event_size);
204 memset(options->events, 0, event_size);
206 options->prev_requested_event_index = -1;
207 options->queue_depth = td->o.iodepth;
209 nfs_url = nfs_parse_url_full(options->context, url);
210 path_len = strlen(nfs_url->path);
211 mnt_dir = malloc(path_len + strlen(nfs_url->file) + 1);
212 strcpy(mnt_dir, nfs_url->path);
213 strcpy(mnt_dir + strlen(nfs_url->path), nfs_url->file);
214 ret = nfs_mount(options->context, nfs_url->server, mnt_dir);
216 nfs_destroy_url(nfs_url);
220 static int fio_libnfs_setup(struct thread_data *td)
222 /* Using threads with libnfs causes fio to hang on exit, lower performance */
223 td->o.use_thread = 0;
227 static void fio_libnfs_cleanup(struct thread_data *td)
229 struct fio_libnfs_options *o = td->eo;
230 nfs_umount(o->context);
231 nfs_destroy_context(o->context);
235 static int fio_libnfs_open(struct thread_data *td, struct fio_file *f)
238 struct fio_libnfs_options *options = td->eo;
239 struct nfs_data *nfs_data = NULL;
242 if (!options->nfs_url) {
243 log_err("nfs: nfs_url is a required parameter\n");
247 ret = do_mount(td, options->nfs_url);
250 log_err("nfs: Failed to mount %s with code %d: %s\n", options->nfs_url, ret, nfs_get_error(options->context));
253 nfs_data = malloc(sizeof(struct nfs_data));
254 memset(nfs_data, 0, sizeof(struct nfs_data));
255 nfs_data->options = options;
257 if (td->o.td_ddir == TD_DDIR_WRITE) {
258 flags |= O_CREAT | O_RDWR;
262 ret = nfs_open(options->context, f->file_name, flags, &nfs_data->nfsfh);
265 log_err("Failed to open %s: %s\n", f->file_name, nfs_get_error(options->context));
266 f->engine_data = nfs_data;
270 static int fio_libnfs_close(struct thread_data *td, struct fio_file *f)
272 struct nfs_data *nfs_data = f->engine_data;
273 struct fio_libnfs_options *o = nfs_data->options;
276 ret = nfs_close(o->context, nfs_data->nfsfh);
278 f->engine_data = NULL;
282 struct ioengine_ops ioengine = {
284 .version = FIO_IOOPS_VERSION,
285 .setup = fio_libnfs_setup,
286 .queue = fio_libnfs_queue,
287 .getevents = fio_libnfs_getevents,
288 .event = fio_libnfs_event,
289 .cleanup = fio_libnfs_cleanup,
290 .open_file = fio_libnfs_open,
291 .close_file = fio_libnfs_close,
292 .flags = FIO_DISKLESSIO | FIO_NOEXTEND | FIO_NODISKUTIL,
294 .option_struct_size = sizeof(struct fio_libnfs_options),
297 static void fio_init fio_nfs_register(void)
299 register_ioengine(&ioengine);
302 static void fio_exit fio_nfs_unregister(void)
304 unregister_ioengine(&ioengine);