// https://github.com/axboe/fio/pull/762 — sample pull request for a new engine
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <fcntl.h>

#include <nfsc/libnfs.h>
#include <nfsc/libnfs-raw.h>
#include <nfsc/libnfs-raw-mount.h>

#include "../fio.h"
#include "../optgroup.h"
17 struct fio_libnfs_options {
18 struct nfs_context *context;
20 // the following implements a circular queue of outstanding IOs
21 int outstanding_events; // IOs issued to libnfs, that have not returned yet
22 int prev_requested_event_index; // event last returned via fio_libnfs_event
23 int next_buffered_event; // round robin-pointer within events[]
24 int buffered_event_count; // IOs completed by libnfs faiting for FIO
25 int free_event_buffer_index; // next empty buffer
26 unsigned int queue_depth; // nfs_callback needs this info, but doesn't have fio td structure to pull it from
32 struct fio_libnfs_options *options;
35 static struct fio_option options[] = {
39 .type = FIO_OPT_STR_STORE,
40 .help = "URL in libnfs format, eg nfs://<server|ipv4|ipv6>/path[?arg=val[&arg=val]*]",
41 .off1 = offsetof(struct fio_libnfs_options, nfs_url),
42 .category = FIO_OPT_C_ENGINE,
43 .group = __FIO_OPT_G_NFS,
/*
 * The ->event() hook is called to match an event number with an io_u.
 * After the core has called ->getevents() and it has returned eg 3,
 * the ->event() hook must return the 3 events that have completed for
 * subsequent calls to ->event() with [0-2]. Required.
 */
56 static struct io_u *fio_libnfs_event(struct thread_data *td, int event)
58 struct fio_libnfs_options *o = td->eo;
59 struct io_u *io_u = o->events[o->next_buffered_event];
60 assert(o->events[o->next_buffered_event]);
61 o->events[o->next_buffered_event] = NULL;
62 o->next_buffered_event = (o->next_buffered_event + 1) % td->o.iodepth;
63 // validate our state machine
64 assert(o->buffered_event_count);
65 o->buffered_event_count--;
67 // assert that fio_libnfs_event is being called in sequential fashion
68 assert(event == 0 || o->prev_requested_event_index + 1 == event);
69 if (o->buffered_event_count == 0) {
70 o->prev_requested_event_index = -1;
72 o->prev_requested_event_index = event;
77 static int nfs_event_loop(struct thread_data *td, bool flush) {
78 struct fio_libnfs_options *o = td->eo;
79 struct pollfd pfds[1]; /* nfs:0 */
80 // we already have stuff queued for fio, no need to waste cpu on poll()
81 if (o->buffered_event_count) {
82 return o->buffered_event_count;
84 // fio core logic seems to stop calling this event-loop if we ever return with 0 events
85 #define SHOULD_WAIT() (o->outstanding_events == td->o.iodepth || (flush && o->outstanding_events))
88 int timeout = SHOULD_WAIT() ? -1 : 0;
90 pfds[0].fd = nfs_get_fd(o->context);
91 pfds[0].events = nfs_which_events(o->context);
92 ret = poll(&pfds[0], 1, timeout);
94 if (errno == EINTR || errno == EAGAIN) {
97 log_err("nfs: failed to poll events: %s.\n",
102 ret = nfs_service(o->context, pfds[0].revents);
104 log_err("nfs: socket is in an unrecoverable error state.\n");
107 } while (SHOULD_WAIT());
108 return o->buffered_event_count;
/*
 * The ->getevents() hook is used to reap completion events from an async
 * io engine. It returns the number of completed events since the last call,
 * which may then be retrieved by calling the ->event() hook with the event
 * numbers. Required.
 */
118 static int fio_libnfs_getevents(struct thread_data *td, unsigned int min,
119 unsigned int max, const struct timespec *t)
121 return nfs_event_loop(td, false);
124 static void nfs_callback(int res, struct nfs_context *nfs, void *data,
127 struct io_u *io_u = private_data;
128 struct nfs_data *nfs_data = io_u->file->engine_data;
129 struct fio_libnfs_options *o = nfs_data->options;
131 log_err("Failed NFS operation(code:%d): %s\n", res, nfs_get_error(o->context));
133 // res is used for read math below, don't wanna pass negative there
135 } else if (io_u->ddir == DDIR_READ) {
136 memcpy(io_u->buf, data, res);
138 log_err("Got NFS EOF, this is probably not expected\n");
141 // fio uses resid to track remaining data
142 io_u->resid = io_u->xfer_buflen - res;
144 assert(!o->events[o->free_event_buffer_index]);
145 o->events[o->free_event_buffer_index] = io_u;
146 o->free_event_buffer_index = (o->free_event_buffer_index + 1) % o->queue_depth;
147 o->outstanding_events--;
148 o->buffered_event_count++;
151 static int queue_write(struct fio_libnfs_options *o, struct io_u *io_u) {
152 struct nfs_data *nfs_data = io_u->engine_data;
153 return nfs_pwrite_async(o->context, nfs_data->nfsfh,
154 io_u->offset, io_u->buflen, io_u->buf, nfs_callback,
158 static int queue_read(struct fio_libnfs_options *o, struct io_u *io_u) {
159 struct nfs_data *nfs_data = io_u->engine_data;
160 return nfs_pread_async(o->context, nfs_data->nfsfh, io_u->offset, io_u->buflen, nfs_callback, io_u);
/*
 * The ->queue() hook is responsible for initiating io on the io_u
 * being passed in. If the io engine is a synchronous one, io may complete
 * before ->queue() returns. Required.
 *
 * The io engine must transfer in the direction noted by io_u->ddir
 * to the buffer pointed to by io_u->xfer_buf for as many bytes as
 * io_u->xfer_buflen. Residual data count may be set in io_u->resid
 * for a short read/write.
 */
173 static enum fio_q_status fio_libnfs_queue(struct thread_data *td,
176 struct nfs_data *nfs_data = io_u->file->engine_data;
177 struct fio_libnfs_options *o = nfs_data->options;
178 struct nfs_context *nfs = o->context;
180 enum fio_q_status ret = FIO_Q_QUEUED;
182 io_u->engine_data = nfs_data;
185 err = queue_write(o, io_u);
188 err = queue_read(o, io_u);
191 log_err("nfs: trim is not supported");
195 log_err("nfs: unhandled io %d\n", io_u->ddir);
199 log_err("nfs: Failed to queue nfs op: %s\n", nfs_get_error(nfs));
201 return FIO_Q_COMPLETED;
203 o->outstanding_events++;
/* Do a mount if one has not been done before */
208 static int do_mount(struct thread_data *td, const char *url)
210 size_t event_size = sizeof(struct io_u **) * td->o.iodepth;
211 struct fio_libnfs_options *options = td->eo;
212 struct nfs_url *nfs_url = NULL;
215 char *mnt_dir = NULL;
217 if (options->context) {
221 options->context = nfs_init_context();
222 if (options->context == NULL) {
223 log_err("nfs: failed to init nfs context\n");
227 options->events = malloc(event_size);
228 memset(options->events, 0, event_size);
230 options->prev_requested_event_index = -1;
231 options->queue_depth = td->o.iodepth;
233 nfs_url = nfs_parse_url_full(options->context, url);
234 path_len = strlen(nfs_url->path);
235 mnt_dir = malloc(path_len + strlen(nfs_url->file) + 1);
236 strcpy(mnt_dir, nfs_url->path);
237 strcpy(mnt_dir + strlen(nfs_url->path), nfs_url->file);
238 ret = nfs_mount(options->context, nfs_url->server, mnt_dir);
240 nfs_destroy_url(nfs_url);
/*
 * The init function is called once per thread/process, and should set up
 * any structures that this io engine requires to keep track of io. Not
 * required.
 */
249 static int fio_libnfs_setup(struct thread_data *td)
251 // flipping this makes using gdb easier, but tends to hang fio on exit
252 td->o.use_thread = 0;
/*
 * This is paired with the ->init() function and is called when a thread is
 * done doing io. Should tear down anything setup by the ->init() function.
 */
261 static void fio_libnfs_cleanup(struct thread_data *td)
263 struct fio_libnfs_options *o = td->eo;
264 nfs_umount(o->context);
265 nfs_destroy_context(o->context);
269 static int fio_libnfs_open(struct thread_data *td, struct fio_file *f)
272 struct fio_libnfs_options *options = td->eo;
273 struct nfs_data *nfs_data = NULL;
276 if (!options->nfs_url) {
277 log_err("nfs: nfs_url is a required parameter\n");
281 ret = do_mount(td, options->nfs_url);
284 log_err("nfs: Failed to mount %s with code %d: %s\n", options->nfs_url, ret, nfs_get_error(options->context));
287 nfs_data = malloc(sizeof(struct nfs_data));
288 memset(nfs_data, 0, sizeof(struct nfs_data));
289 nfs_data->options = options;
291 if (td->o.td_ddir == TD_DDIR_WRITE) {
292 flags |= O_CREAT | O_RDWR;
296 ret = nfs_open(options->context, f->file_name, flags, &nfs_data->nfsfh);
299 log_err("Failed to open %s: %s\n", f->file_name, nfs_get_error(options->context));
301 f->engine_data = nfs_data;
305 static int fio_libnfs_close(struct thread_data *td, struct fio_file *f)
307 struct nfs_data *nfs_data = f->engine_data;
308 struct fio_libnfs_options *o = nfs_data->options;
310 if (nfs_data->nfsfh) {
311 ret = nfs_close(o->context, nfs_data->nfsfh);
314 f->engine_data = NULL;
/*
 * Hook for writing out outstanding data.
 */
321 static int fio_libnfs_commit(struct thread_data *td) {
322 nfs_event_loop(td, true);
326 struct ioengine_ops ioengine = {
328 .version = FIO_IOOPS_VERSION,
329 .setup = fio_libnfs_setup,
330 .queue = fio_libnfs_queue,
331 .getevents = fio_libnfs_getevents,
332 .event = fio_libnfs_event,
333 .cleanup = fio_libnfs_cleanup,
334 .open_file = fio_libnfs_open,
335 .close_file = fio_libnfs_close,
336 .commit = fio_libnfs_commit,
337 .flags = FIO_DISKLESSIO | FIO_NOEXTEND | FIO_NODISKUTIL,
339 .option_struct_size = sizeof(struct fio_libnfs_options),
342 static void fio_init fio_nfs_register(void)
344 register_ioengine(&ioengine);
347 static void fio_exit fio_nfs_unregister(void)
349 unregister_ioengine(&ioengine);