NFS configure fixes
[fio.git] / engines / nfs.c
// https://github.com/axboe/fio/pull/762 sample pull request for a new engine
#include <stdlib.h>
#include <poll.h>
#include <nfsc/libnfs.h>
#include <nfsc/libnfs-raw.h>
#include <nfsc/libnfs-raw-mount.h>

#include "../fio.h"
#include "../optgroup.h"

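/*
 * Engine overview: I/O is issued asynchronously through libnfs
 * (nfs_pread_async/nfs_pwrite_async). poll() plus nfs_service() drive the
 * libnfs event loop; completed requests are buffered and handed back to fio
 * through the ->getevents()/->event() hooks.
 */
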
enum nfs_op_type {
	NFS_READ_WRITE = 0,
	NFS_STAT_MKDIR_RMDIR,
	NFS_STAT_TOUCH_RM,
};

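/*
 * events[] is a circular buffer with iodepth slots: nfs_callback() stores
 * completed io_u's at free_event_buffer_index, and fio_libnfs_event() hands
 * them back to fio from next_buffered_event. outstanding_events counts
 * requests issued to libnfs that have not completed yet; buffered_event_count
 * counts completions fio has not reaped yet.
 */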
struct fio_libnfs_options {
	struct nfs_context *context;
	char *nfs_url;
	// the following implements a circular queue of outstanding IOs
	int outstanding_events; // IOs issued to libnfs that have not returned yet
	int prev_requested_event_index; // event last returned via fio_libnfs_event
	int next_buffered_event; // round-robin pointer within events[]
	int buffered_event_count; // IOs completed by libnfs, waiting for fio
	int free_event_buffer_index; // next empty buffer slot
	unsigned int queue_depth; // nfs_callback needs this info, but doesn't have a fio td structure to pull it from
	struct io_u **events;
};

struct nfs_data {
	struct nfsfh *nfsfh;
	struct fio_libnfs_options *options;
};

static struct fio_option options[] = {
	{
		.name	  = "nfs_url",
		.lname	  = "nfs_url",
		.type	  = FIO_OPT_STR_STORE,
		.help	  = "URL in libnfs format, eg nfs://<server|ipv4|ipv6>/path[?arg=val[&arg=val]*]",
		.off1	  = offsetof(struct fio_libnfs_options, nfs_url),
		.category = FIO_OPT_C_ENGINE,
		.group	  = FIO_OPT_G_NFS,
	},
	{
		.name = NULL,
	},
};

/*
 * The ->event() hook is called to match an event number with an io_u.
 * After the core has called ->getevents() and it has returned e.g. 3,
 * the ->event() hook must return the 3 events that have completed for
 * subsequent calls to ->event() with [0-2]. Required.
 */
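/*
 * In this engine, completed io_u's are handed out in FIFO order from the
 * events[] ring, so ->event() is expected to be called with increasing
 * indexes (asserted below).
 */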
static struct io_u *fio_libnfs_event(struct thread_data *td, int event)
{
	struct fio_libnfs_options *o = td->eo;
	struct io_u *io_u = o->events[o->next_buffered_event];

	assert(o->events[o->next_buffered_event]);
	o->events[o->next_buffered_event] = NULL;
	o->next_buffered_event = (o->next_buffered_event + 1) % td->o.iodepth;
	// validate our state machine
	assert(o->buffered_event_count);
	o->buffered_event_count--;
	assert(io_u);
	// assert that fio_libnfs_event is being called in sequential fashion
	assert(event == 0 || o->prev_requested_event_index + 1 == event);
	if (o->buffered_event_count == 0) {
		o->prev_requested_event_index = -1;
	} else {
		o->prev_requested_event_index = event;
	}
	return io_u;
}

static int nfs_event_loop(struct thread_data *td, bool flush)
{
	struct fio_libnfs_options *o = td->eo;
	struct pollfd pfds[1]; /* nfs:0 */

	// we already have stuff queued for fio, no need to waste cpu on poll()
	if (o->buffered_event_count) {
		return o->buffered_event_count;
	}

	// fio core logic seems to stop calling this event-loop if we ever return with 0 events
#define SHOULD_WAIT() (o->outstanding_events == td->o.iodepth || (flush && o->outstanding_events))

	do {
		int timeout = SHOULD_WAIT() ? -1 : 0;
		int ret = 0;

		pfds[0].fd = nfs_get_fd(o->context);
		pfds[0].events = nfs_which_events(o->context);
		ret = poll(&pfds[0], 1, timeout);
		if (ret < 0) {
			if (errno == EINTR || errno == EAGAIN) {
				continue;
			}
			log_err("nfs: failed to poll events: %s.\n",
				strerror(errno));
			break;
		}

		ret = nfs_service(o->context, pfds[0].revents);
		if (ret < 0) {
			log_err("nfs: socket is in an unrecoverable error state.\n");
			break;
		}
	} while (SHOULD_WAIT());

	return o->buffered_event_count;
}
#undef SHOULD_WAIT

/*
 * The ->getevents() hook is used to reap completion events from an async
 * io engine. It returns the number of completed events since the last call,
 * which may then be retrieved by calling the ->event() hook with the event
 * numbers. Required.
 */
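/*
 * Note: this engine ignores min, max and the timeout and simply returns
 * whatever libnfs has completed so far.
 */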
static int fio_libnfs_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	return nfs_event_loop(td, false);
}

static void nfs_callback(int res, struct nfs_context *nfs, void *data,
			 void *private_data)
{
	struct io_u *io_u = private_data;
	struct nfs_data *nfs_data = io_u->file->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;

	if (res < 0) {
		log_err("Failed NFS operation (code %d): %s\n", res,
			nfs_get_error(o->context));
		io_u->error = -res;
		// res is used in the resid math below, don't pass a negative value there
		res = 0;
	} else if (io_u->ddir == DDIR_READ) {
		memcpy(io_u->buf, data, res);
		if (res == 0) {
			log_err("Got NFS EOF, this is probably not expected\n");
		}
	}
	// fio uses resid to track remaining data
	io_u->resid = io_u->xfer_buflen - res;

	assert(!o->events[o->free_event_buffer_index]);
	o->events[o->free_event_buffer_index] = io_u;
	o->free_event_buffer_index = (o->free_event_buffer_index + 1) % o->queue_depth;
	o->outstanding_events--;
	o->buffered_event_count++;
}

static int queue_write(struct fio_libnfs_options *o, struct io_u *io_u)
{
	struct nfs_data *nfs_data = io_u->engine_data;

	return nfs_pwrite_async(o->context, nfs_data->nfsfh, io_u->offset,
				io_u->buflen, io_u->buf, nfs_callback, io_u);
}

static int queue_read(struct fio_libnfs_options *o, struct io_u *io_u)
{
	struct nfs_data *nfs_data = io_u->engine_data;

	return nfs_pread_async(o->context, nfs_data->nfsfh, io_u->offset,
			       io_u->buflen, nfs_callback, io_u);
}

/*
 * The ->queue() hook is responsible for initiating io on the io_u
 * being passed in. If the io engine is a synchronous one, io may complete
 * before ->queue() returns. Required.
 *
 * The io engine must transfer in the direction noted by io_u->ddir
 * to the buffer pointed to by io_u->xfer_buf for as many bytes as
 * io_u->xfer_buflen. Residual data count may be set in io_u->resid
 * for a short read/write.
 */
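/*
 * This engine is fully asynchronous: on success the request has only been
 * handed to libnfs, so FIO_Q_QUEUED is returned and completion is reported
 * later via nfs_callback()/->getevents(). FIO_Q_COMPLETED is only used on
 * the immediate-failure path.
 */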
static enum fio_q_status fio_libnfs_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct nfs_data *nfs_data = io_u->file->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;
	struct nfs_context *nfs = o->context;
	int err;
	enum fio_q_status ret = FIO_Q_QUEUED;

	io_u->engine_data = nfs_data;
	switch (io_u->ddir) {
	case DDIR_WRITE:
		err = queue_write(o, io_u);
		break;
	case DDIR_READ:
		err = queue_read(o, io_u);
		break;
	case DDIR_TRIM:
		log_err("nfs: trim is not supported\n");
		err = -1;
		break;
	default:
		log_err("nfs: unhandled io %d\n", io_u->ddir);
		err = -1;
		break;
	}
	if (err) {
		log_err("nfs: Failed to queue nfs op: %s\n", nfs_get_error(nfs));
		td->error = 1;
		return FIO_Q_COMPLETED;
	}
	o->outstanding_events++;
	return ret;
}

/** Do a mount if one has not been done before */
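/*
 * The full URL path (nfs_url->path plus nfs_url->file) is used as the export
 * to mount, e.g. (placeholder address) nfs_url=nfs://192.168.0.1/export
 * mounts 192.168.0.1:/export; fio file names are then opened relative to
 * that mount point.
 */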
static int do_mount(struct thread_data *td, const char *url)
{
	size_t event_size = sizeof(struct io_u *) * td->o.iodepth;
	struct fio_libnfs_options *options = td->eo;
	struct nfs_url *nfs_url = NULL;
	int ret = 0;
	int path_len = 0;
	char *mnt_dir = NULL;

	if (options->context) {
		return 0;
	}

	options->context = nfs_init_context();
	if (options->context == NULL) {
		log_err("nfs: failed to init nfs context\n");
		return -1;
	}

	options->events = malloc(event_size);
	memset(options->events, 0, event_size);

	options->prev_requested_event_index = -1;
	options->queue_depth = td->o.iodepth;

	nfs_url = nfs_parse_url_full(options->context, url);
	if (!nfs_url) {
		log_err("nfs: failed to parse url %s: %s\n", url,
			nfs_get_error(options->context));
		return -1;
	}
	path_len = strlen(nfs_url->path);
	mnt_dir = malloc(path_len + strlen(nfs_url->file) + 1);
	strcpy(mnt_dir, nfs_url->path);
	strcpy(mnt_dir + strlen(nfs_url->path), nfs_url->file);
	ret = nfs_mount(options->context, nfs_url->server, mnt_dir);
	free(mnt_dir);
	nfs_destroy_url(nfs_url);
	return ret;
}

/*
 * Called once per thread/process to set up any structures that this io
 * engine requires to keep track of io. Not required.
 */
static int fio_libnfs_setup(struct thread_data *td)
{
	// flipping this makes using gdb easier, but tends to hang fio on exit
	td->o.use_thread = 0;
	return 0;
}

/*
 * This is paired with the ->init() function and is called when a thread is
 * done doing io. Should tear down anything set up by the ->init() function.
 * Not required.
 */
static void fio_libnfs_cleanup(struct thread_data *td)
{
	struct fio_libnfs_options *o = td->eo;

	nfs_umount(o->context);
	nfs_destroy_context(o->context);
	free(o->events);
}

static int fio_libnfs_open(struct thread_data *td, struct fio_file *f)
{
	int ret;
	struct fio_libnfs_options *options = td->eo;
	struct nfs_data *nfs_data = NULL;
	int flags = 0;

	if (!options->nfs_url) {
		log_err("nfs: nfs_url is a required parameter\n");
		return -1;
	}

	ret = do_mount(td, options->nfs_url);
	if (ret != 0) {
		log_err("nfs: Failed to mount %s with code %d: %s\n",
			options->nfs_url, ret, nfs_get_error(options->context));
		return ret;
	}

	nfs_data = malloc(sizeof(struct nfs_data));
	memset(nfs_data, 0, sizeof(struct nfs_data));
	nfs_data->options = options;

	if (td->o.td_ddir == TD_DDIR_WRITE) {
		flags |= O_CREAT | O_RDWR;
	} else {
		flags |= O_RDWR;
	}

	ret = nfs_open(options->context, f->file_name, flags, &nfs_data->nfsfh);
	if (ret != 0) {
		log_err("Failed to open %s: %s\n", f->file_name,
			nfs_get_error(options->context));
	}
	f->engine_data = nfs_data;
	return ret;
}

static int fio_libnfs_close(struct thread_data *td, struct fio_file *f)
{
	struct nfs_data *nfs_data = f->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;
	int ret = 0;

	if (nfs_data->nfsfh) {
		ret = nfs_close(o->context, nfs_data->nfsfh);
	}
	free(nfs_data);
	f->engine_data = NULL;
	return ret;
}

/*
 * Hook for writing out outstanding data.
 */
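/*
 * Here this just drives nfs_event_loop() with flush set, so poll() keeps
 * being called while requests issued to libnfs are still outstanding.
 */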
static int fio_libnfs_commit(struct thread_data *td)
{
	nfs_event_loop(td, true);
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "nfs",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_libnfs_setup,
	.queue		= fio_libnfs_queue,
	.getevents	= fio_libnfs_getevents,
	.event		= fio_libnfs_event,
	.cleanup	= fio_libnfs_cleanup,
	.open_file	= fio_libnfs_open,
	.close_file	= fio_libnfs_close,
	.commit		= fio_libnfs_commit,
	.flags		= FIO_DISKLESSIO | FIO_NOEXTEND | FIO_NODISKUTIL,
	.options	= options,
	.option_struct_size = sizeof(struct fio_libnfs_options),
};

static void fio_init fio_nfs_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_nfs_unregister(void)
{
	unregister_ioengine(&ioengine);
}
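
/*
 * Illustrative job file for this engine (a sketch: the server address,
 * export path and sizes below are placeholders, not part of this source):
 *
 *   [nfs-test]
 *   ioengine=nfs
 *   nfs_url=nfs://127.0.0.1/export
 *   rw=randread
 *   bs=4k
 *   iodepth=8
 *   size=64m
 */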