engines/nfs.c

#include <stdlib.h>
#include <poll.h>
#include <nfsc/libnfs.h>
#include <nfsc/libnfs-raw.h>
#include <nfsc/libnfs-raw-mount.h>

#include "../fio.h"
#include "../optgroup.h"

enum nfs_op_type {
	NFS_READ_WRITE = 0,
	NFS_STAT_MKDIR_RMDIR,
	NFS_STAT_TOUCH_RM,
};

struct fio_libnfs_options {
	struct nfs_context *context;
	char *nfs_url;
	/* nfs_callback needs this info, but doesn't have the fio td structure to pull it from */
	unsigned int queue_depth;
	/* the following implement a circular queue of outstanding IOs */
	int outstanding_events; /* IOs issued to libnfs that have not returned yet */
	int prev_requested_event_index; /* event last returned via fio_libnfs_event */
	int next_buffered_event; /* round-robin pointer within events[] */
	int buffered_event_count; /* IOs completed by libnfs, waiting for fio */
	int free_event_buffer_index; /* next free buffer */
	struct io_u **events;
};

struct nfs_data {
	struct nfsfh *nfsfh;
	struct fio_libnfs_options *options;
};

static struct fio_option options[] = {
	{
		.name     = "nfs_url",
		.lname    = "nfs_url",
		.type     = FIO_OPT_STR_STORE,
		.help     = "URL in libnfs format, eg nfs://<server|ipv4|ipv6>/path[?arg=val[&arg=val]*]",
		.off1     = offsetof(struct fio_libnfs_options, nfs_url),
		.category = FIO_OPT_C_ENGINE,
		.group    = __FIO_OPT_G_NFS,
	},
	{
		.name     = NULL,
	},
};

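/*
 * Example job for this engine (a minimal sketch; the server address, export
 * path and sizes below are illustrative, not defaults shipped with fio):
 *
 *   [global]
 *   ioengine=nfs
 *   nfs_url=nfs://192.168.0.2/export
 *   iodepth=8
 *   rw=randrw
 *   bs=64k
 *   size=128M
 *
 *   [nfs-job]
 */
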
/*
 * The ->event() hook is called to match an event number with an io_u.
 * After the core has called ->getevents() and it has returned eg 3,
 * the ->event() hook must return the 3 events that have completed for
 * subsequent calls to ->event() with [0-2]. Required.
 */
static struct io_u *fio_libnfs_event(struct thread_data *td, int event)
{
	struct fio_libnfs_options *o = td->eo;
	struct io_u *io_u = o->events[o->next_buffered_event];

	assert(o->events[o->next_buffered_event]);
	o->events[o->next_buffered_event] = NULL;
	o->next_buffered_event = (o->next_buffered_event + 1) % td->o.iodepth;
	/* validate our state machine */
	assert(o->buffered_event_count);
	o->buffered_event_count--;
	assert(io_u);
	/* assert that fio_libnfs_event is being called in sequential fashion */
	assert(event == 0 || o->prev_requested_event_index + 1 == event);
	if (o->buffered_event_count == 0) {
		o->prev_requested_event_index = -1;
	} else {
		o->prev_requested_event_index = event;
	}
	return io_u;
}

static int nfs_event_loop(struct thread_data *td, bool flush) {
	struct fio_libnfs_options *o = td->eo;
	struct pollfd pfds[1]; /* nfs:0 */

	/* we already have stuff queued for fio, no need to waste cpu on poll() */
	if (o->buffered_event_count)
		return o->buffered_event_count;
	/* fio core logic seems to stop calling this event-loop if we ever return with 0 events */
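/* block in poll() while the libnfs queue is full, or while flushing with IOs still in flight */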
#define SHOULD_WAIT() \
	(o->outstanding_events == td->o.iodepth || (flush && o->outstanding_events))

	do {
		int timeout = SHOULD_WAIT() ? -1 : 0;
		int ret = 0;
		pfds[0].fd = nfs_get_fd(o->context);
		pfds[0].events = nfs_which_events(o->context);
		ret = poll(&pfds[0], 1, timeout);
		if (ret < 0) {
			if (errno == EINTR || errno == EAGAIN) {
				continue;
			}
			log_err("nfs: failed to poll events: %s.\n",
				strerror(errno));
			break;
		}

		ret = nfs_service(o->context, pfds[0].revents);
		if (ret < 0) {
			log_err("nfs: socket is in an unrecoverable error state.\n");
			break;
		}
	} while (SHOULD_WAIT());

	return o->buffered_event_count;
#undef SHOULD_WAIT
}

/*
 * The ->getevents() hook is used to reap completion events from an async
 * io engine. It returns the number of completed events since the last call,
 * which may then be retrieved by calling the ->event() hook with the event
 * numbers. Required.
 */
static int fio_libnfs_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	return nfs_event_loop(td, false);
}

static void nfs_callback(int res, struct nfs_context *nfs, void *data,
			 void *private_data)
{
	struct io_u *io_u = private_data;
	struct nfs_data *nfs_data = io_u->file->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;

	if (res < 0) {
		log_err("Failed NFS operation (code:%d): %s\n", res,
			nfs_get_error(o->context));
		io_u->error = -res;
		/* res is used for the read math below, so don't pass a negative value there */
		res = 0;
	} else if (io_u->ddir == DDIR_READ) {
		memcpy(io_u->buf, data, res);
		if (res == 0)
			log_err("Got NFS EOF, this is probably not expected\n");
	}
	/* fio uses resid to track remaining data */
	io_u->resid = io_u->xfer_buflen - res;

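	/* hand the completed io_u to the circular event buffer; fio_libnfs_event() picks it up from there */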
	assert(!o->events[o->free_event_buffer_index]);
	o->events[o->free_event_buffer_index] = io_u;
	o->free_event_buffer_index = (o->free_event_buffer_index + 1) % o->queue_depth;
	o->outstanding_events--;
	o->buffered_event_count++;
}

static int queue_write(struct fio_libnfs_options *o, struct io_u *io_u) {
	struct nfs_data *nfs_data = io_u->engine_data;

	return nfs_pwrite_async(o->context, nfs_data->nfsfh,
				io_u->offset, io_u->buflen, io_u->buf,
				nfs_callback, io_u);
}

static int queue_read(struct fio_libnfs_options *o, struct io_u *io_u) {
	struct nfs_data *nfs_data = io_u->engine_data;

	return nfs_pread_async(o->context, nfs_data->nfsfh,
			       io_u->offset, io_u->buflen,
			       nfs_callback, io_u);
}

/*
 * The ->queue() hook is responsible for initiating io on the io_u
 * being passed in. If the io engine is a synchronous one, io may complete
 * before ->queue() returns. Required.
 *
 * The io engine must transfer in the direction noted by io_u->ddir
 * to the buffer pointed to by io_u->xfer_buf for as many bytes as
 * io_u->xfer_buflen. Residual data count may be set in io_u->resid
 * for a short read/write.
 */
static enum fio_q_status fio_libnfs_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct nfs_data *nfs_data = io_u->file->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;
	struct nfs_context *nfs = o->context;
	int err;
	enum fio_q_status ret = FIO_Q_QUEUED;

	io_u->engine_data = nfs_data;
	switch (io_u->ddir) {
	case DDIR_WRITE:
		err = queue_write(o, io_u);
		break;
	case DDIR_READ:
		err = queue_read(o, io_u);
		break;
	case DDIR_TRIM:
		log_err("nfs: trim is not supported\n");
		err = -1;
		break;
	default:
		log_err("nfs: unhandled io %d\n", io_u->ddir);
		err = -1;
	}
	if (err) {
		log_err("nfs: failed to queue nfs op: %s\n", nfs_get_error(nfs));
		td->error = 1;
		return FIO_Q_COMPLETED;
	}
	o->outstanding_events++;
	return ret;
}

/** Do a mount if one has not been done before */
static int do_mount(struct thread_data *td, const char *url)
{
	size_t event_size = sizeof(struct io_u **) * td->o.iodepth;
	struct fio_libnfs_options *options = td->eo;
	struct nfs_url *nfs_url = NULL;
	int ret = 0;
	int path_len = 0;
	char *mnt_dir = NULL;

	if (options->context)
		return 0;

	options->context = nfs_init_context();
	if (options->context == NULL) {
		log_err("nfs: failed to init nfs context\n");
		return -1;
	}

	options->events = malloc(event_size);
	memset(options->events, 0, event_size);

	options->prev_requested_event_index = -1;
	options->queue_depth = td->o.iodepth;

	nfs_url = nfs_parse_url_full(options->context, url);
	path_len = strlen(nfs_url->path);
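	/* the mount point is the parsed path with the trailing file component appended */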
	mnt_dir = malloc(path_len + strlen(nfs_url->file) + 1);
	strcpy(mnt_dir, nfs_url->path);
	strcpy(mnt_dir + strlen(nfs_url->path), nfs_url->file);
	ret = nfs_mount(options->context, nfs_url->server, mnt_dir);
	free(mnt_dir);
	nfs_destroy_url(nfs_url);
	return ret;
}

/*
 * The init function is called once per thread/process, and should set up
 * any structures that this io engine requires to keep track of io. Not
 * required.
 */
static int fio_libnfs_setup(struct thread_data *td)
{
	/* using threads with libnfs causes fio to hang on exit, and lowers performance */
	td->o.use_thread = 0;
	return 0;
}

/*
 * This is paired with the ->init() function and is called when a thread is
 * done doing io. Should tear down anything set up by the ->init() function.
 * Not required.
 */
static void fio_libnfs_cleanup(struct thread_data *td)
{
	struct fio_libnfs_options *o = td->eo;

	nfs_umount(o->context);
	nfs_destroy_context(o->context);
	free(o->events);
}

static int fio_libnfs_open(struct thread_data *td, struct fio_file *f)
{
	int ret;
	struct fio_libnfs_options *options = td->eo;
	struct nfs_data *nfs_data = NULL;
	int flags = 0;

	if (!options->nfs_url) {
		log_err("nfs: nfs_url is a required parameter\n");
		return -1;
	}

	ret = do_mount(td, options->nfs_url);

	if (ret != 0) {
		log_err("nfs: Failed to mount %s with code %d: %s\n",
			options->nfs_url, ret, nfs_get_error(options->context));
		return ret;
	}
	nfs_data = malloc(sizeof(struct nfs_data));
	memset(nfs_data, 0, sizeof(struct nfs_data));
	nfs_data->options = options;

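	/* for a pure write workload, also create the file if it doesn't exist yet */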
	if (td->o.td_ddir == TD_DDIR_WRITE) {
		flags |= O_CREAT | O_RDWR;
	} else {
		flags |= O_RDWR;
	}
	ret = nfs_open(options->context, f->file_name, flags, &nfs_data->nfsfh);

	if (ret != 0)
		log_err("Failed to open %s: %s\n", f->file_name,
			nfs_get_error(options->context));

	f->engine_data = nfs_data;
	return ret;
}

static int fio_libnfs_close(struct thread_data *td, struct fio_file *f)
{
	struct nfs_data *nfs_data = f->engine_data;
	struct fio_libnfs_options *o = nfs_data->options;
	int ret = 0;

	if (nfs_data->nfsfh)
		ret = nfs_close(o->context, nfs_data->nfsfh);

	free(nfs_data);
	f->engine_data = NULL;
	return ret;
}

/*
 * Hook for writing out outstanding data.
 */
static int fio_libnfs_commit(struct thread_data *td) {
	nfs_event_loop(td, true);
	return 0;
}

struct ioengine_ops ioengine = {
	.name = "nfs",
	.version = FIO_IOOPS_VERSION,
	.setup = fio_libnfs_setup,
	.queue = fio_libnfs_queue,
	.getevents = fio_libnfs_getevents,
	.event = fio_libnfs_event,
	.cleanup = fio_libnfs_cleanup,
	.open_file = fio_libnfs_open,
	.close_file = fio_libnfs_close,
	.commit = fio_libnfs_commit,
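	/* no local block device is involved: diskless IO, no disk-util stats, and the engine does not extend files */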
	.flags = FIO_DISKLESSIO | FIO_NOEXTEND | FIO_NODISKUTIL,
	.options = options,
	.option_struct_size = sizeof(struct fio_libnfs_options),
};

static void fio_init fio_nfs_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_nfs_unregister(void)
{
	unregister_ioengine(&ioengine);
}