Commit | Line | Data |
---|---|---|
9326926b TG |
1 | #include <stdlib.h> |
2 | #include <poll.h> | |
3 | #include <nfsc/libnfs.h> | |
4 | #include <nfsc/libnfs-raw.h> | |
5 | #include <nfsc/libnfs-raw-mount.h> | |
6 | ||
7 | #include "../fio.h" | |
8 | #include "../optgroup.h" | |
9 | ||
/*
 * Operation modes for this engine. Only NFS_READ_WRITE is exercised by
 * the code visible in this file; the stat/mkdir/touch variants appear
 * unused here — TODO confirm against the rest of the tree.
 */
enum nfs_op_type {
	NFS_READ_WRITE = 0,
	NFS_STAT_MKDIR_RMDIR,
	NFS_STAT_TOUCH_RM,
};
15 | ||
/*
 * Per-thread engine state: the libnfs connection, the user-supplied URL,
 * and a circular queue that buffers completions between nfs_callback()
 * and fio's ->event() hook.
 */
struct fio_libnfs_options {
	struct nfs_context *context;	/* libnfs connection, created by do_mount() */
	char *nfs_url;			/* nfs:// URL from the "nfs_url" option */
	unsigned int queue_depth; /* nfs_callback needs this info, but doesn't have fio td structure to pull it from */
	/* the following implement a circular queue of outstanding IOs */
	int outstanding_events; /* IOs issued to libnfs, that have not returned yet */
	int prev_requested_event_index; /* event last returned via fio_libnfs_event */
	int next_buffered_event; /* round robin-pointer within events[] */
	int buffered_event_count; /* IOs completed by libnfs, waiting for FIO */
	int free_event_buffer_index; /* next free buffer */
	struct io_u**events;	/* completion ring, iodepth entries */
};
28 | ||
/* Per-file state: the open libnfs file handle plus a back-pointer to the
 * thread's engine options (used by nfs_callback, which only sees io_u). */
struct nfs_data {
	struct nfsfh *nfsfh;		/* handle from nfs_open() */
	struct fio_libnfs_options *options;
};
33 | ||
/* Engine option table; "nfs_url" is the only engine-specific option and
 * is required (checked in fio_libnfs_open()). */
static struct fio_option options[] = {
	{
		.name	= "nfs_url",
		.lname	= "nfs_url",
		.type	= FIO_OPT_STR_STORE,
		.help	= "URL in libnfs format, eg nfs://<server|ipv4|ipv6>/path[?arg=val[&arg=val]*]",
		.off1	= offsetof(struct fio_libnfs_options, nfs_url),
		.category = FIO_OPT_C_ENGINE,
		.group	= __FIO_OPT_G_NFS,
	},
	{
		/* sentinel terminating the table */
		.name	= NULL,
	},
};
48 | ||
49 | /* | |
50 | * The ->event() hook is called to match an event number with an io_u. | |
51 | * After the core has called ->getevents() and it has returned eg 3, | |
52 | * the ->event() hook must return the 3 events that have completed for | |
53 | * subsequent calls to ->event() with [0-2]. Required. | |
54 | */ | |
55 | static struct io_u *fio_libnfs_event(struct thread_data *td, int event) | |
56 | { | |
57 | struct fio_libnfs_options *o = td->eo; | |
58 | struct io_u *io_u = o->events[o->next_buffered_event]; | |
59 | assert(o->events[o->next_buffered_event]); | |
60 | o->events[o->next_buffered_event] = NULL; | |
61 | o->next_buffered_event = (o->next_buffered_event + 1) % td->o.iodepth; | |
1fb2bc2f | 62 | /* validate our state machine */ |
9326926b TG |
63 | assert(o->buffered_event_count); |
64 | o->buffered_event_count--; | |
65 | assert(io_u); | |
1fb2bc2f | 66 | /* assert that fio_libnfs_event is being called in sequential fashion */ |
9326926b TG |
67 | assert(event == 0 || o->prev_requested_event_index + 1 == event); |
68 | if (o->buffered_event_count == 0) { | |
69 | o->prev_requested_event_index = -1; | |
70 | } else { | |
71 | o->prev_requested_event_index = event; | |
72 | } | |
73 | return io_u; | |
74 | } | |
75 | ||
/*
 * Drive the libnfs socket until at least one completion has been buffered
 * (or, when flush is set, until every outstanding IO has completed).
 * Returns the number of completed io_u's waiting to be collected via the
 * ->event() hook.
 */
static int nfs_event_loop(struct thread_data *td, bool flush) {
	struct fio_libnfs_options *o = td->eo;
	struct pollfd pfds[1]; /* nfs:0 */
	/* we already have stuff queued for fio, no need to waste cpu on poll() */
	if (o->buffered_event_count) {
		return o->buffered_event_count;
	}
	/* fio core logic seems to stop calling this event-loop if we ever return with 0 events */
	/*
	 * Block in poll() (timeout -1) while the queue is completely full, or
	 * while a flush still has IOs in flight; otherwise poll non-blocking.
	 */
#define SHOULD_WAIT() (o->outstanding_events == td->o.iodepth || (flush && o->outstanding_events))

	do {
		int timeout = SHOULD_WAIT() ? -1 : 0;
		int ret = 0;
		pfds[0].fd = nfs_get_fd(o->context);
		pfds[0].events = nfs_which_events(o->context);
		ret = poll(&pfds[0], 1, timeout);
		if (ret < 0) {
			/* retry on benign interruptions, give up otherwise */
			if (errno == EINTR || errno == EAGAIN) {
				continue;
			}
			log_err("nfs: failed to poll events: %s.\n",
				strerror(errno));
			break;
		}

		/* let libnfs process the socket; completions land in nfs_callback() */
		ret = nfs_service(o->context, pfds[0].revents);
		if (ret < 0) {
			log_err("nfs: socket is in an unrecoverable error state.\n");
			break;
		}
	} while (SHOULD_WAIT());
	return o->buffered_event_count;
}
#undef SHOULD_WAIT
110 | ||
/*
 * The ->getevents() hook is used to reap completion events from an async
 * io engine. It returns the number of completed events since the last call,
 * which may then be retrieved by calling the ->event() hook with the event
 * numbers. Required.
 */
static int fio_libnfs_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	/* min/max/t are unused: the event loop returns whatever has completed */
	return nfs_event_loop(td, false);
}
122 | ||
123 | static void nfs_callback(int res, struct nfs_context *nfs, void *data, | |
124 | void *private_data) | |
125 | { | |
126 | struct io_u *io_u = private_data; | |
127 | struct nfs_data *nfs_data = io_u->file->engine_data; | |
128 | struct fio_libnfs_options *o = nfs_data->options; | |
129 | if (res < 0) { | |
130 | log_err("Failed NFS operation(code:%d): %s\n", res, nfs_get_error(o->context)); | |
131 | io_u->error = -res; | |
1fb2bc2f | 132 | /* res is used for read math below, don't wanna pass negative there */ |
9326926b TG |
133 | res = 0; |
134 | } else if (io_u->ddir == DDIR_READ) { | |
135 | memcpy(io_u->buf, data, res); | |
136 | if (res == 0) { | |
137 | log_err("Got NFS EOF, this is probably not expected\n"); | |
138 | } | |
139 | } | |
1fb2bc2f | 140 | /* fio uses resid to track remaining data */ |
9326926b TG |
141 | io_u->resid = io_u->xfer_buflen - res; |
142 | ||
143 | assert(!o->events[o->free_event_buffer_index]); | |
144 | o->events[o->free_event_buffer_index] = io_u; | |
145 | o->free_event_buffer_index = (o->free_event_buffer_index + 1) % o->queue_depth; | |
146 | o->outstanding_events--; | |
147 | o->buffered_event_count++; | |
148 | } | |
149 | ||
150 | static int queue_write(struct fio_libnfs_options *o, struct io_u *io_u) { | |
151 | struct nfs_data *nfs_data = io_u->engine_data; | |
152 | return nfs_pwrite_async(o->context, nfs_data->nfsfh, | |
153 | io_u->offset, io_u->buflen, io_u->buf, nfs_callback, | |
154 | io_u); | |
155 | } | |
156 | ||
157 | static int queue_read(struct fio_libnfs_options *o, struct io_u *io_u) { | |
158 | struct nfs_data *nfs_data = io_u->engine_data; | |
159 | return nfs_pread_async(o->context, nfs_data->nfsfh, io_u->offset, io_u->buflen, nfs_callback, io_u); | |
160 | } | |
161 | ||
162 | /* | |
163 | * The ->queue() hook is responsible for initiating io on the io_u | |
164 | * being passed in. If the io engine is a synchronous one, io may complete | |
165 | * before ->queue() returns. Required. | |
166 | * | |
167 | * The io engine must transfer in the direction noted by io_u->ddir | |
168 | * to the buffer pointed to by io_u->xfer_buf for as many bytes as | |
169 | * io_u->xfer_buflen. Residual data count may be set in io_u->resid | |
170 | * for a short read/write. | |
171 | */ | |
172 | static enum fio_q_status fio_libnfs_queue(struct thread_data *td, | |
173 | struct io_u *io_u) | |
174 | { | |
175 | struct nfs_data *nfs_data = io_u->file->engine_data; | |
176 | struct fio_libnfs_options *o = nfs_data->options; | |
177 | struct nfs_context *nfs = o->context; | |
178 | int err; | |
179 | enum fio_q_status ret = FIO_Q_QUEUED; | |
180 | ||
181 | io_u->engine_data = nfs_data; | |
182 | switch(io_u->ddir) { | |
183 | case DDIR_WRITE: | |
184 | err = queue_write(o, io_u); | |
185 | break; | |
186 | case DDIR_READ: | |
187 | err = queue_read(o, io_u); | |
188 | break; | |
189 | case DDIR_TRIM: | |
190 | log_err("nfs: trim is not supported"); | |
191 | err = -1; | |
192 | break; | |
193 | default: | |
194 | log_err("nfs: unhandled io %d\n", io_u->ddir); | |
195 | err = -1; | |
196 | } | |
197 | if (err) { | |
198 | log_err("nfs: Failed to queue nfs op: %s\n", nfs_get_error(nfs)); | |
199 | td->error = 1; | |
200 | return FIO_Q_COMPLETED; | |
201 | } | |
202 | o->outstanding_events++; | |
203 | return ret; | |
204 | } | |
205 | ||
206 | /** Do a mount if one has not been done before */ | |
207 | static int do_mount(struct thread_data *td, const char *url) | |
208 | { | |
209 | size_t event_size = sizeof(struct io_u **) * td->o.iodepth; | |
210 | struct fio_libnfs_options *options = td->eo; | |
211 | struct nfs_url *nfs_url = NULL; | |
212 | int ret = 0; | |
213 | int path_len = 0; | |
214 | char *mnt_dir = NULL; | |
215 | ||
216 | if (options->context) { | |
217 | return 0; | |
218 | } | |
219 | ||
220 | options->context = nfs_init_context(); | |
221 | if (options->context == NULL) { | |
222 | log_err("nfs: failed to init nfs context\n"); | |
223 | return -1; | |
224 | } | |
225 | ||
226 | options->events = malloc(event_size); | |
227 | memset(options->events, 0, event_size); | |
228 | ||
229 | options->prev_requested_event_index = -1; | |
230 | options->queue_depth = td->o.iodepth; | |
231 | ||
232 | nfs_url = nfs_parse_url_full(options->context, url); | |
233 | path_len = strlen(nfs_url->path); | |
234 | mnt_dir = malloc(path_len + strlen(nfs_url->file) + 1); | |
235 | strcpy(mnt_dir, nfs_url->path); | |
236 | strcpy(mnt_dir + strlen(nfs_url->path), nfs_url->file); | |
237 | ret = nfs_mount(options->context, nfs_url->server, mnt_dir); | |
238 | free(mnt_dir); | |
239 | nfs_destroy_url(nfs_url); | |
240 | return ret; | |
241 | } | |
242 | ||
/*
 * The init function is called once per thread/process, and should set up
 * any structures that this io engine requires to keep track of io. Not
 * required.
 */
static int fio_libnfs_setup(struct thread_data *td)
{
	/* Using threads with libnfs causes fio to hang on exit, lower performance */
	td->o.use_thread = 0;
	return 0;
}
254 | ||
255 | /* | |
256 | * This is paired with the ->init() function and is called when a thread is | |
257 | * done doing io. Should tear down anything setup by the ->init() function. | |
258 | * Not required. | |
259 | */ | |
260 | static void fio_libnfs_cleanup(struct thread_data *td) | |
261 | { | |
262 | struct fio_libnfs_options *o = td->eo; | |
263 | nfs_umount(o->context); | |
264 | nfs_destroy_context(o->context); | |
265 | free(o->events); | |
266 | } | |
267 | ||
268 | static int fio_libnfs_open(struct thread_data *td, struct fio_file *f) | |
269 | { | |
270 | int ret; | |
271 | struct fio_libnfs_options *options = td->eo; | |
272 | struct nfs_data *nfs_data = NULL; | |
273 | int flags = 0; | |
274 | ||
275 | if (!options->nfs_url) { | |
276 | log_err("nfs: nfs_url is a required parameter\n"); | |
277 | return -1; | |
278 | } | |
279 | ||
280 | ret = do_mount(td, options->nfs_url); | |
281 | ||
282 | if (ret != 0) { | |
283 | log_err("nfs: Failed to mount %s with code %d: %s\n", options->nfs_url, ret, nfs_get_error(options->context)); | |
284 | return ret; | |
285 | } | |
286 | nfs_data = malloc(sizeof(struct nfs_data)); | |
287 | memset(nfs_data, 0, sizeof(struct nfs_data)); | |
288 | nfs_data->options = options; | |
289 | ||
290 | if (td->o.td_ddir == TD_DDIR_WRITE) { | |
291 | flags |= O_CREAT | O_RDWR; | |
292 | } else { | |
293 | flags |= O_RDWR; | |
294 | } | |
295 | ret = nfs_open(options->context, f->file_name, flags, &nfs_data->nfsfh); | |
296 | ||
297 | if (ret != 0) { | |
298 | log_err("Failed to open %s: %s\n", f->file_name, nfs_get_error(options->context)); | |
299 | } | |
300 | f->engine_data = nfs_data; | |
301 | return ret; | |
302 | } | |
303 | ||
304 | static int fio_libnfs_close(struct thread_data *td, struct fio_file *f) | |
305 | { | |
306 | struct nfs_data *nfs_data = f->engine_data; | |
307 | struct fio_libnfs_options *o = nfs_data->options; | |
308 | int ret = 0; | |
309 | if (nfs_data->nfsfh) { | |
310 | ret = nfs_close(o->context, nfs_data->nfsfh); | |
311 | } | |
312 | free(nfs_data); | |
313 | f->engine_data = NULL; | |
314 | return ret; | |
315 | } | |
316 | ||
/*
 * Hook for writing out outstanding data.
 */
static int fio_libnfs_commit(struct thread_data *td) {
	/* flush=true: block until all outstanding IOs have completed */
	nfs_event_loop(td, true);
	return 0;
}
324 | ||
/* Engine registration table; fio locates this by its "ioengine" symbol. */
struct ioengine_ops ioengine = {
	.name		= "nfs",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_libnfs_setup,
	.queue		= fio_libnfs_queue,
	.getevents	= fio_libnfs_getevents,
	.event		= fio_libnfs_event,
	.cleanup	= fio_libnfs_cleanup,
	.open_file	= fio_libnfs_open,
	.close_file	= fio_libnfs_close,
	.commit		= fio_libnfs_commit,
	/* no local files/disks involved: all IO goes over the NFS socket */
	.flags		= FIO_DISKLESSIO | FIO_NOEXTEND | FIO_NODISKUTIL,
	.options	= options,
	.option_struct_size = sizeof(struct fio_libnfs_options),
};
340 | ||
/* Constructor: register the engine with fio at load time. */
static void fio_init fio_nfs_register(void)
{
	register_ioengine(&ioengine);
}
345 | ||
/* Destructor: unregister the engine when fio exits / the plugin unloads. */
static void fio_exit fio_nfs_unregister(void)
{
	unregister_ioengine(&ioengine);
}
350 |