/*
 * NBD engine
 *
 * IO engine that talks to an NBD server.
 *
 * Copyright (C) 2019 Red Hat Inc.
 * Written by Richard W.M. Jones <rjones@redhat.com>
 *
 */
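
/*
 * Illustrative job file (a sketch only; the socket path and sizes below are
 * examples, not requirements of this engine):
 *
 *   [global]
 *   ioengine=nbd
 *   uri=nbd+unix:///?socket=/tmp/nbd.sock
 *   rw=randrw
 *   size=64m
 *
 *   [job0]
 *
 * Any URI accepted by nbd_connect_uri() can be used here.
 */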

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

#include <libnbd.h>

#include "../fio.h"
#include "../optgroup.h"

/* Actually this differs across servers, but this is the limit for nbdkit. */
#define NBD_MAX_REQUEST_SIZE (64 * 1024 * 1024)
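
/*
 * Reads and writes larger than NBD_MAX_REQUEST_SIZE will trip the assertion
 * in nbd_queue() below, so the job's block size must not exceed this value.
 */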

/* Storage for the NBD handle. */
struct nbd_data {
        struct nbd_handle *nbd;
        int debug;

        /* The list of completed io_u structs. */
        struct io_u **completed;
        size_t nr_completed;
};

/* Options. */
struct nbd_options {
        void *padding;
        char *uri;
};

static struct fio_option options[] = {
        {
                .name = "uri",
                .lname = "NBD URI",
                .help = "Name of NBD URI",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_NBD,
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct nbd_options, uri),
        },
        {
                .name = NULL,
        },
};
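
/*
 * The uri option is passed straight to nbd_connect_uri(), so any libnbd URI
 * should work; for example (illustrative values only):
 *
 *   uri=nbd://localhost/export
 *   uri=nbd+unix:///?socket=/tmp/nbd.sock
 */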

/* Allocates nbd_data. */
static int nbd_setup(struct thread_data *td)
{
        struct nbd_data *nbd_data;
        struct nbd_options *o = td->eo;
        struct fio_file *f;
        int r;
        int64_t size;

        nbd_data = calloc(1, sizeof(*nbd_data));
        if (!nbd_data) {
                td_verror(td, errno, "calloc");
                return 1;
        }
        td->io_ops_data = nbd_data;

        /* Pretend to deal with files.  See engines/rbd.c */
        if (!td->files_index) {
                add_file(td, "nbd", 0, 0);
                td->o.nr_files = td->o.nr_files ? : 1;
                td->o.open_files++;
        }
        f = td->files[0];

        nbd_data->nbd = nbd_create();
        if (!nbd_data->nbd) {
                log_err("fio: nbd_create: %s\n", nbd_get_error());
                return 1;
        }

        /* Get the debug flag, which can be set through LIBNBD_DEBUG=1. */
        nbd_data->debug = nbd_get_debug(nbd_data->nbd);

        /* Connect synchronously here so we can check the size and, in
         * future, other properties of the server.
         */
        if (!o->uri) {
                log_err("fio: nbd: uri parameter was not specified\n");
                return 1;
        }
        r = nbd_connect_uri(nbd_data->nbd, o->uri);
        if (r == -1) {
                log_err("fio: nbd_connect_uri: %s\n", nbd_get_error());
                return 1;
        }
        size = nbd_get_size(nbd_data->nbd);
        if (size == -1) {
                log_err("fio: nbd_get_size: %s\n", nbd_get_error());
                return 1;
        }

        f->real_file_size = size;

        nbd_close(nbd_data->nbd);
        nbd_data->nbd = NULL;

        return 0;
}

/* Closes socket and frees nbd_data -- the opposite of nbd_setup. */
static void nbd_cleanup(struct thread_data *td)
{
        struct nbd_data *nbd_data = td->io_ops_data;

        if (nbd_data) {
                if (nbd_data->nbd)
                        nbd_close(nbd_data->nbd);
                free(nbd_data);
        }
}

/* Connect to the server from each thread. */
static int nbd_init(struct thread_data *td)
{
        struct nbd_options *o = td->eo;
        struct nbd_data *nbd_data = td->io_ops_data;
        int r;

        if (!o->uri) {
                log_err("fio: nbd: uri parameter was not specified\n");
                return 1;
        }

        nbd_data->nbd = nbd_create();
        if (!nbd_data->nbd) {
                log_err("fio: nbd_create: %s\n", nbd_get_error());
                return 1;
        }
        /* This is actually a synchronous connect and handshake. */
        r = nbd_connect_uri(nbd_data->nbd, o->uri);
        if (r == -1) {
                log_err("fio: nbd_connect_uri: %s\n", nbd_get_error());
                return 1;
        }

        log_info("fio: connected to NBD server\n");
        return 0;
}

/* A command in flight has been completed. */
static int cmd_completed(void *vp, int *error)
{
        struct io_u *io_u;
        struct nbd_data *nbd_data;
        struct io_u **completed;

        io_u = vp;
        nbd_data = io_u->engine_data;

        if (nbd_data->debug)
                log_info("fio: nbd: command completed\n");

        if (*error != 0)
                io_u->error = *error;
        else
                io_u->error = 0;

        /* Add this completion to the list so it can be picked up
         * later by ->event.
         */
        completed = realloc(nbd_data->completed,
                            sizeof(struct io_u *) * (nbd_data->nr_completed + 1));
        if (completed == NULL) {
                io_u->error = errno;
                return 0;
        }

        nbd_data->completed = completed;
        nbd_data->completed[nbd_data->nr_completed] = io_u;
        nbd_data->nr_completed++;

        return 0;
}

/* Begin a read, write, trim or flush request. */
static enum fio_q_status nbd_queue(struct thread_data *td, struct io_u *io_u)
{
        struct nbd_data *nbd_data = td->io_ops_data;
        nbd_completion_callback completion = {
                .callback = cmd_completed,
                .user_data = io_u,
        };
        int r;

        fio_ro_check(td, io_u);

        io_u->engine_data = nbd_data;

        if (io_u->ddir == DDIR_WRITE || io_u->ddir == DDIR_READ)
                assert(io_u->xfer_buflen <= NBD_MAX_REQUEST_SIZE);

        switch (io_u->ddir) {
        case DDIR_READ:
                r = nbd_aio_pread(nbd_data->nbd,
                                  io_u->xfer_buf, io_u->xfer_buflen,
                                  io_u->offset, completion, 0);
                break;
        case DDIR_WRITE:
                r = nbd_aio_pwrite(nbd_data->nbd,
                                   io_u->xfer_buf, io_u->xfer_buflen,
                                   io_u->offset, completion, 0);
                break;
        case DDIR_TRIM:
                r = nbd_aio_trim(nbd_data->nbd, io_u->xfer_buflen,
                                 io_u->offset, completion, 0);
                break;
        case DDIR_SYNC:
                /* XXX We could probably also handle
                 * DDIR_SYNC_FILE_RANGE with a bit of effort.
                 */
                r = nbd_aio_flush(nbd_data->nbd, completion, 0);
                break;
        default:
                io_u->error = EINVAL;
                return FIO_Q_COMPLETED;
        }

        if (r == -1) {
                /* errno is optional information on the libnbd error path;
                 * if it is 0, fall back to a default value.
                 */
                io_u->error = nbd_get_errno();
                if (io_u->error == 0)
                        io_u->error = EIO;
                return FIO_Q_COMPLETED;
        }

        if (nbd_data->debug)
                log_info("fio: nbd: command issued\n");
        io_u->error = 0;
        return FIO_Q_QUEUED;
}

static unsigned retire_commands(struct nbd_handle *nbd)
{
        int64_t cookie;
        unsigned r = 0;

        while ((cookie = nbd_aio_peek_command_completed(nbd)) > 0) {
                /* Ignore the return value.  cmd_completed has already
                 * checked for an error and set io_u->error.  We only
                 * have to call this to retire the command.
                 */
                nbd_aio_command_completed(nbd, cookie);
                r++;
        }

        if (nbd_get_debug(nbd))
                log_info("fio: nbd: %u commands retired\n", r);
        return r;
}

static int nbd_getevents(struct thread_data *td, unsigned int min,
                         unsigned int max, const struct timespec *t)
{
        struct nbd_data *nbd_data = td->io_ops_data;
        int r;
        unsigned events = 0;
        int timeout;

        /* XXX This handling of the timeout is wrong: in the worst case we
         * can wait for up to (number of loop iterations) * timeout.
         */
        timeout = !t ? -1 : t->tv_sec * 1000 + t->tv_nsec / 1000000;

        while (events < min) {
                r = nbd_poll(nbd_data->nbd, timeout);
                if (r == -1) {
                        /* error in poll */
                        log_err("fio: nbd_poll: %s\n", nbd_get_error());
                        return -1;
                } else {
                        /* poll made progress */
                        events += retire_commands(nbd_data->nbd);
                }
        }

        return events;
}

static struct io_u *nbd_event(struct thread_data *td, int event)
{
        struct nbd_data *nbd_data = td->io_ops_data;

        if (nbd_data->nr_completed == 0)
                return NULL;

        /* XXX We ignore the event number and assume fio calls us
         * exactly once for each event in [0..nr_events-1].
         */
        nbd_data->nr_completed--;
        return nbd_data->completed[nbd_data->nr_completed];
}

static int nbd_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        io_u->engine_data = NULL;
        return 0;
}

static void nbd_io_u_free(struct thread_data *td, struct io_u *io_u)
{
        /* Nothing needs to be done. */
}

static int nbd_open_file(struct thread_data *td, struct fio_file *f)
{
        return 0;
}

static int nbd_invalidate(struct thread_data *td, struct fio_file *f)
{
        return 0;
}

FIO_STATIC struct ioengine_ops ioengine = {
        .name = "nbd",
        .version = FIO_IOOPS_VERSION,
        .options = options,
        .option_struct_size = sizeof(struct nbd_options),
        .flags = FIO_DISKLESSIO | FIO_NOEXTEND,

        .setup = nbd_setup,
        .init = nbd_init,
        .cleanup = nbd_cleanup,
        .queue = nbd_queue,
        .getevents = nbd_getevents,
        .event = nbd_event,
        .io_u_init = nbd_io_u_init,
        .io_u_free = nbd_io_u_free,

        .open_file = nbd_open_file,
        .invalidate = nbd_invalidate,
};

static void fio_init fio_nbd_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_nbd_unregister(void)
{
        unregister_ioengine(&ioengine);
}