Make file structures dynamically allocated
[fio.git] / engines / net.c
/*
 * net engine
 *
 * IO engine that reads/writes to/from sockets.
 *
 */
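
/*
 * Example job sketch (illustrative only; the job name and sizes below are
 * made up, but ioengine=net and the filename=host/port form match the
 * parsing done in fio_netio_init() further down). A write job connects and
 * sends, a read job listens and accepts:
 *
 *	[net-writer]
 *	ioengine=net
 *	filename=localhost/8888
 *	rw=write
 *	size=64m
 */
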
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/poll.h>

#include "../fio.h"

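/*
 * Per-thread engine state: the listening socket (server/read side), the
 * data direction decided at init time, whether the splice variant is in
 * use, the internal pipe backing the splice/vmsplice path, and the peer
 * address we connect to or accept on.
 */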
struct netio_data {
	int listenfd;
	int send_to_net;
	int use_splice;
	int pipes[2];
	char host[64];
	struct sockaddr_in addr;
};

static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;

	/*
	 * Make sure we don't see spurious reads to a receiver, and vice versa
	 */
	if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
	    (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
		td_verror(td, EINVAL, "bad direction");
		return 1;
	}

	if (io_u->ddir == DDIR_SYNC)
		return 0;
	if (io_u->offset == f->last_completed_pos)
		return 0;

	/*
	 * If offset is different from last end position, it's a seek.
	 * As network io is purely sequential, we don't allow seeks.
	 */
	td_verror(td, EINVAL, "cannot seek");
	return 1;
}

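/*
 * Move up to 'len' bytes from fdin to fdout using splice(). Returns the
 * number of bytes moved, 0 on EOF, or the (negative) result of the first
 * failing splice() call if nothing was moved.
 */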
static int splice_io_u(int fdin, int fdout, unsigned int len)
{
	int bytes = 0;

	while (len) {
		int ret = splice(fdin, NULL, fdout, NULL, len, 0);

		if (ret < 0) {
			if (!bytes)
				bytes = ret;

			break;
		} else if (!ret)
			break;

		bytes += ret;
		len -= ret;
	}

	return bytes;
}

/*
 * Receive bytes from a socket and fill them into the internal pipe
 */
static int splice_in(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;

	return splice_io_u(io_u->file->fd, nd->pipes[1], io_u->xfer_buflen);
}

/*
 * Transmit 'len' bytes from the internal pipe
 */
static int splice_out(struct thread_data *td, struct io_u *io_u,
		      unsigned int len)
{
	struct netio_data *nd = td->io_ops->data;

	return splice_io_u(nd->pipes[0], io_u->file->fd, len);
}

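/*
 * Transfer the io_u buffer to or from the given pipe end with vmsplice(),
 * looping until the full length has been mapped. Same return convention
 * as splice_io_u() above.
 */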
static int vmsplice_io_u(struct io_u *io_u, int fd, unsigned int len)
{
	struct iovec iov = {
		.iov_base = io_u->xfer_buf,
		.iov_len = len,
	};
	int bytes = 0;

	while (iov.iov_len) {
		int ret = vmsplice(fd, &iov, 1, SPLICE_F_MOVE);

		if (ret < 0) {
			if (!bytes)
				bytes = ret;
			break;
		} else if (!ret)
			break;

		iov.iov_len -= ret;
		iov.iov_base += ret;
		bytes += ret;
	}

	return bytes;
}

/*
 * vmsplice() pipe to io_u buffer
 */
static int vmsplice_io_u_out(struct thread_data *td, struct io_u *io_u,
			     unsigned int len)
{
	struct netio_data *nd = td->io_ops->data;

	return vmsplice_io_u(io_u, nd->pipes[0], len);
}

/*
 * vmsplice() io_u to pipe
 */
static int vmsplice_io_u_in(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;

	return vmsplice_io_u(io_u, nd->pipes[1], io_u->xfer_buflen);
}

/*
 * splice receive - transfer socket data into a pipe using splice, then map
 * that pipe data into the io_u using vmsplice.
 */
static int fio_netio_splice_in(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	ret = splice_in(td, io_u);
	if (ret > 0)
		return vmsplice_io_u_out(td, io_u, ret);

	return ret;
}

/*
 * splice transmit - map data from the io_u into a pipe by using vmsplice,
 * then transfer that pipe to a socket using splice.
 */
static int fio_netio_splice_out(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	ret = vmsplice_io_u_in(td, io_u);
	if (ret > 0)
		return splice_out(td, io_u, ret);

	return ret;
}

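/*
 * Plain send() path for transmitting the io_u buffer over the socket.
 */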
static int fio_netio_send(struct thread_data *td, struct io_u *io_u)
{
	int flags = 0;

	/*
	 * if we are going to write more, set MSG_MORE
	 */
	if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen < td->o.size)
		flags = MSG_MORE;

	return send(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
}

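/*
 * Plain recv() path; MSG_WAITALL blocks until the full buffer has been
 * received (or the connection ends).
 */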
static int fio_netio_recv(struct io_u *io_u)
{
	int flags = MSG_WAITALL;

	return recv(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
}

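/*
 * Queue entry point: dispatch to the splice or plain send/recv path based
 * on data direction. Short transfers are reported back through ->resid.
 * The engine is synchronous, so every io_u completes here.
 */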
static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_WRITE) {
		if (nd->use_splice)
			ret = fio_netio_splice_out(td, io_u);
		else
			ret = fio_netio_send(td, io_u);
	} else if (io_u->ddir == DDIR_READ) {
		if (nd->use_splice)
			ret = fio_netio_splice_in(td, io_u);
		else
			ret = fio_netio_recv(io_u);
	} else
		ret = 0;	/* must be a SYNC */

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}

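/*
 * Writer side: create a TCP socket and connect to the host/port parsed
 * at init time.
 */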
static int fio_netio_connect(struct thread_data *td, struct fio_file *f)
{
	struct netio_data *nd = td->io_ops->data;

	f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (f->fd < 0) {
		td_verror(td, errno, "socket");
		return 1;
	}

	if (connect(f->fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
		td_verror(td, errno, "connect");
		close(f->fd);
		f->fd = -1;
		return 1;
	}

	return 0;
}

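/*
 * Reader side: poll the listening socket until a connection arrives or
 * the thread is asked to terminate, then accept it.
 */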
static int fio_netio_accept(struct thread_data *td, struct fio_file *f)
{
	struct netio_data *nd = td->io_ops->data;
	socklen_t socklen = sizeof(nd->addr);
	struct pollfd pfd;
	int ret;

	log_info("fio: waiting for connection\n");

	/*
	 * Accept loop. poll for incoming events, accept them. Repeat until we
	 * have all connections.
	 */
	while (!td->terminate) {
		pfd.fd = nd->listenfd;
		pfd.events = POLLIN;

		ret = poll(&pfd, 1, -1);
		if (ret < 0) {
			if (errno == EINTR)
				continue;

			td_verror(td, errno, "poll");
			break;
		} else if (!ret)
			continue;

		/*
		 * should be impossible
		 */
		if (!(pfd.revents & POLLIN))
			continue;

		f->fd = accept(nd->listenfd, (struct sockaddr *) &nd->addr, &socklen);
		if (f->fd < 0) {
			td_verror(td, errno, "accept");
			return 1;
		}
		break;
	}

	return 0;
}

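/*
 * Read jobs act as the server (listen/accept), write jobs as the client
 * (connect).
 */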
static int fio_netio_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td_read(td))
		return fio_netio_accept(td, f);
	else
		return fio_netio_connect(td, f);
}

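/*
 * Fill in the peer address for the writer: try the host as a dotted quad
 * first, fall back to gethostbyname() otherwise.
 */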
static int fio_netio_setup_connect(struct thread_data *td, const char *host,
				   unsigned short port)
{
	struct netio_data *nd = td->io_ops->data;

	nd->addr.sin_family = AF_INET;
	nd->addr.sin_port = htons(port);

	if (!inet_aton(host, &nd->addr.sin_addr)) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno, "gethostbyname");
			return 1;
		}

		memcpy(&nd->addr.sin_addr, hent->h_addr, sizeof(nd->addr.sin_addr));
	}

	return 0;
}

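/*
 * Reader setup: create a TCP socket, allow quick address reuse, bind it
 * to the given port on all interfaces and start listening.
 */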
static int fio_netio_setup_listen(struct thread_data *td, unsigned short port)
{
	struct netio_data *nd = td->io_ops->data;
	int fd, opt;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0) {
		td_verror(td, errno, "socket");
		return 1;
	}

	opt = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno, "setsockopt");
		return 1;
	}
#ifdef SO_REUSEPORT
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno, "setsockopt");
		return 1;
	}
#endif

	nd->addr.sin_family = AF_INET;
	nd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
	nd->addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
		td_verror(td, errno, "bind");
		return 1;
	}
	if (listen(fd, 1) < 0) {
		td_verror(td, errno, "listen");
		return 1;
	}

	nd->listenfd = fd;
	return 0;
}

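/*
 * Parse "host/port" out of the job filename and decide the role: read
 * jobs listen for a sender, write jobs connect to a receiver.
 */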
static int fio_netio_init(struct thread_data *td)
{
	struct netio_data *nd = td->io_ops->data;
	unsigned short port;
	char host[64], buf[128];
	char *sep;
	int ret;

	if (td_rw(td)) {
		log_err("fio: network connections must be read OR write\n");
		return 1;
	}
	if (td_random(td)) {
		log_err("fio: network IO can't be random\n");
		return 1;
	}

	strncpy(buf, td->o.filename, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	sep = strchr(buf, '/');
	if (!sep) {
		log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
		return 1;
	}

	*sep = '\0';
	sep++;
	strncpy(host, buf, sizeof(host) - 1);
	host[sizeof(host) - 1] = '\0';
	port = atoi(sep);

	if (td_read(td)) {
		nd->send_to_net = 0;
		ret = fio_netio_setup_listen(td, port);
	} else {
		nd->send_to_net = 1;
		ret = fio_netio_setup_connect(td, host, port);
	}

	return ret;
}

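/*
 * Tear down: close the listening socket and the splice pipe ends if they
 * were opened, then free the per-thread state.
 */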
static void fio_netio_cleanup(struct thread_data *td)
{
	struct netio_data *nd = td->io_ops->data;

	if (nd) {
		if (nd->listenfd != -1)
			close(nd->listenfd);
		if (nd->pipes[0] != -1)
			close(nd->pipes[0]);
		if (nd->pipes[1] != -1)
			close(nd->pipes[1]);

		free(nd);
		td->io_ops->data = NULL;
	}
}

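/*
 * Allocate and zero the per-thread netio_data once, marking all
 * descriptors as unused.
 */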
static int fio_netio_setup(struct thread_data *td)
{
	struct netio_data *nd;

	if (!td->io_ops->data) {
		nd = malloc(sizeof(*nd));

		memset(nd, 0, sizeof(*nd));
		nd->listenfd = -1;
		nd->pipes[0] = nd->pipes[1] = -1;
		td->io_ops->data = nd;
	}

	return 0;
}

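/*
 * Setup for the "netsplice" variant: same state as above, plus the
 * internal pipe that splice()/vmsplice() move data through.
 */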
static int fio_netio_setup_splice(struct thread_data *td)
{
	struct netio_data *nd;

	fio_netio_setup(td);

	nd = td->io_ops->data;
	if (nd) {
		if (pipe(nd->pipes) < 0)
			return 1;

		nd->use_splice = 1;
		return 0;
	}

	return 1;
}

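/*
 * Two engine flavours are registered: "net" uses plain send()/recv(),
 * "netsplice" routes data through the pipe via splice()/vmsplice(). Both
 * are synchronous, diskless, single-direction engines.
 */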
static struct ioengine_ops ioengine_rw = {
	.name		= "net",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_netio_prep,
	.queue		= fio_netio_queue,
	.setup		= fio_netio_setup,
	.init		= fio_netio_init,
	.cleanup	= fio_netio_cleanup,
	.open_file	= fio_netio_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
			  FIO_SIGQUIT,
};

static struct ioengine_ops ioengine_splice = {
	.name		= "netsplice",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_netio_prep,
	.queue		= fio_netio_queue,
	.setup		= fio_netio_setup_splice,
	.init		= fio_netio_init,
	.cleanup	= fio_netio_cleanup,
	.open_file	= fio_netio_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
			  FIO_SIGQUIT,
};

static void fio_init fio_netio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_splice);
}

static void fio_exit fio_netio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_splice);
}