Full readonly check
[fio.git] / engines / net.c
/*
 * net engine
 *
 * IO engine that reads/writes to/from sockets.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/poll.h>

#include "../fio.h"

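/*
 * Per-thread private data for the net engines: the listening socket fd
 * (receiver side), the transfer direction, whether splice/vmsplice is
 * used, and the pipe pair that backs the splice path.
 */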
struct netio_data {
	int listenfd;
	int send_to_net;
	int use_splice;
	int pipes[2];
	char host[64];
	struct sockaddr_in addr;
};

static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;

	/*
	 * Make sure we don't see reads on a sending side, or writes on a
	 * receiving side.
	 */
	if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
	    (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
		td_verror(td, EINVAL, "bad direction");
		return 1;
	}

	if (io_u->ddir == DDIR_SYNC)
		return 0;
	if (io_u->offset == f->last_completed_pos)
		return 0;

	/*
	 * If the offset is different from the last end position, it's a seek.
	 * As network IO is purely sequential, we don't allow seeks.
	 */
	td_verror(td, EINVAL, "cannot seek");
	return 1;
}

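/*
 * Move up to 'len' bytes from fdin to fdout with splice(). Returns the
 * number of bytes moved, 0 on EOF, or a negative value if the first
 * splice() call fails.
 */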
static int splice_io_u(int fdin, int fdout, unsigned int len)
{
	int bytes = 0;

	while (len) {
		int ret = splice(fdin, NULL, fdout, NULL, len, 0);

		if (ret < 0) {
			if (!bytes)
				bytes = ret;

			break;
		} else if (!ret)
			break;

		bytes += ret;
		len -= ret;
	}

	return bytes;
}

/*
 * Receive bytes from a socket and fill them into the internal pipe
 */
static int splice_in(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;

	return splice_io_u(io_u->file->fd, nd->pipes[1], io_u->xfer_buflen);
}

/*
 * Transmit 'len' bytes from the internal pipe
 */
static int splice_out(struct thread_data *td, struct io_u *io_u,
		      unsigned int len)
{
	struct netio_data *nd = td->io_ops->data;

	return splice_io_u(nd->pipes[0], io_u->file->fd, len);
}

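/*
 * Map data between the io_u buffer and a pipe with vmsplice(): 'fd' is
 * either the read or write end of the internal pipe, depending on the
 * transfer direction. Return value follows the same convention as
 * splice_io_u().
 */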
static int vmsplice_io_u(struct io_u *io_u, int fd, unsigned int len)
{
	struct iovec iov = {
		.iov_base = io_u->xfer_buf,
		.iov_len = len,
	};
	int bytes = 0;

	while (iov.iov_len) {
		int ret = vmsplice(fd, &iov, 1, SPLICE_F_MOVE);

		if (ret < 0) {
			if (!bytes)
				bytes = ret;
			break;
		} else if (!ret)
			break;

		iov.iov_len -= ret;
		iov.iov_base += ret;
		bytes += ret;
	}

	return bytes;
}

/*
 * vmsplice() pipe to io_u buffer
 */
static int vmsplice_io_u_out(struct thread_data *td, struct io_u *io_u,
			     unsigned int len)
{
	struct netio_data *nd = td->io_ops->data;

	return vmsplice_io_u(io_u, nd->pipes[0], len);
}

/*
 * vmsplice() io_u to pipe
 */
static int vmsplice_io_u_in(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;

	return vmsplice_io_u(io_u, nd->pipes[1], io_u->xfer_buflen);
}

/*
 * splice receive - transfer socket data into a pipe using splice, then map
 * that pipe data into the io_u using vmsplice.
 */
static int fio_netio_splice_in(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	ret = splice_in(td, io_u);
	if (ret > 0)
		return vmsplice_io_u_out(td, io_u, ret);

	return ret;
}

/*
 * splice transmit - map data from the io_u into a pipe by using vmsplice,
 * then transfer that pipe to a socket using splice.
 */
static int fio_netio_splice_out(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	ret = vmsplice_io_u_in(td, io_u);
	if (ret > 0)
		return splice_out(td, io_u, ret);

	return ret;
}

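/*
 * Plain (non-splice) transmit of the io_u buffer over the socket
 */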
static int fio_netio_send(struct thread_data *td, struct io_u *io_u)
{
	int flags = 0;

	/*
	 * if we are going to write more, set MSG_MORE
	 */
	if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen < td->o.size)
		flags = MSG_MORE;

	return send(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
}

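/*
 * Plain receive into the io_u buffer. MSG_WAITALL makes recv() block
 * until the full buffer is filled, the peer closes, or an error occurs.
 */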
static int fio_netio_recv(struct io_u *io_u)
{
	int flags = MSG_WAITALL;

	return recv(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
}

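/*
 * Queue entry point: dispatch to the send/recv or splice paths. A short
 * transfer is reported back through io_u->resid, a failed one through
 * io_u->error.
 */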
static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct netio_data *nd = td->io_ops->data;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_WRITE) {
		if (nd->use_splice)
			ret = fio_netio_splice_out(td, io_u);
		else
			ret = fio_netio_send(td, io_u);
	} else if (io_u->ddir == DDIR_READ) {
		if (nd->use_splice)
			ret = fio_netio_splice_in(td, io_u);
		else
			ret = fio_netio_recv(io_u);
	} else
		ret = 0;	/* must be a SYNC */

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}

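/*
 * Transmitter side: open a TCP socket and connect to the receiver
 */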
static int fio_netio_connect(struct thread_data *td, struct fio_file *f)
{
	struct netio_data *nd = td->io_ops->data;

	f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (f->fd < 0) {
		td_verror(td, errno, "socket");
		return 1;
	}

	if (connect(f->fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
		td_verror(td, errno, "connect");
		close(f->fd);
		f->fd = -1;
		return 1;
	}

	return 0;
}

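/*
 * Receiver side: wait for an incoming connection on the listening socket
 */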
static int fio_netio_accept(struct thread_data *td, struct fio_file *f)
{
	struct netio_data *nd = td->io_ops->data;
	socklen_t socklen = sizeof(nd->addr);
	struct pollfd pfd;
	int ret;

	log_info("fio: waiting for connection\n");

	/*
	 * Accept loop. poll for incoming events, accept them. Repeat until we
	 * have all connections.
	 */
	while (!td->terminate) {
		pfd.fd = nd->listenfd;
		pfd.events = POLLIN;

		ret = poll(&pfd, 1, -1);
		if (ret < 0) {
			if (errno == EINTR)
				continue;

			td_verror(td, errno, "poll");
			break;
		} else if (!ret)
			continue;

		/*
		 * should be impossible
		 */
		if (!(pfd.revents & POLLIN))
			continue;

		f->fd = accept(nd->listenfd, (struct sockaddr *) &nd->addr, &socklen);
		if (f->fd < 0) {
			td_verror(td, errno, "accept");
			return 1;
		}
		break;
	}

	return 0;
}

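/*
 * "Opening" the file means establishing the connection: readers accept,
 * writers connect.
 */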
static int fio_netio_open_file(struct thread_data *td, struct fio_file *f)
{
	if (td_read(td))
		return fio_netio_accept(td, f);
	else
		return fio_netio_connect(td, f);
}

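/*
 * Fill in the destination address for the transmitter, accepting either
 * a dotted-quad IP or a hostname to resolve.
 */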
static int fio_netio_setup_connect(struct thread_data *td, const char *host,
				   unsigned short port)
{
	struct netio_data *nd = td->io_ops->data;

	nd->addr.sin_family = AF_INET;
	nd->addr.sin_port = htons(port);

	if (!inet_aton(host, &nd->addr.sin_addr)) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno, "gethostbyname");
			return 1;
		}

		memcpy(&nd->addr.sin_addr, hent->h_addr, 4);
	}

	return 0;
}

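/*
 * Create, bind and listen on the receiver's socket
 */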
static int fio_netio_setup_listen(struct thread_data *td, unsigned short port)
{
	struct netio_data *nd = td->io_ops->data;
	int fd, opt;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0) {
		td_verror(td, errno, "socket");
		return 1;
	}

	opt = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno, "setsockopt");
		return 1;
	}
#ifdef SO_REUSEPORT
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno, "setsockopt");
		return 1;
	}
#endif

	nd->addr.sin_family = AF_INET;
	nd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
	nd->addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
		td_verror(td, errno, "bind");
		return 1;
	}
	if (listen(fd, 1) < 0) {
		td_verror(td, errno, "listen");
		return 1;
	}

	nd->listenfd = fd;
	return 0;
}

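/*
 * Parse the job filename as "host/port" (e.g. filename=machine1/8888 in
 * the job file) and set up the listening or connecting side accordingly.
 */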
static int fio_netio_init(struct thread_data *td)
{
	struct netio_data *nd = td->io_ops->data;
	unsigned short port;
	char host[64], buf[128];
	char *sep;
	int ret;

	if (td_rw(td)) {
		log_err("fio: network connections must be read OR write\n");
		return 1;
	}
	if (td_random(td)) {
		log_err("fio: network IO can't be random\n");
		return 1;
	}

	strcpy(buf, td->o.filename);

	sep = strchr(buf, '/');
	if (!sep) {
		log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
		return 1;
	}

	*sep = '\0';
	sep++;
	strcpy(host, buf);
	port = atoi(sep);

	if (td_read(td)) {
		nd->send_to_net = 0;
		ret = fio_netio_setup_listen(td, port);
	} else {
		nd->send_to_net = 1;
		ret = fio_netio_setup_connect(td, host, port);
	}

	return ret;
}

static void fio_netio_cleanup(struct thread_data *td)
{
	struct netio_data *nd = td->io_ops->data;

	if (nd) {
		if (nd->listenfd != -1)
			close(nd->listenfd);
		if (nd->pipes[0] != -1)
			close(nd->pipes[0]);
		if (nd->pipes[1] != -1)
			close(nd->pipes[1]);

		free(nd);
		td->io_ops->data = NULL;
	}
}

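/*
 * Allocate and zero the per-thread data. File descriptors start out as
 * -1 so cleanup only closes what was actually opened.
 */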
static int fio_netio_setup(struct thread_data *td)
{
	struct netio_data *nd;

	if (!td->io_ops->data) {
		nd = malloc(sizeof(*nd));

		memset(nd, 0, sizeof(*nd));
		nd->listenfd = -1;
		nd->pipes[0] = nd->pipes[1] = -1;
		td->io_ops->data = nd;
	}

	return 0;
}

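/*
 * Like fio_netio_setup(), but also create the pipe pair used by the
 * splice/vmsplice transfer path.
 */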
static int fio_netio_setup_splice(struct thread_data *td)
{
	struct netio_data *nd;

	fio_netio_setup(td);

	nd = td->io_ops->data;
	if (nd) {
		if (pipe(nd->pipes) < 0)
			return 1;

		nd->use_splice = 1;
		return 0;
	}

	return 1;
}

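/*
 * Two engine definitions: "net" uses plain send()/recv(), "netsplice"
 * routes data through a pipe with splice()/vmsplice().
 */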
static struct ioengine_ops ioengine_rw = {
	.name		= "net",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_netio_prep,
	.queue		= fio_netio_queue,
	.setup		= fio_netio_setup,
	.init		= fio_netio_init,
	.cleanup	= fio_netio_cleanup,
	.open_file	= fio_netio_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR,
};

static struct ioengine_ops ioengine_splice = {
	.name		= "netsplice",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_netio_prep,
	.queue		= fio_netio_queue,
	.setup		= fio_netio_setup_splice,
	.init		= fio_netio_init,
	.cleanup	= fio_netio_cleanup,
	.open_file	= fio_netio_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR,
};

static void fio_init fio_netio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_splice);
}

static void fio_exit fio_netio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_splice);
}