4 * IO engine that reads/writes to/from sockets.
12 #include <netinet/in.h>
13 #include <arpa/inet.h>
25 struct sockaddr_in addr;
/*
 * Pre-queue validation hook.  Network jobs are unidirectional (a thread
 * either sends or receives, per nd->send_to_net) and purely sequential,
 * so IO going the wrong direction or not starting at the last completed
 * position is rejected with EINVAL.
 * NOTE(review): this listing is elided -- the return statements between
 * the checks are not visible here.
 */
28 static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
30 struct netio_data *nd = td->io_ops->data;
31 struct fio_file *f = io_u->file;
34 * Make sure we don't see spurious reads to a receiver, and vice versa
36 if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
37 (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
38 td_verror(td, EINVAL, "bad direction");
/* SYNC has no on-the-wire meaning here; presumably accepted as a no-op. */
42 if (io_u->ddir == DDIR_SYNC)
/* Sequential continuation of the stream -- the only legal offset. */
44 if (io_u->offset == f->last_completed_pos)
48 * If offset is different from last end position, it's a seek.
49 * As network io is purely sequential, we don't allow seeks.
51 td_verror(td, EINVAL, "cannot seek");
/*
 * Core splice helper: moves up to 'len' bytes from fdin to fdout via
 * splice(2) with no offset (both ends are stream fds: socket or pipe).
 * NOTE(review): the surrounding transfer-accounting loop and the return
 * value handling are elided from this listing.
 */
55 static int splice_io_u(int fdin, int fdout, unsigned int len)
60 int ret = splice(fdin, NULL, fdout, NULL, len, 0);
78 * Receive bytes from a socket and fill them into the internal pipe
/* Socket fd -> write end of the pipe (pipes[1]). */
80 static int splice_in(struct thread_data *td, struct io_u *io_u)
82 struct netio_data *nd = td->io_ops->data;
84 return splice_io_u(io_u->file->fd, nd->pipes[1], io_u->xfer_buflen);
88 * Transmit 'len' bytes from the internal pipe
/* Read end of the pipe (pipes[0]) -> socket fd.  'len' (second parameter
 * line elided) is the byte count previously vmspliced into the pipe. */
90 static int splice_out(struct thread_data *td, struct io_u *io_u,
93 struct netio_data *nd = td->io_ops->data;
95 return splice_io_u(nd->pipes[0], io_u->file->fd, len);
/*
 * Map the io_u buffer to/from one end of the internal pipe with
 * vmsplice(2), looping until the whole iovec is consumed.  Direction is
 * determined by whether 'fd' is the pipe's read or write end.
 * NOTE(review): the iovec length initialization and the loop's error/
 * advance handling are elided from this listing.
 */
98 static int vmsplice_io_u(struct io_u *io_u, int fd, unsigned int len)
101 .iov_base = io_u->xfer_buf,
106 while (iov.iov_len) {
107 int ret = vmsplice(fd, &iov, 1, SPLICE_F_MOVE);
126 * vmsplice() pipe to io_u buffer
/* Drain 'len' bytes from the pipe's read end into the io_u buffer. */
128 static int vmsplice_io_u_out(struct thread_data *td, struct io_u *io_u,
131 struct netio_data *nd = td->io_ops->data;
133 return vmsplice_io_u(io_u, nd->pipes[0], len);
137 * vmsplice() io_u to pipe
/* Feed the full io_u buffer into the pipe's write end. */
139 static int vmsplice_io_u_in(struct thread_data *td, struct io_u *io_u)
141 struct netio_data *nd = td->io_ops->data;
143 return vmsplice_io_u(io_u, nd->pipes[1], io_u->xfer_buflen);
147 * splice receive - transfer socket data into a pipe using splice, then map
148 * that pipe data into the io_u using vmsplice.
150 static int fio_netio_splice_in(struct thread_data *td, struct io_u *io_u)
154 ret = splice_in(td, io_u);
/* 'ret' bytes landed in the pipe; move exactly that many into the buffer.
 * NOTE(review): the ret <= 0 early-return is elided from this listing. */
156 return vmsplice_io_u_out(td, io_u, ret);
162 * splice transmit - map data from the io_u into a pipe by using vmsplice,
163 * then transfer that pipe to a socket using splice.
165 static int fio_netio_splice_out(struct thread_data *td, struct io_u *io_u)
169 ret = vmsplice_io_u_in(td, io_u);
/* 'ret' bytes are now in the pipe; splice exactly that many to the socket.
 * NOTE(review): the ret <= 0 early-return is elided from this listing. */
171 return splice_out(td, io_u, ret);
/*
 * Plain send(2) transmit of the io_u buffer.  Returns the send(2) result
 * (bytes sent or -1); the caller interprets short/negative returns.
 */
176 static int fio_netio_send(struct thread_data *td, struct io_u *io_u)
181 * if we are going to write more, set MSG_MORE
/* MSG_MORE lets the kernel coalesce packets while more data is pending.
 * NOTE(review): the initial `flags = 0` declaration is elided. */
183 if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen < td->o.size)
186 return send(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
/*
 * Plain recv(2) receive into the io_u buffer.  MSG_WAITALL asks the kernel
 * to block until the full buflen arrives (or the connection ends/errors).
 */
189 static int fio_netio_recv(struct io_u *io_u)
191 int flags = MSG_WAITALL;
193 return recv(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
/*
 * Queue entry point.  Dispatches to the splice or the plain send/recv
 * path depending on IO direction (the condition selecting splice vs
 * send/recv is elided -- presumably keyed on which engine flavor is in
 * use); DDIR_SYNC is a no-op.  Always completes synchronously: short
 * transfers are reported via io_u->resid, failures via io_u->error.
 */
196 static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
198 struct netio_data *nd = td->io_ops->data;
201 fio_ro_check(td, io_u);
203 if (io_u->ddir == DDIR_WRITE) {
205 ret = fio_netio_splice_out(td, io_u);
207 ret = fio_netio_send(td, io_u);
208 } else if (io_u->ddir == DDIR_READ) {
210 ret = fio_netio_splice_in(td, io_u);
212 ret = fio_netio_recv(io_u);
214 ret = 0; /* must be a SYNC */
216 if (ret != (int) io_u->xfer_buflen) {
/* Short but non-failed transfer: record the residual byte count.
 * NOTE(review): the `ret >= 0` vs `errno` branch here is elided. */
218 io_u->resid = io_u->xfer_buflen - ret;
220 return FIO_Q_COMPLETED;
226 td_verror(td, io_u->error, "xfer");
228 return FIO_Q_COMPLETED;
/*
 * Client (sender) side of open_file: create a TCP socket and connect to
 * the address filled in by fio_netio_setup_connect().  Error paths call
 * td_verror and (elided) return non-zero.
 */
231 static int fio_netio_connect(struct thread_data *td, struct fio_file *f)
233 struct netio_data *nd = td->io_ops->data;
235 f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
237 td_verror(td, errno, "socket");
241 if (connect(f->fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
242 td_verror(td, errno, "connect");
249 static int fio_netio_accept(struct thread_data *td, struct fio_file *f)
251 struct netio_data *nd = td->io_ops->data;
252 socklen_t socklen = sizeof(nd->addr);
256 log_info("fio: waiting for connection\n");
259 * Accept loop. poll for incoming events, accept them. Repeat until we
260 * have all connections.
262 while (!td->terminate) {
263 pfd.fd = nd->listenfd;
266 ret = poll(&pfd, 1, -1);
267 printf("got ret %d\n", ret);
272 td_verror(td, errno, "poll");
278 * should be impossible
280 if (!(pfd.revents & POLLIN))
283 f->fd = accept(nd->listenfd, (struct sockaddr *) &nd->addr, &socklen);
285 td_verror(td, errno, "accept");
/*
 * open_file hook: a receiving job accepts an incoming connection, a
 * sending job connects out.  NOTE(review): the selecting condition is
 * elided -- presumably keyed on td_read()/send_to_net; confirm against
 * fio_netio_init().
 */
294 static int fio_netio_open_file(struct thread_data *td, struct fio_file *f)
297 return fio_netio_accept(td, f);
299 return fio_netio_connect(td, f);
/*
 * Fill nd->addr with the IPv4 destination for a sending job.  'host' may
 * be a dotted-quad address (inet_aton) or a hostname, resolved through
 * gethostbyname() as a fallback.
 */
302 static int fio_netio_setup_connect(struct thread_data *td, const char *host,
305 struct netio_data *nd = td->io_ops->data;
307 nd->addr.sin_family = AF_INET;
308 nd->addr.sin_port = htons(port);
310 if (inet_aton(host, &nd->addr.sin_addr) != 1) {
311 struct hostent *hent;
313 hent = gethostbyname(host);
315 td_verror(td, errno, "gethostbyname");
/* Hard-coded 4 == sizeof(struct in_addr): IPv4-only by design here.
 * NOTE(review): hent->h_length would be more self-documenting. */
319 memcpy(&nd->addr.sin_addr, hent->h_addr, 4);
/*
 * Create, configure, bind and listen on a TCP socket on INADDR_ANY:port
 * for a receiving job.  SO_REUSEADDR/SO_REUSEPORT allow quick re-runs on
 * the same port.  Each failing syscall reports via td_verror and (elided)
 * returns non-zero.  NOTE(review): storing 'fd' into nd->listenfd after
 * listen() succeeds is elided from this listing -- confirm it happens, as
 * fio_netio_accept() reads nd->listenfd.
 */
325 static int fio_netio_setup_listen(struct thread_data *td, short port)
327 struct netio_data *nd = td->io_ops->data;
330 fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
332 td_verror(td, errno, "socket");
337 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
338 td_verror(td, errno, "setsockopt");
342 if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
343 td_verror(td, errno, "setsockopt");
348 nd->addr.sin_family = AF_INET;
349 nd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
350 nd->addr.sin_port = htons(port);
352 if (bind(fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
353 td_verror(td, errno, "bind");
/* Backlog of 1: exactly one peer is expected per job. */
356 if (listen(fd, 1) < 0) {
357 td_verror(td, errno, "listen");
365 static int fio_netio_init(struct thread_data *td)
367 struct netio_data *nd = td->io_ops->data;
369 char host[64], buf[128];
374 log_err("fio: network connections must be read OR write\n");
378 log_err("fio: network IO can't be random\n");
382 strcpy(buf, td->o.filename);
384 sep = strchr(buf, '/');
386 log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
397 ret = fio_netio_setup_listen(td, port);
400 ret = fio_netio_setup_connect(td, host, port);
/*
 * Tear down per-thread engine state: close the listening socket and both
 * pipe ends if they were ever opened (-1 means "never opened"), then free
 * the netio_data and clear the pointer.  The close()/free() calls are
 * elided from this listing; presumably an `if (nd)` guard wraps the body.
 */
406 static void fio_netio_cleanup(struct thread_data *td)
408 struct netio_data *nd = td->io_ops->data;
411 if (nd->listenfd != -1)
413 if (nd->pipes[0] != -1)
415 if (nd->pipes[1] != -1)
/* Clear the hook so a second cleanup is a no-op. */
419 td->io_ops->data = NULL;
423 static int fio_netio_setup(struct thread_data *td)
425 struct netio_data *nd;
427 if (!td->io_ops->data) {
428 nd = malloc(sizeof(*nd));;
430 memset(nd, 0, sizeof(*nd));
432 nd->pipes[0] = nd->pipes[1] = -1;
433 td->io_ops->data = nd;
/*
 * Setup for the splice engine flavor: after common setup, create the
 * internal pipe used by the splice/vmsplice transfer paths.
 * NOTE(review): the call into the common setup (which allocates
 * td->io_ops->data) and the return statements are elided here.
 */
439 static int fio_netio_setup_splice(struct thread_data *td)
441 struct netio_data *nd;
445 nd = td->io_ops->data;
447 if (pipe(nd->pipes) < 0)
/*
 * Plain send/recv engine flavor.  Synchronous, diskless (no backing file)
 * and unidirectional.  The .name field and the tail of .flags are elided
 * from this listing.
 */
457 static struct ioengine_ops ioengine_rw = {
459 .version = FIO_IOOPS_VERSION,
460 .prep = fio_netio_prep,
461 .queue = fio_netio_queue,
462 .setup = fio_netio_setup,
463 .init = fio_netio_init,
464 .cleanup = fio_netio_cleanup,
465 .open_file = fio_netio_open_file,
466 .close_file = generic_close_file,
467 .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
/*
 * splice/vmsplice engine flavor.  Identical hooks to the plain engine
 * except .setup, which additionally creates the internal pipe.  The .name
 * field and the tail of .flags are elided from this listing.
 */
471 static struct ioengine_ops ioengine_splice = {
473 .version = FIO_IOOPS_VERSION,
474 .prep = fio_netio_prep,
475 .queue = fio_netio_queue,
476 .setup = fio_netio_setup_splice,
477 .init = fio_netio_init,
478 .cleanup = fio_netio_cleanup,
479 .open_file = fio_netio_open_file,
480 .close_file = generic_close_file,
481 .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_UNIDIR |
/* Constructor (fio_init): register both engine flavors at load time. */
485 static void fio_init fio_netio_register(void)
487 register_ioengine(&ioengine_rw);
488 register_ioengine(&ioengine_splice);
/* Destructor (fio_exit): unregister both engine flavors at unload time. */
491 static void fio_exit fio_netio_unregister(void)
493 unregister_ioengine(&ioengine_rw);
494 unregister_ioengine(&ioengine_splice);