4 * IO engine that reads/writes to/from sockets.
12 #include <netinet/in.h>
13 #include <arpa/inet.h>
25 struct sockaddr_in addr;
/*
 * ->prep() hook: validate an io_u before it is queued.
 * Rejects IO whose direction contradicts the configured role (a sender
 * must not READ, a receiver must not WRITE), and — since network IO is
 * strictly sequential — rejects any offset that is not the last
 * completed position, i.e. an implicit seek.
 * NOTE(review): this view of the file is elided (interior lines
 * missing); comments only, code left byte-identical.
 */
28 static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
30 struct netio_data *nd = td->io_ops->data;
31 struct fio_file *f = io_u->file;
34 * Make sure we don't see spurious reads to a receiver, and vice versa
36 if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
37 (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
38 td_verror(td, EINVAL, "bad direction");
/* SYNC has no payload/offset semantics on a socket — presumably
 * accepted as a no-op; the elided lines would confirm. */
42 if (io_u->ddir == DDIR_SYNC)
/* offset matching the last completed position means "no seek" */
44 if (io_u->offset == f->last_completed_pos)
48 * If offset is different from last end position, it's a seek.
49 * As network io is purely sequential, we don't allow seeks.
51 td_verror(td, EINVAL, "cannot seek");
56 * Receive bytes from a socket and fill them into the internal pipe
/*
 * splice(2) from the socket fd into the write end of the internal
 * pipe (nd->pipes[1]).  Returns the byte count from splice()
 * (error handling lines are elided in this view).
 */
58 static int splice_in(struct thread_data *td, struct io_u *io_u)
60 struct netio_data *nd = td->io_ops->data;
61 unsigned int len = io_u->xfer_buflen;
62 struct fio_file *f = io_u->file;
/* socket -> pipe write end; flags == 0 (blocking, no SPLICE_F_*) */
66 int ret = splice(f->fd, NULL, nd->pipes[1], NULL, len, 0);
84 * Transmit 'len' bytes from the internal pipe
/*
 * splice(2) from the read end of the internal pipe (nd->pipes[0])
 * out to the socket fd.  Counterpart of splice_in().
 */
86 static int splice_out(struct thread_data *td, struct io_u *io_u,
89 struct netio_data *nd = td->io_ops->data;
90 struct fio_file *f = io_u->file;
/* pipe read end -> socket */
94 int ret = splice(nd->pipes[0], NULL, f->fd, NULL, len, 0);
112 * vmsplice() pipe to io_u buffer
/*
 * Moves 'len' bytes out of the internal pipe into the io_u transfer
 * buffer via vmsplice(2), looping until the iovec is fully consumed
 * (vmsplice may transfer less than requested per call).
 */
114 static int vmsplice_io_u_out(struct thread_data *td, struct io_u *io_u,
117 struct netio_data *nd = td->io_ops->data;
119 .iov_base = io_u->xfer_buf,
/* elided lines presumably set .iov_len from 'len' and init a counter */
124 while (iov.iov_len) {
125 int ret = vmsplice(nd->pipes[0], &iov, 1, SPLICE_F_MOVE);
144 * vmsplice() io_u to pipe
/*
 * Feeds the io_u transfer buffer into the internal pipe's write end
 * via vmsplice(2), looping until the whole iovec has been gifted.
 */
146 static int vmsplice_io_u_in(struct thread_data *td, struct io_u *io_u)
148 struct netio_data *nd = td->io_ops->data;
150 .iov_base = io_u->xfer_buf,
151 .iov_len = io_u->xfer_buflen,
/* running total of bytes vmspliced so far */
153 unsigned int bytes = 0;
155 while (iov.iov_len) {
156 int ret = vmsplice(nd->pipes[1], &iov, 1, SPLICE_F_MOVE);
/*
 * Splice receive path: socket -> internal pipe (splice_in), then
 * internal pipe -> io_u buffer (vmsplice_io_u_out).
 */
172 static int fio_netio_splice_in(struct thread_data *td, struct io_u *io_u)
176 ret = splice_in(td, io_u);
/* ret is the byte count received into the pipe; drain that many */
180 return vmsplice_io_u_out(td, io_u, ret);
/*
 * Splice transmit path: io_u buffer -> internal pipe (vmsplice_io_u_in),
 * then internal pipe -> socket (splice_out).
 */
183 static int fio_netio_splice_out(struct thread_data *td, struct io_u *io_u)
187 ret = vmsplice_io_u_in(td, io_u);
/* ret is the byte count staged in the pipe; push that many out */
191 return splice_out(td, io_u, ret);
/*
 * Plain send(2) transmit path (non-splice engine).
 * NOTE(review): send() returns ssize_t; the value is narrowed to the
 * int return here — fine for the buffer sizes fio uses, but worth
 * confirming xfer_buflen never exceeds INT_MAX.
 */
194 static int fio_netio_send(struct thread_data *td, struct io_u *io_u)
199 * if we are going to write more, set MSG_MORE
/* MSG_MORE hints the kernel to batch: more data follows while total
 * bytes written stay below the job's configured size */
201 if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen < td->o.size)
204 return send(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
/*
 * Plain recv(2) receive path (non-splice engine).
 * MSG_WAITALL blocks until the full xfer_buflen is received (or an
 * error/disconnect occurs), so short reads map to end-of-stream.
 */
207 static int fio_netio_recv(struct io_u *io_u)
209 int flags = MSG_WAITALL;
211 return recv(io_u->file->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
/*
 * ->queue() hook: dispatch an io_u to the splice or send/recv path
 * depending on direction and engine variant, then account for short
 * transfers via io_u->resid.  Always synchronous: returns
 * FIO_Q_COMPLETED.  (Error-path lines are elided in this view.)
 */
214 static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
216 struct netio_data *nd = td->io_ops->data;
219 if (io_u->ddir == DDIR_WRITE) {
/* splice variant if the pipe pair was set up, else plain send() */
221 ret = fio_netio_splice_out(td, io_u);
223 ret = fio_netio_send(td, io_u);
224 } else if (io_u->ddir == DDIR_READ) {
226 ret = fio_netio_splice_in(td, io_u);
228 ret = fio_netio_recv(io_u);
230 ret = 0; /* must be a SYNC */
/* short transfer: record the residual so fio can account for it.
 * NOTE(review): if ret were negative here this subtraction would
 * inflate resid — presumably the elided lines route ret < 0 to the
 * error path first; confirm against the full source. */
232 if (ret != (int) io_u->xfer_buflen) {
234 io_u->resid = io_u->xfer_buflen - ret;
236 return FIO_Q_COMPLETED;
242 td_verror(td, io_u->error, "xfer");
244 return FIO_Q_COMPLETED;
/*
 * Client side: create a TCP socket and connect to the address
 * prepared earlier in nd->addr (see fio_netio_setup_connect).
 */
247 static int fio_netio_connect(struct thread_data *td, struct fio_file *f)
249 struct netio_data *nd = td->io_ops->data;
251 f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
253 td_verror(td, errno, "socket");
257 if (connect(f->fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
258 td_verror(td, errno, "connect");
/*
 * Server side: poll the listening socket until a peer connects (or
 * td->terminate is set), then accept() the connection into f->fd.
 * poll() with a -1 timeout blocks indefinitely per iteration.
 */
265 static int fio_netio_accept(struct thread_data *td, struct fio_file *f)
267 struct netio_data *nd = td->io_ops->data;
268 socklen_t socklen = sizeof(nd->addr);
272 log_info("fio: waiting for connection\n");
275 * Accept loop. poll for incoming events, accept them. Repeat until we
276 * have all connections.
278 while (!td->terminate) {
279 pfd.fd = nd->listenfd;
282 ret = poll(&pfd, 1, -1);
/* elided lines presumably skip EINTR before reporting the error */
287 td_verror(td, errno, "poll");
293 * should be impossible
295 if (!(pfd.revents & POLLIN))
298 f->fd = accept(nd->listenfd, (struct sockaddr *) &nd->addr, &socklen);
300 td_verror(td, errno, "accept");
/*
 * ->open_file() hook: a receiver accepts an incoming connection,
 * a sender connects out.  The role test itself is elided here —
 * presumably nd->send_to_net decides; confirm against full source.
 */
310 static int fio_netio_open_file(struct thread_data *td, struct fio_file *f)
313 return fio_netio_accept(td, f);
315 return fio_netio_connect(td, f);
/*
 * Fill nd->addr with the destination for the client role: try the
 * host string as a dotted-quad first, fall back to DNS resolution.
 * NOTE(review): POSIX only guarantees inet_aton() returns *nonzero*
 * on success; testing '!= 1' works on common libcs but is fragile.
 * NOTE(review): gethostbyname() reports failure via h_errno, not
 * errno — the td_verror(errno) below may print an unrelated error.
 */
318 static int fio_netio_setup_connect(struct thread_data *td, const char *host,
321 struct netio_data *nd = td->io_ops->data;
323 nd->addr.sin_family = AF_INET;
324 nd->addr.sin_port = htons(port);
326 if (inet_aton(host, &nd->addr.sin_addr) != 1) {
327 struct hostent *hent;
329 hent = gethostbyname(host);
331 td_verror(td, errno, "gethostbyname");
/* copies exactly 4 bytes: IPv4-only, consistent with AF_INET above */
335 memcpy(&nd->addr.sin_addr, hent->h_addr, 4);
/*
 * Server role: create a TCP socket, allow fast rebinds via
 * SO_REUSEADDR/SO_REUSEPORT, bind to INADDR_ANY:port and listen.
 * NOTE(review): SO_REUSEPORT is not universally available (Linux
 * gained it in 3.9); an #ifdef guard may exist in the elided lines.
 * The listen backlog of 1 is deliberate: one peer per file.
 */
341 static int fio_netio_setup_listen(struct thread_data *td, short port)
343 struct netio_data *nd = td->io_ops->data;
346 fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
348 td_verror(td, errno, "socket");
353 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
354 td_verror(td, errno, "setsockopt");
358 if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
359 td_verror(td, errno, "setsockopt");
364 nd->addr.sin_family = AF_INET;
/* bind to all local interfaces on the given port */
365 nd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
366 nd->addr.sin_port = htons(port);
368 if (bind(fd, (struct sockaddr *) &nd->addr, sizeof(nd->addr)) < 0) {
369 td_verror(td, errno, "bind");
372 if (listen(fd, 1) < 0) {
373 td_verror(td, errno, "listen");
/*
 * ->init() hook: validate job options (network IO must be read-only
 * OR write-only, and sequential), parse the "host/port" filename,
 * then set up either the listening or the connecting side.
 * NOTE(review): strcpy() of td->o.filename into buf[128] is
 * unbounded — a long filename overflows the stack buffer; should be
 * snprintf(buf, sizeof(buf), "%s", ...).  Same concern for host[64]
 * if the elided parsing copies into it unchecked.
 */
381 static int fio_netio_init(struct thread_data *td)
383 struct netio_data *nd = td->io_ops->data;
385 char host[64], buf[128];
390 log_err("fio: network connections must be read OR write\n");
394 log_err("fio: network IO can't be random\n");
398 strcpy(buf, td->o.filename);
/* filename format is "host/port"; '/' separates the two parts */
400 sep = strchr(buf, '/');
402 log_err("fio: bad network host/port <<%s>>\n", td->o.filename);
/* receiver listens, sender connects — dispatch by role */
413 ret = fio_netio_setup_listen(td, port);
416 ret = fio_netio_setup_connect(td, host, port);
/*
 * ->cleanup() hook: close the listening socket and both pipe ends if
 * they were opened (-1 means "never created"), free the per-thread
 * netio_data and clear the pointer to avoid a dangling reference.
 */
422 static void fio_netio_cleanup(struct thread_data *td)
424 struct netio_data *nd = td->io_ops->data;
427 if (nd->listenfd != -1)
429 if (nd->pipes[0] != -1)
431 if (nd->pipes[1] != -1)
435 td->io_ops->data = NULL;
/*
 * ->setup() hook: lazily allocate and zero the per-thread netio_data,
 * marking the pipe fds as "not created" (-1) so cleanup can tell.
 * NOTE(review): stray double semicolon after the malloc() below —
 * harmless but should be removed; also no visible NULL check on the
 * allocation (may live in the elided lines — confirm).
 */
439 static int fio_netio_setup(struct thread_data *td)
441 struct netio_data *nd;
443 if (!td->io_ops->data) {
444 nd = malloc(sizeof(*nd));;
446 memset(nd, 0, sizeof(*nd));
448 nd->pipes[0] = nd->pipes[1] = -1;
449 td->io_ops->data = nd;
/*
 * ->setup() hook for the splice variant: run the common setup
 * (presumably — the call is in the elided lines), then create the
 * internal pipe pair used by the splice/vmsplice data paths.
 */
455 static int fio_netio_setup_splice(struct thread_data *td)
457 struct netio_data *nd;
461 nd = td->io_ops->data;
463 if (pipe(nd->pipes) < 0)
/*
 * Engine descriptor for the plain send/recv variant.  FIO_SYNCIO:
 * queue() completes inline; FIO_DISKLESSIO: no backing file needed.
 */
473 static struct ioengine_ops ioengine_rw = {
475 .version = FIO_IOOPS_VERSION,
476 .prep = fio_netio_prep,
477 .queue = fio_netio_queue,
478 .setup = fio_netio_setup,
479 .init = fio_netio_init,
480 .cleanup = fio_netio_cleanup,
481 .open_file = fio_netio_open_file,
482 .close_file = generic_close_file,
483 .flags = FIO_SYNCIO | FIO_DISKLESSIO,
/*
 * Engine descriptor for the splice/vmsplice variant — identical to
 * ioengine_rw except .setup also creates the internal pipe pair.
 */
486 static struct ioengine_ops ioengine_splice = {
488 .version = FIO_IOOPS_VERSION,
489 .prep = fio_netio_prep,
490 .queue = fio_netio_queue,
491 .setup = fio_netio_setup_splice,
492 .init = fio_netio_init,
493 .cleanup = fio_netio_cleanup,
494 .open_file = fio_netio_open_file,
495 .close_file = generic_close_file,
496 .flags = FIO_SYNCIO | FIO_DISKLESSIO,
/* Constructor (fio_init attribute): register both engine variants at
 * load time so jobs can select them by name. */
499 static void fio_init fio_netio_register(void)
501 register_ioengine(&ioengine_rw);
502 register_ioengine(&ioengine_splice);
/* Destructor (fio_exit attribute): mirror of fio_netio_register. */
505 static void fio_exit fio_netio_unregister(void)
507 unregister_ioengine(&ioengine_rw);
508 unregister_ioengine(&ioengine_splice);