#include "../fio.h"
#include "../os.h"
-struct net_data {
- int send_to_net;
- struct io_u *last_io_u;
-};
-
-static int fio_netio_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_netio_event(struct thread_data *td, int event)
-{
- struct net_data *nd = td->io_ops->data;
-
- assert(event == 0);
-
- return nd->last_io_u;
-}
/*
 * Per-job direction flag stashed in the engine-private pointer:
 * set to 1 when this job transmits (writer), 0 when it receives
 * (reader) — see fio_netio_init(), which assigns it based on the
 * job's data direction.
 */
#define send_to_net(td)	((td)->io_ops->priv)
static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
{
- struct net_data *nd = td->io_ops->data;
struct fio_file *f = io_u->file;
/*
* Make sure we don't see spurious reads to a receiver, and vice versa
*/
- if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
- (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
- printf("boo!\n");
- td_verror(td, EINVAL);
+ if ((send_to_net(td) && io_u->ddir == DDIR_READ) ||
+ (!send_to_net(td) && io_u->ddir == DDIR_WRITE)) {
+ td_verror(td, EINVAL, "bad direction");
return 1;
}
* If offset is different from last end position, it's a seek.
* As network io is purely sequential, we don't allow seeks.
*/
- td_verror(td, EINVAL);
+ td_verror(td, EINVAL, "cannot seek");
return 1;
}
static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
{
- struct net_data *nd = td->io_ops->data;
struct fio_file *f = io_u->file;
int ret, flags = 0;
ret = 0; /* must be a SYNC */
if (ret != (int) io_u->xfer_buflen) {
- if (ret > 0) {
+ if (ret >= 0) {
io_u->resid = io_u->xfer_buflen - ret;
io_u->error = 0;
- return ret;
+ return FIO_Q_COMPLETED;
} else
io_u->error = errno;
}
- if (!io_u->error)
- nd->last_io_u = io_u;
- else
- td_verror(td, io_u->error);
+ if (io_u->error)
+ td_verror(td, io_u->error, "xfer");
- return io_u->error;
+ return FIO_Q_COMPLETED;
}
static int fio_netio_setup_connect(struct thread_data *td, const char *host,
hent = gethostbyname(host);
if (!hent) {
- td_verror(td, errno);
+ td_verror(td, errno, "gethostbyname");
return 1;
}
for_each_file(td, f, i) {
f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (f->fd < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "socket");
return 1;
}
if (connect(f->fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "connect");
return 1;
}
}
if (errno == EINTR)
continue;
- td_verror(td, errno);
+ td_verror(td, errno, "poll");
break;
} else if (!ret)
continue;
+ /*
+ * should be impossible
+ */
+ if (!(pfd.revents & POLLIN))
+ continue;
+
for_each_file(td, f, i) {
if (f->fd != -1)
continue;
f->fd = accept(fd, (struct sockaddr *) addr, &socklen);
if (f->fd < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "accept");
return 1;
}
accepts++;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (fd < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "socket");
return 1;
}
opt = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "setsockopt");
return 1;
}
#ifdef SO_REUSEPORT
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "setsockopt");
return 1;
}
#endif
addr.sin_port = htons(port);
if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "bind");
return 1;
}
if (listen(fd, 1) < 0) {
- td_verror(td, errno);
+ td_verror(td, errno, "listen");
return 1;
}
return fio_netio_accept_connections(td, fd, &addr);
}
-static int fio_netio_setup(struct thread_data *td)
+static int fio_netio_init(struct thread_data *td)
{
char host[64], buf[128];
- struct net_data *nd;
unsigned short port;
struct fio_file *f;
char *sep;
return 1;
}
- /*
- * work around for late init call
- */
- if (td->io_ops->init(td))
- return 1;
-
- nd = td->io_ops->data;
-
- if (td->iomix) {
+ if (td_rw(td)) {
log_err("fio: network connections must be read OR write\n");
return 1;
}
strcpy(host, buf);
port = atoi(sep);
- if (td->ddir == DDIR_READ) {
- nd->send_to_net = 0;
+ if (td_read(td)) {
+ send_to_net(td) = 0;
ret = fio_netio_setup_listen(td, port);
} else {
- nd->send_to_net = 1;
+ send_to_net(td) = 1;
ret = fio_netio_setup_connect(td, host, port);
}
f->real_file_size = f->file_size;
}
+ td->nr_open_files = td->nr_files;
return 0;
}
-static void fio_netio_cleanup(struct thread_data *td)
-{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_netio_init(struct thread_data *td)
+static int fio_netio_setup(struct thread_data fio_unused *td)
{
- struct net_data *nd;
-
- /*
- * Hack to work-around the ->setup() function calling init on its
- * own, since it needs ->io_ops->data to be set up.
- */
- if (td->io_ops->data)
- return 0;
-
- nd = malloc(sizeof(*nd));
- nd->last_io_u = NULL;
- td->io_ops->data = nd;
return 0;
}
static struct ioengine_ops ioengine = {
.name = "net",
.version = FIO_IOOPS_VERSION,
- .init = fio_netio_init,
.prep = fio_netio_prep,
.queue = fio_netio_queue,
- .getevents = fio_netio_getevents,
- .event = fio_netio_event,
- .cleanup = fio_netio_cleanup,
.setup = fio_netio_setup,
- .flags = FIO_SYNCIO | FIO_NETIO,
+ .init = fio_netio_init,
+ .flags = FIO_SYNCIO | FIO_DISKLESSIO | FIO_SELFOPEN,
};
static void fio_init fio_netio_register(void)