 * Transfer data over the network (net ioengine).
9 #include <netinet/in.h>
10 #include <arpa/inet.h>
19 struct io_u *last_io_u;
/*
 * Sync-engine "getevents": with a depth of 1 there is never more than
 * one outstanding io_u, so completion is just whether the busy list
 * holds an entry. min/max are effectively ignored here.
 */
static int fio_netio_getevents(struct thread_data *td, int fio_unused min,
			       int max, struct timespec fio_unused *t)

	/*
	 * we can only have one finished io_u for sync io, since the depth
	 * is always 1
	 */
	if (list_empty(&td->io_u_busylist))
/*
 * Return a completed io_u. Presumably hands back nd->last_io_u stashed
 * by the queue path -- with depth 1, "event" should only ever be 0
 * (remainder of the body is not visible in this fragment).
 */
static struct io_u *fio_netio_event(struct thread_data *td, int event)

	struct net_data *nd = td->io_ops->data;
/*
 * Validate an io_u before it is queued. Network I/O is unidirectional
 * per job (nd->send_to_net) and strictly sequential, so reject
 * direction mismatches and anything that amounts to a seek.
 * Errors are reported with EINVAL via td_verror().
 */
static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)

	struct net_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;

	/*
	 * Make sure we don't see spurious reads to a receiver, and vice versa
	 */
	if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
	    (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
		td_verror(td, EINVAL);

	/* sync "io" carries no payload, nothing to validate */
	if (io_u->ddir == DDIR_SYNC)

	/* offset continues exactly where the last transfer ended: accept */
	if (io_u->offset == f->last_completed_pos)

	/*
	 * If offset is different from last end position, it's a seek.
	 * As network io is purely sequential, we don't allow seeks.
	 */
	td_verror(td, EINVAL);
/*
 * Issue one io_u synchronously: send() for writes, recv() for reads,
 * a no-op for DDIR_SYNC. A short transfer is recorded in io_u->resid.
 */
static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)

	struct net_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_WRITE) {
		/*
		 * if we are going to write more, set MSG_MORE
		 */
		if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen <

		ret = send(f->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
	} else if (io_u->ddir == DDIR_READ) {
		ret = recv(f->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);

		ret = 0; /* must be a SYNC */

	/*
	 * Short transfer. NOTE(review): a failed send/recv returns -1 and
	 * also lands here; presumably the (not visible) lines below
	 * distinguish ret > 0 from an error before using resid -- verify.
	 */
	if (ret != (int) io_u->xfer_buflen) {
		io_u->resid = io_u->xfer_buflen - ret;

	/* remember the completion for fio_netio_event() */
	nd->last_io_u = io_u;

	td_verror(td, io_u->error);
/*
 * Connect-side (sender) setup: resolve "host" -- dotted-quad first,
 * then via gethostbyname() -- and open one connected TCP socket per
 * file.
 */
static int fio_netio_setup_connect(struct thread_data *td, const char *host,

	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);

	/*
	 * NOTE(review): inet_aton() is only documented to return nonzero
	 * on success, not exactly 1; "!= 1" works on glibc but "== 0"
	 * would be the portable failure check -- verify.
	 */
	if (inet_aton(host, &addr.sin_addr) != 1) {
		struct hostent *hent;

		hent = gethostbyname(host);
			/*
			 * NOTE(review): gethostbyname() reports failure via
			 * h_errno, not errno -- this may log a stale errno.
			 */
			td_verror(td, errno);

		/* IPv4 only: copies h_addr assuming h_length == 4 */
		memcpy(&addr.sin_addr, hent->h_addr, 4);

	for_each_file(td, f, i) {
		f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
			td_verror(td, errno);

		if (connect(f->fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
			td_verror(td, errno);
/*
 * Listen-side accept loop: poll() on the listening fd and accept()
 * new connections until every file has one, or until the job is
 * asked to terminate.
 */
static int fio_netio_accept_connections(struct thread_data *td, int fd,
					struct sockaddr_in *addr)

	socklen_t socklen = sizeof(*addr);
	unsigned int accepts = 0;

	fprintf(f_out, "fio: waiting for %u connections\n", td->nr_files);

	/*
	 * Accept loop. poll for incoming events, accept them. Repeat until we
	 * have all connections.
	 */
	while (!td->terminate && accepts < td->nr_files) {

		/* block indefinitely until the listening fd is readable */
		ret = poll(&pfd, 1, -1);

			td_verror(td, errno);

		/*
		 * should be impossible
		 */
		if (!(pfd.revents & POLLIN))

		/* hand the new connection to the next file without an fd */
		for_each_file(td, f, i) {

			f->fd = accept(fd, (struct sockaddr *) addr, &socklen);

				td_verror(td, errno);
/*
 * Listen-side (receiver) setup: create the TCP listening socket,
 * enable address/port reuse, bind to INADDR_ANY:port, then hand off
 * to the accept loop.
 */
static int fio_netio_setup_listen(struct thread_data *td, unsigned short port)

	struct sockaddr_in addr;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		td_verror(td, errno);

	/* allow rebinding while old sockets linger in TIME_WAIT */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno);

	/* NOTE(review): SO_REUSEPORT is not available on every platform */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		td_verror(td, errno);

	/* backlog of 1: connections are accepted one at a time */
	if (listen(fd, 1) < 0) {
		td_verror(td, errno);

	return fio_netio_accept_connections(td, fd, &addr);
/*
 * Engine setup: parse "host:port" out of td->filename, then either
 * listen (reader side) or connect (writer side), and size the files
 * from the mandatory size= option.
 */
static int fio_netio_setup(struct thread_data *td)

	char host[64], buf[128];

	/* network transfers have no natural size; size= is required */
	if (!td->total_file_size) {
		log_err("fio: need size= set\n");

	/*
	 * work around for late init call
	 */
	if (td->io_ops->init(td))

	nd = td->io_ops->data;

		log_err("fio: network connections must be read OR write\n");

	/*
	 * NOTE(review): unbounded strcpy -- assumes td->filename fits in
	 * buf[128]; verify the filename length is capped upstream.
	 */
	strcpy(buf, td->filename);

	sep = strchr(buf, ':');
		log_err("fio: bad network host:port <<%s>>\n", td->filename);

	/* reader listens, writer connects */
	if (td->ddir == DDIR_READ) {
		ret = fio_netio_setup_listen(td, port);
		ret = fio_netio_setup_connect(td, host, port);

	td->io_size = td->total_file_size;
	td->total_io_size = td->io_size;

	/* split the total size evenly across the connections */
	for_each_file(td, f, i) {
		f->file_size = td->total_file_size / td->nr_files;
		f->real_file_size = f->file_size;
307 static void fio_netio_cleanup(struct thread_data *td)
309 if (td->io_ops->data) {
310 free(td->io_ops->data);
311 td->io_ops->data = NULL;
315 static int fio_netio_init(struct thread_data *td)
320 * Hack to work-around the ->setup() function calling init on its
321 * own, since it needs ->io_ops->data to be set up.
323 if (td->io_ops->data)
326 nd = malloc(sizeof(*nd));
327 nd->last_io_u = NULL;
328 td->io_ops->data = nd;
/*
 * Engine ops table. FIO_SYNCIO: queue() completes each io_u inline;
 * FIO_NETIO: marks this as a network transport for the fio core.
 */
static struct ioengine_ops ioengine = {
	.version = FIO_IOOPS_VERSION,
	.init = fio_netio_init,
	.prep = fio_netio_prep,
	.queue = fio_netio_queue,
	.getevents = fio_netio_getevents,
	.event = fio_netio_event,
	.cleanup = fio_netio_cleanup,
	.setup = fio_netio_setup,
	.flags = FIO_SYNCIO | FIO_NETIO,
/* constructor: register this engine with fio at load time */
static void fio_init fio_netio_register(void)
{
	register_ioengine(&ioengine);
/* destructor: drop the engine registration at unload time */
static void fio_exit fio_netio_unregister(void)
{
	unregister_ioengine(&ioengine);