[PATCH] Move td_verror() into io_ops->queue() hook
[fio.git] engines/net.c
/*
 * Transfer data over the net.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>

#include "../fio.h"
#include "../os.h"

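/*
 * Per-thread engine data. ->send_to_net records whether this end
 * transmits (writer) or receives (reader), and ->last_io_u caches the
 * most recently completed io_u so ->event() can return it; since this
 * is a sync engine with a depth of 1, a single pointer suffices.
 */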
struct net_data {
	int send_to_net;
	struct io_u *last_io_u;
};

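/*
 * ->getevents() for a sync engine: completion is immediate, so simply
 * report whether an io_u is sitting on the busy list.
 */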
static int fio_netio_getevents(struct thread_data *td, int fio_unused min,
			       int max, struct timespec fio_unused *t)
{
	assert(max <= 1);

	/*
	 * we can only have one finished io_u for sync io, since the depth
	 * is always 1
	 */
	if (list_empty(&td->io_u_busylist))
		return 0;

	return 1;
}

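/*
 * ->event() hands back the io_u for a completed event slot; only slot 0
 * exists here, and it maps to the io_u cached by ->queue().
 */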
static struct io_u *fio_netio_event(struct thread_data *td, int event)
{
	struct net_data *nd = td->io_ops->data;

	assert(event == 0);

	return nd->last_io_u;
}

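/*
 * Validate an io_u before it is queued: the data direction must match
 * this end's role, DDIR_SYNC is always allowed, and any offset other
 * than the last completed position is rejected as a seek, since a TCP
 * stream is strictly sequential.
 */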
static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct net_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;

	/*
	 * Make sure we don't see spurious reads to a receiver, and vice versa
	 */
	if ((nd->send_to_net && io_u->ddir == DDIR_READ) ||
	    (!nd->send_to_net && io_u->ddir == DDIR_WRITE)) {
		log_err("fio: bogus data direction for network connection\n");
		td_verror(td, EINVAL);
		return 1;
	}

	if (io_u->ddir == DDIR_SYNC)
		return 0;
	if (io_u->offset == f->last_completed_pos)
		return 0;

	/*
	 * If offset is different from last end position, it's a seek.
	 * As network io is purely sequential, we don't allow seeks.
	 */
	td_verror(td, EINVAL);
	return 1;
}

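/*
 * Transfer one io_u over the socket. Writes set MSG_MORE while more
 * data is still to come, reads use MSG_WAITALL to fill the buffer, and
 * a short transfer is reported through ->resid. As per this patch,
 * td_verror() is called here in ->queue() when the transfer fails.
 */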
static int fio_netio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct net_data *nd = td->io_ops->data;
	struct fio_file *f = io_u->file;
	int ret, flags = 0;

	if (io_u->ddir == DDIR_WRITE) {
		/*
		 * if we are going to write more, set MSG_MORE
		 */
		if (td->this_io_bytes[DDIR_WRITE] + io_u->xfer_buflen <
		    td->io_size)
			flags = MSG_MORE;

		ret = send(f->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
	} else if (io_u->ddir == DDIR_READ) {
		flags = MSG_WAITALL;
		ret = recv(f->fd, io_u->xfer_buf, io_u->xfer_buflen, flags);
	} else
		ret = 0;	/* must be a SYNC */

	if (ret != (int) io_u->xfer_buflen) {
		if (ret > 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return ret;
		} else
			io_u->error = errno;
	}

	if (!io_u->error)
		nd->last_io_u = io_u;
	else
		td_verror(td, io_u->error);

	return io_u->error;
}

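/*
 * Client side setup: resolve the host (dotted quad first, DNS lookup
 * as a fallback) and open one connected TCP socket per file.
 */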
static int fio_netio_setup_connect(struct thread_data *td, const char *host,
				   unsigned short port)
{
	struct sockaddr_in addr;
	struct fio_file *f;
	int i;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);

	if (!inet_aton(host, &addr.sin_addr)) {
		struct hostent *hent;

		hent = gethostbyname(host);
		if (!hent) {
			td_verror(td, errno);
			return 1;
		}

		memcpy(&addr.sin_addr, hent->h_addr, 4);
	}

	for_each_file(td, f, i) {
		f->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		if (f->fd < 0) {
			td_verror(td, errno);
			return 1;
		}

		if (connect(f->fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	return 0;
}

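/*
 * Server side setup: bind and listen on the given port, then accept
 * one incoming connection per file.
 */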
static int fio_netio_setup_listen(struct thread_data *td, unsigned short port)
{
	struct sockaddr_in addr;
	socklen_t socklen;
	struct fio_file *f;
	int fd, opt, i;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0) {
		td_verror(td, errno);
		return 1;
	}

	opt = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno);
		return 1;
	}
#ifdef SO_REUSEPORT
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
		td_verror(td, errno);
		return 1;
	}
#endif

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		td_verror(td, errno);
		return 1;
	}
	if (listen(fd, 1) < 0) {
		td_verror(td, errno);
		return 1;
	}

	fprintf(f_out, "fio: waiting for %u connections\n", td->nr_files);

	socklen = sizeof(addr);
	for_each_file(td, f, i) {
		f->fd = accept(fd, (struct sockaddr *) &addr, &socklen);
		if (f->fd < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	return 0;
}

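/*
 * Engine setup: parse the host:port filename, pick the role from the
 * data direction (readers listen, writers connect), establish the
 * connections, and size the "files" from the mandatory size= setting.
 */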
static int fio_netio_setup(struct thread_data *td)
{
	char host[64], buf[128];
	struct net_data *nd;
	unsigned short port;
	struct fio_file *f;
	char *sep;
	int ret, i;

	if (!td->total_file_size) {
		log_err("fio: need size= set\n");
		return 1;
	}

	/*
	 * work around for late init call
	 */
	if (td->io_ops->init(td))
		return 1;

	nd = td->io_ops->data;

	if (td->iomix) {
		log_err("fio: network connections must be read OR write\n");
		return 1;
	}

	snprintf(buf, sizeof(buf), "%s", td->filename);

	sep = strchr(buf, ':');
	if (!sep) {
		log_err("fio: bad network host:port <<%s>>\n", td->filename);
		return 1;
	}

	*sep = '\0';
	sep++;
	snprintf(host, sizeof(host), "%s", buf);
	port = atoi(sep);

	if (td->ddir == DDIR_READ) {
		nd->send_to_net = 0;
		ret = fio_netio_setup_listen(td, port);
	} else {
		nd->send_to_net = 1;
		ret = fio_netio_setup_connect(td, host, port);
	}

	if (ret)
		return ret;

	td->io_size = td->total_file_size;
	td->total_io_size = td->io_size;

	for_each_file(td, f, i) {
		f->file_size = td->total_file_size / td->nr_files;
		f->real_file_size = f->file_size;
	}

	return 0;
}

static void fio_netio_cleanup(struct thread_data *td)
{
	if (td->io_ops->data) {
		free(td->io_ops->data);
		td->io_ops->data = NULL;
	}
}

static int fio_netio_init(struct thread_data *td)
{
	struct net_data *nd;

	/*
	 * Hack to work-around the ->setup() function calling init on its
	 * own, since it needs ->io_ops->data to be set up.
	 */
	if (td->io_ops->data)
		return 0;

	nd = malloc(sizeof(*nd));
	nd->last_io_u = NULL;
	td->io_ops->data = nd;
	return 0;
}

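/*
 * Engine registration. FIO_SYNCIO marks this as a synchronous engine
 * with a queue depth of 1; FIO_NETIO presumably tells the core to treat
 * this as network I/O rather than regular file I/O (core-side handling
 * is outside this file).
 */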
static struct ioengine_ops ioengine = {
	.name		= "net",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_netio_init,
	.prep		= fio_netio_prep,
	.queue		= fio_netio_queue,
	.getevents	= fio_netio_getevents,
	.event		= fio_netio_event,
	.cleanup	= fio_netio_cleanup,
	.setup		= fio_netio_setup,
	.flags		= FIO_SYNCIO | FIO_NETIO,
};

static void fio_init fio_netio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_netio_unregister(void)
{
	unregister_ioengine(&ioengine);
}