server: remove leftover debug statement
[fio.git] / server.c
1#include <stdio.h>
2#include <stdlib.h>
3#include <stdarg.h>
4#include <unistd.h>
5#include <limits.h>
6#include <errno.h>
7#include <sys/poll.h>
8#include <sys/types.h>
9#include <sys/wait.h>
10#include <sys/socket.h>
11#include <sys/stat.h>
12#include <sys/un.h>
13#include <sys/uio.h>
14#include <netinet/in.h>
15#include <arpa/inet.h>
16#include <netdb.h>
17#include <syslog.h>
18#include <signal.h>
19#ifdef CONFIG_ZLIB
20#include <zlib.h>
21#endif
22
23#include "fio.h"
24#include "options.h"
25#include "server.h"
26#include "crc/crc16.h"
27#include "lib/ieee754.h"
28#include "verify.h"
29#include "smalloc.h"
30
31int fio_net_port = FIO_NET_PORT;
32
33int exit_backend = 0;
34
35enum {
36 SK_F_FREE = 1,
37 SK_F_COPY = 2,
38 SK_F_SIMPLE = 4,
39 SK_F_VEC = 8,
40 SK_F_INLINE = 16,
41};
42
43struct sk_entry {
44 struct flist_head list; /* link on sk_out->list */
45 int flags; /* SK_F_* */
46 int opcode; /* Actual command fields */
47 void *buf;
48 off_t size;
49 uint64_t *tagptr;
50 struct flist_head next; /* Other sk_entry's, if linked command */
51};
52
53struct sk_out {
54 unsigned int refs; /* frees sk_out when it drops to zero.
55 * protected by below ->lock */
56
57 int sk; /* socket fd to talk to client */
58 struct fio_mutex lock; /* protects ref and below list */
59 struct flist_head list; /* list of pending transmit work */
60 struct fio_mutex wait; /* wake backend when items added to list */
61 struct fio_mutex xmit; /* held while sending data */
62};
63
64static char *fio_server_arg;
65static char *bind_sock;
66static struct sockaddr_in saddr_in;
67static struct sockaddr_in6 saddr_in6;
68static int use_ipv6;
69#ifdef CONFIG_ZLIB
70static unsigned int has_zlib = 1;
71#else
72static unsigned int has_zlib = 0;
73#endif
74static unsigned int use_zlib;
75static char me[128];
76
77static pthread_key_t sk_out_key;
78
79struct fio_fork_item {
80 struct flist_head list;
81 int exitval;
82 int signal;
83 int exited;
84 pid_t pid;
85};
86
87struct cmd_reply {
88 struct fio_mutex lock;
89 void *data;
90 size_t size;
91 int error;
92};
93
94static const char *fio_server_ops[FIO_NET_CMD_NR] = {
95 "",
96 "QUIT",
97 "EXIT",
98 "JOB",
99 "JOBLINE",
100 "TEXT",
101 "TS",
102 "GS",
103 "SEND_ETA",
104 "ETA",
105 "PROBE",
106 "START",
107 "STOP",
108 "DISK_UTIL",
109 "SERVER_START",
110 "ADD_JOB",
111 "RUN",
112 "IOLOG",
113 "UPDATE_JOB",
114 "LOAD_FILE",
115 "VTRIGGER",
116 "SENDFILE",
117};
118
119static void sk_lock(struct sk_out *sk_out)
120{
121 fio_mutex_down(&sk_out->lock);
122}
123
124static void sk_unlock(struct sk_out *sk_out)
125{
126 fio_mutex_up(&sk_out->lock);
127}
128
129void sk_out_assign(struct sk_out *sk_out)
130{
131 if (!sk_out)
132 return;
133
134 sk_lock(sk_out);
135 sk_out->refs++;
136 sk_unlock(sk_out);
137 pthread_setspecific(sk_out_key, sk_out);
138}
139
140static void sk_out_free(struct sk_out *sk_out)
141{
142 __fio_mutex_remove(&sk_out->lock);
143 __fio_mutex_remove(&sk_out->wait);
144 __fio_mutex_remove(&sk_out->xmit);
145 sfree(sk_out);
146}
147
148static int __sk_out_drop(struct sk_out *sk_out)
149{
150 if (sk_out) {
151 int refs;
152
153 sk_lock(sk_out);
154 refs = --sk_out->refs;
155 sk_unlock(sk_out);
156
157 if (!refs) {
158 sk_out_free(sk_out);
159 return 0;
160 }
161 }
162
163 return 1;
164}
165
166void sk_out_drop(void)
167{
168 struct sk_out *sk_out;
169
170 sk_out = pthread_getspecific(sk_out_key);
171 if (!__sk_out_drop(sk_out))
172 pthread_setspecific(sk_out_key, NULL);
173}
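/*
 * The refcount above pairs with the sk_out_key thread-local. A minimal
 * sketch of the intended usage, assuming a hypothetical per-connection
 * worker:
 *
 *	sk_out_assign(sk_out);		take a reference, publish via TLS
 *	... queue commands with fio_net_queue_cmd() ...
 *	sk_out_drop();			drop it; the last drop frees sk_out
 */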
174
175static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
176 uint32_t pdu_len, uint64_t tag)
177{
178 memset(cmd, 0, sizeof(*cmd));
179
180 cmd->version = __cpu_to_le16(FIO_SERVER_VER);
181 cmd->opcode = cpu_to_le16(opcode);
182 cmd->tag = cpu_to_le64(tag);
183 cmd->pdu_len = cpu_to_le32(pdu_len);
184}
185
186
187static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
188 const void *pdu, uint32_t pdu_len, uint64_t tag)
189{
190 __fio_init_net_cmd(cmd, opcode, pdu_len, tag);
191
192 if (pdu)
193 memcpy(&cmd->payload, pdu, pdu_len);
194}
195
196const char *fio_server_op(unsigned int op)
197{
198 static char buf[32];
199
200 if (op < FIO_NET_CMD_NR)
201 return fio_server_ops[op];
202
203 sprintf(buf, "UNKNOWN/%d", op);
204 return buf;
205}
206
207static ssize_t iov_total_len(const struct iovec *iov, int count)
208{
209 ssize_t ret = 0;
210
211 while (count--) {
212 ret += iov->iov_len;
213 iov++;
214 }
215
216 return ret;
217}
218
219static int fio_sendv_data(int sk, struct iovec *iov, int count)
220{
221 ssize_t total_len = iov_total_len(iov, count);
222 ssize_t ret;
223
224 do {
225 ret = writev(sk, iov, count);
226 if (ret > 0) {
227 total_len -= ret;
228 if (!total_len)
229 break;
230
231 while (ret) {
232 if (ret >= iov->iov_len) {
233 ret -= iov->iov_len;
234 iov++;
235 continue;
236 }
237 iov->iov_base += ret;
238 iov->iov_len -= ret;
239 ret = 0;
240 }
241 } else if (!ret)
242 break;
243 else if (errno == EAGAIN || errno == EINTR)
244 continue;
245 else
246 break;
247 } while (!exit_backend);
248
249 if (!total_len)
250 return 0;
251
252 return 1;
253}
254
255static int fio_send_data(int sk, const void *p, unsigned int len)
256{
257 struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
258
259 assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
260
261 return fio_sendv_data(sk, &iov, 1);
262}
263
264static int fio_recv_data(int sk, void *p, unsigned int len)
265{
266 do {
267 int ret = recv(sk, p, len, MSG_WAITALL);
268
269 if (ret > 0) {
270 len -= ret;
271 if (!len)
272 break;
273 p += ret;
274 continue;
275 } else if (!ret)
276 break;
277 else if (errno == EAGAIN || errno == EINTR)
278 continue;
279 else
280 break;
281 } while (!exit_backend);
282
283 if (!len)
284 return 0;
285
286 return -1;
287}
288
289static int verify_convert_cmd(struct fio_net_cmd *cmd)
290{
291 uint16_t crc;
292
293 cmd->cmd_crc16 = le16_to_cpu(cmd->cmd_crc16);
294 cmd->pdu_crc16 = le16_to_cpu(cmd->pdu_crc16);
295
296 crc = fio_crc16(cmd, FIO_NET_CMD_CRC_SZ);
297 if (crc != cmd->cmd_crc16) {
298 log_err("fio: server bad crc on command (got %x, wanted %x)\n",
299 cmd->cmd_crc16, crc);
300 return 1;
301 }
302
303 cmd->version = le16_to_cpu(cmd->version);
304 cmd->opcode = le16_to_cpu(cmd->opcode);
305 cmd->flags = le32_to_cpu(cmd->flags);
306 cmd->tag = le64_to_cpu(cmd->tag);
307 cmd->pdu_len = le32_to_cpu(cmd->pdu_len);
308
309 switch (cmd->version) {
310 case FIO_SERVER_VER:
311 break;
312 default:
313 log_err("fio: bad server cmd version %d\n", cmd->version);
314 return 1;
315 }
316
317 if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
318 log_err("fio: command payload too large: %u\n", cmd->pdu_len);
319 return 1;
320 }
321
322 return 0;
323}
324
325/*
326 * Read (and defragment, if necessary) incoming commands
327 */
328struct fio_net_cmd *fio_net_recv_cmd(int sk)
329{
330 struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
331 size_t cmd_size = 0, pdu_offset = 0;
332 uint16_t crc;
333 int ret, first = 1;
334 void *pdu = NULL;
335
336 do {
337 ret = fio_recv_data(sk, &cmd, sizeof(cmd));
338 if (ret)
339 break;
340
341 /* We have a command, verify it and swap if need be */
342 ret = verify_convert_cmd(&cmd);
343 if (ret)
344 break;
345
346 if (first) {
347 /* if this is text, add room for \0 at the end */
348 cmd_size = sizeof(cmd) + cmd.pdu_len + 1;
349 assert(!cmdret);
350 } else
351 cmd_size += cmd.pdu_len;
352
353 if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) {
354 log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size);
355 ret = 1;
356 break;
357 }
358
359 tmp = realloc(cmdret, cmd_size);
360 if (!tmp) {
361 log_err("fio: server failed allocating cmd\n");
362 ret = 1;
363 break;
364 }
365 cmdret = tmp;
366
367 if (first)
368 memcpy(cmdret, &cmd, sizeof(cmd));
369 else if (cmdret->opcode != cmd.opcode) {
370 log_err("fio: fragment opcode mismatch (%d != %d)\n",
371 cmdret->opcode, cmd.opcode);
372 ret = 1;
373 break;
374 }
375
376 if (!cmd.pdu_len)
377 break;
378
379 /* There's payload, get it */
380 pdu = (void *) cmdret->payload + pdu_offset;
381 ret = fio_recv_data(sk, pdu, cmd.pdu_len);
382 if (ret)
383 break;
384
385 /* Verify payload crc */
386 crc = fio_crc16(pdu, cmd.pdu_len);
387 if (crc != cmd.pdu_crc16) {
388 log_err("fio: server bad crc on payload ");
389 log_err("(got %x, wanted %x)\n", cmd.pdu_crc16, crc);
390 ret = 1;
391 break;
392 }
393
394 pdu_offset += cmd.pdu_len;
395 if (!first)
396 cmdret->pdu_len += cmd.pdu_len;
397 first = 0;
398 } while (cmd.flags & FIO_NET_CMD_F_MORE);
399
400 if (ret) {
401 free(cmdret);
402 cmdret = NULL;
403 } else if (cmdret) {
404 /* zero-terminate text input */
405 if (cmdret->pdu_len) {
406 if (cmdret->opcode == FIO_NET_CMD_TEXT) {
407 struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload;
408 char *buf = (char *) __pdu->buf;
409
410 buf[__pdu->buf_len] = '\0';
411 } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
412 struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload;
413 char *buf = (char *) __pdu->buf;
414 int len = le32_to_cpu(__pdu->buf_len);
415
416 buf[len] = '\0';
417 }
418 }
419
420 /* frag flag is internal */
421 cmdret->flags &= ~FIO_NET_CMD_F_MORE;
422 }
423
424 return cmdret;
425}
426
427static void add_reply(uint64_t tag, struct flist_head *list)
428{
429 struct fio_net_cmd_reply *reply;
430
431 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
432 flist_add_tail(&reply->list, list);
433}
434
435static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
436{
437 struct fio_net_cmd_reply *reply;
438
439 reply = calloc(1, sizeof(*reply));
440 INIT_FLIST_HEAD(&reply->list);
441 fio_gettime(&reply->tv, NULL);
442 reply->saved_tag = tag;
443 reply->opcode = opcode;
444
445 return (uintptr_t) reply;
446}
447
448static void free_reply(uint64_t tag)
449{
450 struct fio_net_cmd_reply *reply;
451
452 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
453 free(reply);
454}
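/*
 * The tag carried in a command doubles as a pointer: when a caller wants
 * the reply tracked, alloc_reply() turns the tag into a freshly allocated
 * struct fio_net_cmd_reply and add_reply() links it on the caller's list,
 * so the echoed tag can later be matched back to the request. A minimal
 * sketch, assuming a hypothetical tracked send:
 *
 *	FLIST_HEAD(reply_list);
 *	uint64_t tag = 0;
 *
 *	fio_net_send_cmd(sk, FIO_NET_CMD_SEND_ETA, NULL, 0, &tag, &reply_list);
 *	on success, tag now points at the reply record linked on reply_list
 */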
455
456static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
457{
458 uint32_t pdu_len;
459
460 cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
461
462 pdu_len = le32_to_cpu(cmd->pdu_len);
463 cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
464}
465
466static void fio_net_cmd_crc(struct fio_net_cmd *cmd)
467{
468 fio_net_cmd_crc_pdu(cmd, cmd->payload);
469}
470
471int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
472 uint64_t *tagptr, struct flist_head *list)
473{
474 struct fio_net_cmd *cmd = NULL;
475 size_t this_len, cur_len = 0;
476 uint64_t tag;
477 int ret;
478
479 if (list) {
480 assert(tagptr);
481 tag = *tagptr = alloc_reply(*tagptr, opcode);
482 } else
483 tag = tagptr ? *tagptr : 0;
484
485 do {
486 this_len = size;
487 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
488 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
489
490 if (!cmd || cur_len < sizeof(*cmd) + this_len) {
491 if (cmd)
492 free(cmd);
493
494 cur_len = sizeof(*cmd) + this_len;
495 cmd = malloc(cur_len);
496 }
497
498 fio_init_net_cmd(cmd, opcode, buf, this_len, tag);
499
500 if (this_len < size)
501 cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);
502
503 fio_net_cmd_crc(cmd);
504
505 ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
506 size -= this_len;
507 buf += this_len;
508 } while (!ret && size);
509
510 if (list) {
511 if (ret)
512 free_reply(tag);
513 else
514 add_reply(tag, list);
515 }
516
517 if (cmd)
518 free(cmd);
519
520 return ret;
521}
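/*
 * fio_net_send_cmd() splits the payload into FIO_SERVER_MAX_FRAGMENT_PDU
 * sized frames and sets FIO_NET_CMD_F_MORE on every frame but the last;
 * fio_net_recv_cmd() above reassembles them. As a rough sketch, a payload
 * of 'size' bytes produces this many frames:
 *
 *	size ? (size + FIO_SERVER_MAX_FRAGMENT_PDU - 1) / FIO_SERVER_MAX_FRAGMENT_PDU : 1
 */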
522
523static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf,
524 size_t size, uint64_t *tagptr,
525 int flags)
526{
527 struct sk_entry *entry;
528
529 entry = smalloc(sizeof(*entry));
530 INIT_FLIST_HEAD(&entry->next);
531 entry->opcode = opcode;
532 if (flags & SK_F_COPY) {
533 entry->buf = smalloc(size);
534 memcpy(entry->buf, buf, size);
535 } else
536 entry->buf = buf;
537
538 entry->size = size;
539 entry->tagptr = tagptr;
540 entry->flags = flags;
541 return entry;
542}
543
544static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry);
545
546static void fio_net_queue_entry(struct sk_entry *entry)
547{
548 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
549
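	/*
	 * SK_F_INLINE entries are transmitted right here, in the caller's
	 * context; everything else is queued for the connection loop, which
	 * is woken via sk_out->wait.
	 */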
550 if (entry->flags & SK_F_INLINE)
551 handle_sk_entry(sk_out, entry);
552 else {
553 sk_lock(sk_out);
554 flist_add_tail(&entry->list, &sk_out->list);
555 sk_unlock(sk_out);
556
557 fio_mutex_up(&sk_out->wait);
558 }
559}
560
561static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
562 uint64_t *tagptr, int flags)
563{
564 struct sk_entry *entry;
565
566 entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
567 fio_net_queue_entry(entry);
568 return 0;
569}
570
571static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
572{
573 struct fio_net_cmd cmd;
574
575 fio_init_net_cmd(&cmd, opcode, NULL, 0, tag);
576 fio_net_cmd_crc(&cmd);
577
578 return fio_send_data(sk, &cmd, sizeof(cmd));
579}
580
581/*
582 * If 'list' is non-NULL, then allocate and store the sent command for
583 * later verification.
584 */
585int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
586 struct flist_head *list)
587{
588 int ret;
589
590 if (list)
591 tag = alloc_reply(tag, opcode);
592
593 ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
594 if (ret) {
595 if (list)
596 free_reply(tag);
597
598 return ret;
599 }
600
601 if (list)
602 add_reply(tag, list);
603
604 return 0;
605}
606
607static int fio_net_queue_quit(void)
608{
609 dprint(FD_NET, "server: sending quit\n");
610
611 return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, 0, SK_F_SIMPLE);
612}
613
614int fio_net_send_quit(int sk)
615{
616 dprint(FD_NET, "server: sending quit\n");
617
618 return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
619}
620
621static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal)
622{
623 struct cmd_end_pdu epdu;
624 uint64_t tag = 0;
625
626 if (cmd)
627 tag = cmd->tag;
628
629 epdu.error = __cpu_to_le32(error);
630 epdu.signal = __cpu_to_le32(signal);
631 return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY);
632}
633
634static int fio_net_queue_stop(int error, int signal)
635{
636 dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
637 return fio_net_send_ack(NULL, error, signal);
638}
639
640static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
641{
642 struct fio_fork_item *ffi;
643
644 ffi = malloc(sizeof(*ffi));
645 ffi->exitval = 0;
646 ffi->signal = 0;
647 ffi->exited = 0;
648 ffi->pid = pid;
649 flist_add_tail(&ffi->list, list);
650}
651
652static void fio_server_add_conn_pid(struct flist_head *conn_list, pid_t pid)
653{
654 dprint(FD_NET, "server: forked off connection job (pid=%u)\n", (int) pid);
655 fio_server_add_fork_item(pid, conn_list);
656}
657
658static void fio_server_add_job_pid(struct flist_head *job_list, pid_t pid)
659{
 660 dprint(FD_NET, "server: forked off job (pid=%u)\n", (int) pid);
661 fio_server_add_fork_item(pid, job_list);
662}
663
664static void fio_server_check_fork_item(struct fio_fork_item *ffi)
665{
666 int ret, status;
667
668 ret = waitpid(ffi->pid, &status, WNOHANG);
669 if (ret < 0) {
670 if (errno == ECHILD) {
671 log_err("fio: connection pid %u disappeared\n", (int) ffi->pid);
672 ffi->exited = 1;
673 } else
674 log_err("fio: waitpid: %s\n", strerror(errno));
675 } else if (ret == ffi->pid) {
676 if (WIFSIGNALED(status)) {
677 ffi->signal = WTERMSIG(status);
678 ffi->exited = 1;
679 }
680 if (WIFEXITED(status)) {
681 if (WEXITSTATUS(status))
682 ffi->exitval = WEXITSTATUS(status);
683 ffi->exited = 1;
684 }
685 }
686}
687
688static void fio_server_fork_item_done(struct fio_fork_item *ffi, bool stop)
689{
690 dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval);
691
692 /*
693 * Fold STOP and QUIT...
694 */
695 if (stop) {
696 fio_net_queue_stop(ffi->exitval, ffi->signal);
697 fio_net_queue_quit();
698 }
699
700 flist_del(&ffi->list);
701 free(ffi);
702}
703
704static void fio_server_check_fork_items(struct flist_head *list, bool stop)
705{
706 struct flist_head *entry, *tmp;
707 struct fio_fork_item *ffi;
708
709 flist_for_each_safe(entry, tmp, list) {
710 ffi = flist_entry(entry, struct fio_fork_item, list);
711
712 fio_server_check_fork_item(ffi);
713
714 if (ffi->exited)
715 fio_server_fork_item_done(ffi, stop);
716 }
717}
718
719static void fio_server_check_jobs(struct flist_head *job_list)
720{
721 fio_server_check_fork_items(job_list, true);
722}
723
724static void fio_server_check_conns(struct flist_head *conn_list)
725{
726 fio_server_check_fork_items(conn_list, false);
727}
728
729static int handle_load_file_cmd(struct fio_net_cmd *cmd)
730{
731 struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload;
732 void *file_name = pdu->file;
733 struct cmd_start_pdu spdu;
734
735 dprint(FD_NET, "server: loading local file %s\n", (char *) file_name);
736
737 pdu->name_len = le16_to_cpu(pdu->name_len);
738 pdu->client_type = le16_to_cpu(pdu->client_type);
739
740 if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) {
741 fio_net_queue_quit();
742 return -1;
743 }
744
745 spdu.jobs = cpu_to_le32(thread_number);
746 spdu.stat_outputs = cpu_to_le32(stat_number);
747 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
748 return 0;
749}
750
751static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list,
752 struct fio_net_cmd *cmd)
753{
754 pid_t pid;
755 int ret;
756
757 fio_time_init();
758 set_genesis_time();
759
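	/*
	 * Fork off the workload: the parent just records the child pid on
	 * job_list and returns to the connection loop, while the child runs
	 * fio_backend() and exits with its result.
	 */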
760 pid = fork();
761 if (pid) {
762 fio_server_add_job_pid(job_list, pid);
763 return 0;
764 }
765
766 ret = fio_backend(sk_out);
767 free_threads_shm();
768 _exit(ret);
769}
770
771static int handle_job_cmd(struct fio_net_cmd *cmd)
772{
773 struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
774 void *buf = pdu->buf;
775 struct cmd_start_pdu spdu;
776
777 pdu->buf_len = le32_to_cpu(pdu->buf_len);
778 pdu->client_type = le32_to_cpu(pdu->client_type);
779
780 if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
781 fio_net_queue_quit();
782 return -1;
783 }
784
785 spdu.jobs = cpu_to_le32(thread_number);
786 spdu.stat_outputs = cpu_to_le32(stat_number);
787
788 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
789 return 0;
790}
791
792static int handle_jobline_cmd(struct fio_net_cmd *cmd)
793{
794 void *pdu = cmd->payload;
795 struct cmd_single_line_pdu *cslp;
796 struct cmd_line_pdu *clp;
797 unsigned long offset;
798 struct cmd_start_pdu spdu;
799 char **argv;
800 int i;
801
802 clp = pdu;
803 clp->lines = le16_to_cpu(clp->lines);
804 clp->client_type = le16_to_cpu(clp->client_type);
805 argv = malloc(clp->lines * sizeof(char *));
806 offset = sizeof(*clp);
807
808 dprint(FD_NET, "server: %d command line args\n", clp->lines);
809
810 for (i = 0; i < clp->lines; i++) {
811 cslp = pdu + offset;
812 argv[i] = (char *) cslp->text;
813
814 offset += sizeof(*cslp) + le16_to_cpu(cslp->len);
815 dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
816 }
817
818 if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
819 fio_net_queue_quit();
820 free(argv);
821 return -1;
822 }
823
824 free(argv);
825
826 spdu.jobs = cpu_to_le32(thread_number);
827 spdu.stat_outputs = cpu_to_le32(stat_number);
828
829 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
830 return 0;
831}
832
833static int handle_probe_cmd(struct fio_net_cmd *cmd)
834{
835 struct cmd_client_probe_pdu *pdu = (struct cmd_client_probe_pdu *) cmd->payload;
836 struct cmd_probe_reply_pdu probe;
837 uint64_t tag = cmd->tag;
838
839 dprint(FD_NET, "server: sending probe reply\n");
840
841 strcpy(me, (char *) pdu->server);
842
843 memset(&probe, 0, sizeof(probe));
844 gethostname((char *) probe.hostname, sizeof(probe.hostname));
845#ifdef CONFIG_BIG_ENDIAN
846 probe.bigendian = 1;
847#endif
848 strncpy((char *) probe.fio_version, fio_version_string, sizeof(probe.fio_version));
849
850 probe.os = FIO_OS;
851 probe.arch = FIO_ARCH;
852 probe.bpp = sizeof(void *);
853 probe.cpus = __cpu_to_le32(cpus_online());
854
855 /*
856 * If the client supports compression and we do too, then enable it
857 */
858 if (has_zlib && le64_to_cpu(pdu->flags) & FIO_PROBE_FLAG_ZLIB) {
859 probe.flags = __cpu_to_le64(FIO_PROBE_FLAG_ZLIB);
860 use_zlib = 1;
861 } else {
862 probe.flags = 0;
863 use_zlib = 0;
864 }
865
866 return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY);
867}
868
869static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
870{
871 struct jobs_eta *je;
872 uint64_t tag = cmd->tag;
873 size_t size;
874 int i;
875
876 dprint(FD_NET, "server sending status\n");
877
878 /*
879 * Fake ETA return if we don't have a local one, otherwise the client
880 * will end up timing out waiting for a response to the ETA request
881 */
882 je = get_jobs_eta(true, &size);
883 if (!je) {
884 size = sizeof(*je);
885 je = calloc(1, size);
886 } else {
887 je->nr_running = cpu_to_le32(je->nr_running);
888 je->nr_ramp = cpu_to_le32(je->nr_ramp);
889 je->nr_pending = cpu_to_le32(je->nr_pending);
890 je->nr_setting_up = cpu_to_le32(je->nr_setting_up);
891 je->files_open = cpu_to_le32(je->files_open);
892
893 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
894 je->m_rate[i] = cpu_to_le32(je->m_rate[i]);
895 je->t_rate[i] = cpu_to_le32(je->t_rate[i]);
896 je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
897 je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
898 je->rate[i] = cpu_to_le32(je->rate[i]);
899 je->iops[i] = cpu_to_le32(je->iops[i]);
900 }
901
902 je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
903 je->eta_sec = cpu_to_le64(je->eta_sec);
904 je->nr_threads = cpu_to_le32(je->nr_threads);
905 je->is_pow2 = cpu_to_le32(je->is_pow2);
906 je->unit_base = cpu_to_le32(je->unit_base);
907 }
908
909 fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE);
910 return 0;
911}
912
913static int send_update_job_reply(uint64_t __tag, int error)
914{
915 uint64_t tag = __tag;
916 uint32_t pdu_error;
917
918 pdu_error = __cpu_to_le32(error);
919 return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY);
920}
921
922static int handle_update_job_cmd(struct fio_net_cmd *cmd)
923{
924 struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
925 struct thread_data *td;
926 uint32_t tnumber;
927
928 tnumber = le32_to_cpu(pdu->thread_number);
929
930 dprint(FD_NET, "server: updating options for job %u\n", tnumber);
931
932 if (!tnumber || tnumber > thread_number) {
933 send_update_job_reply(cmd->tag, ENODEV);
934 return 0;
935 }
936
937 td = &threads[tnumber - 1];
938 convert_thread_options_to_cpu(&td->o, &pdu->top);
939 send_update_job_reply(cmd->tag, 0);
940 return 0;
941}
942
943static int handle_trigger_cmd(struct fio_net_cmd *cmd)
944{
945 struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload;
946 char *buf = (char *) pdu->cmd;
947 struct all_io_list *rep;
948 size_t sz;
949
950 pdu->len = le16_to_cpu(pdu->len);
951 buf[pdu->len] = '\0';
952
953 rep = get_all_io_list(IO_LIST_ALL, &sz);
954 if (!rep) {
955 struct all_io_list state;
956
957 state.threads = cpu_to_le64((uint64_t) 0);
958 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY);
959 } else
960 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE);
961
962 exec_trigger(buf);
963 return 0;
964}
965
966static int handle_command(struct sk_out *sk_out, struct flist_head *job_list,
967 struct fio_net_cmd *cmd)
968{
969 int ret;
970
971 dprint(FD_NET, "server: got op [%s], pdu=%u, tag=%llx\n",
972 fio_server_op(cmd->opcode), cmd->pdu_len,
973 (unsigned long long) cmd->tag);
974
975 switch (cmd->opcode) {
976 case FIO_NET_CMD_QUIT:
977 fio_terminate_threads(TERMINATE_ALL);
978 ret = 0;
979 break;
980 case FIO_NET_CMD_EXIT:
981 exit_backend = 1;
982 return -1;
983 case FIO_NET_CMD_LOAD_FILE:
984 ret = handle_load_file_cmd(cmd);
985 break;
986 case FIO_NET_CMD_JOB:
987 ret = handle_job_cmd(cmd);
988 break;
989 case FIO_NET_CMD_JOBLINE:
990 ret = handle_jobline_cmd(cmd);
991 break;
992 case FIO_NET_CMD_PROBE:
993 ret = handle_probe_cmd(cmd);
994 break;
995 case FIO_NET_CMD_SEND_ETA:
996 ret = handle_send_eta_cmd(cmd);
997 break;
998 case FIO_NET_CMD_RUN:
999 ret = handle_run_cmd(sk_out, job_list, cmd);
1000 break;
1001 case FIO_NET_CMD_UPDATE_JOB:
1002 ret = handle_update_job_cmd(cmd);
1003 break;
1004 case FIO_NET_CMD_VTRIGGER:
1005 ret = handle_trigger_cmd(cmd);
1006 break;
1007 case FIO_NET_CMD_SENDFILE: {
1008 struct cmd_sendfile_reply *in;
1009 struct cmd_reply *rep;
1010
1011 rep = (struct cmd_reply *) (uintptr_t) cmd->tag;
1012
1013 in = (struct cmd_sendfile_reply *) cmd->payload;
1014 in->size = le32_to_cpu(in->size);
1015 in->error = le32_to_cpu(in->error);
1016 if (in->error) {
1017 ret = 1;
1018 rep->error = in->error;
1019 } else {
1020 ret = 0;
1021 rep->data = smalloc(in->size);
1022 if (!rep->data) {
1023 ret = 1;
1024 rep->error = ENOMEM;
1025 } else {
1026 rep->size = in->size;
1027 memcpy(rep->data, in->data, in->size);
1028 }
1029 }
1030 fio_mutex_up(&rep->lock);
1031 break;
1032 }
1033 default:
1034 log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
1035 ret = 1;
1036 }
1037
1038 return ret;
1039}
1040
1041/*
1042 * Send a command with a separate PDU, not inlined in the command
1043 */
1044static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
1045 off_t size, uint64_t tag, uint32_t flags)
1046{
1047 struct fio_net_cmd cmd;
1048 struct iovec iov[2];
1049
1050 iov[0].iov_base = (void *) &cmd;
1051 iov[0].iov_len = sizeof(cmd);
1052 iov[1].iov_base = (void *) buf;
1053 iov[1].iov_len = size;
1054
1055 __fio_init_net_cmd(&cmd, opcode, size, tag);
1056 cmd.flags = __cpu_to_le32(flags);
1057 fio_net_cmd_crc_pdu(&cmd, buf);
1058
1059 return fio_sendv_data(sk, iov, 2);
1060}
1061
1062static void finish_entry(struct sk_entry *entry)
1063{
1064 if (entry->flags & SK_F_FREE)
1065 free(entry->buf);
1066 else if (entry->flags & SK_F_COPY)
1067 sfree(entry->buf);
1068
1069 sfree(entry);
1070}
1071
1072static void entry_set_flags_tag(struct sk_entry *entry, struct flist_head *list,
1073 unsigned int *flags, uint64_t *tag)
1074{
1075 if (!flist_empty(list))
1076 *flags = FIO_NET_CMD_F_MORE;
1077 else
1078 *flags = 0;
1079
1080 if (entry->tagptr)
1081 *tag = *entry->tagptr;
1082 else
1083 *tag = 0;
1084}
1085
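/*
 * Send a linked chain of entries as one logical command: every entry except
 * the last goes out with FIO_NET_CMD_F_MORE set, so the receiving side can
 * reassemble the fragments in fio_net_recv_cmd().
 */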
1086static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
1087{
1088 unsigned int flags;
1089 uint64_t tag;
1090 int ret;
1091
1092 entry_set_flags_tag(first, &first->next, &flags, &tag);
1093
1094 ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf, first->size, tag, flags);
1095
1096 while (!flist_empty(&first->next)) {
1097 struct sk_entry *next;
1098
1099 next = flist_first_entry(&first->next, struct sk_entry, list);
1100 flist_del_init(&next->list);
1101
1102 entry_set_flags_tag(next, &first->next, &flags, &tag);
1103
1104 ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf, next->size, tag, flags);
1105 finish_entry(next);
1106 }
1107
1108 return ret;
1109}
1110
1111static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry)
1112{
1113 int ret;
1114
1115 fio_mutex_down(&sk_out->xmit);
1116
1117 if (entry->flags & SK_F_VEC)
1118 ret = send_vec_entry(sk_out, entry);
1119 else if (entry->flags & SK_F_SIMPLE) {
1120 uint64_t tag = 0;
1121
1122 if (entry->tagptr)
1123 tag = *entry->tagptr;
1124
1125 ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode, tag, NULL);
1126 } else
1127 ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf, entry->size, entry->tagptr, NULL);
1128
1129 fio_mutex_up(&sk_out->xmit);
1130
1131 if (ret)
1132 log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
1133
1134 finish_entry(entry);
1135 return ret;
1136}
1137
1138static int handle_xmits(struct sk_out *sk_out)
1139{
1140 struct sk_entry *entry;
1141 FLIST_HEAD(list);
1142 int ret = 0;
1143
1144 sk_lock(sk_out);
1145 if (flist_empty(&sk_out->list)) {
1146 sk_unlock(sk_out);
1147 return 0;
1148 }
1149
1150 flist_splice_init(&sk_out->list, &list);
1151 sk_unlock(sk_out);
1152
1153 while (!flist_empty(&list)) {
1154 entry = flist_entry(list.next, struct sk_entry, list);
1155 flist_del(&entry->list);
1156 ret += handle_sk_entry(sk_out, entry);
1157 }
1158
1159 return ret;
1160}
1161
1162static int handle_connection(struct sk_out *sk_out)
1163{
1164 struct fio_net_cmd *cmd = NULL;
1165 FLIST_HEAD(job_list);
1166 int ret = 0;
1167
1168 reset_fio_state();
1169
1170 /* read forever */
1171 while (!exit_backend) {
1172 struct pollfd pfd = {
1173 .fd = sk_out->sk,
1174 .events = POLLIN,
1175 };
1176
1177 ret = 0;
1178 do {
1179 int timeout = 1000;
1180
1181 if (!flist_empty(&job_list))
1182 timeout = 100;
1183
1184 handle_xmits(sk_out);
1185
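			/*
			 * Zero-timeout poll: the actual waiting is done below
			 * in fio_mutex_down_timeout(), so newly queued
			 * transmit work can wake this loop without a socket
			 * event.
			 */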
1186 ret = poll(&pfd, 1, 0);
1187 if (ret < 0) {
1188 if (errno == EINTR)
1189 break;
1190 log_err("fio: poll: %s\n", strerror(errno));
1191 break;
1192 } else if (!ret) {
1193 fio_server_check_jobs(&job_list);
1194 fio_mutex_down_timeout(&sk_out->wait, timeout);
1195 continue;
1196 }
1197
1198 if (pfd.revents & POLLIN)
1199 break;
1200 if (pfd.revents & (POLLERR|POLLHUP)) {
1201 ret = 1;
1202 break;
1203 }
1204 } while (!exit_backend);
1205
1206 fio_server_check_jobs(&job_list);
1207
1208 if (ret < 0)
1209 break;
1210
1211 cmd = fio_net_recv_cmd(sk_out->sk);
1212 if (!cmd) {
1213 ret = -1;
1214 break;
1215 }
1216
1217 ret = handle_command(sk_out, &job_list, cmd);
1218 if (ret)
1219 break;
1220
1221 free(cmd);
1222 cmd = NULL;
1223 }
1224
1225 if (cmd)
1226 free(cmd);
1227
1228 handle_xmits(sk_out);
1229
1230 close(sk_out->sk);
1231 sk_out->sk = -1;
1232 __sk_out_drop(sk_out);
1233 _exit(ret);
1234}
1235
1236/* get the address on this host bound by the input socket,
1237 * whether it is ipv6 or ipv4 */
1238
1239static int get_my_addr_str(int sk)
1240{
1241 struct sockaddr_in6 myaddr6 = { 0, };
1242 struct sockaddr_in myaddr4 = { 0, };
1243 struct sockaddr *sockaddr_p;
1244 char *net_addr;
1245 socklen_t len;
1246 int ret;
1247
1248 if (use_ipv6) {
1249 len = sizeof(myaddr6);
1250 sockaddr_p = (struct sockaddr * )&myaddr6;
1251 net_addr = (char * )&myaddr6.sin6_addr;
1252 } else {
1253 len = sizeof(myaddr4);
1254 sockaddr_p = (struct sockaddr * )&myaddr4;
1255 net_addr = (char * )&myaddr4.sin_addr;
1256 }
1257
1258 ret = getsockname(sk, sockaddr_p, &len);
1259 if (ret) {
 1260 log_err("fio: getsockname: %s\n", strerror(errno));
1261 return -1;
1262 }
1263
1264 if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) {
1265 log_err("inet_ntop: failed to convert addr to string\n");
1266 return -1;
1267 }
1268
1269 dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str);
1270 return 0;
1271}
1272
1273static int accept_loop(int listen_sk)
1274{
1275 struct sockaddr_in addr;
1276 struct sockaddr_in6 addr6;
1277 socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr);
1278 struct pollfd pfd;
1279 int ret = 0, sk, exitval = 0;
1280 FLIST_HEAD(conn_list);
1281
1282 dprint(FD_NET, "server enter accept loop\n");
1283
1284 fio_set_fd_nonblocking(listen_sk, "server");
1285
1286 while (!exit_backend) {
1287 struct sk_out *sk_out;
1288 const char *from;
1289 char buf[64];
1290 pid_t pid;
1291
1292 pfd.fd = listen_sk;
1293 pfd.events = POLLIN;
1294 do {
1295 int timeout = 1000;
1296
1297 if (!flist_empty(&conn_list))
1298 timeout = 100;
1299
1300 ret = poll(&pfd, 1, timeout);
1301 if (ret < 0) {
1302 if (errno == EINTR)
1303 break;
1304 log_err("fio: poll: %s\n", strerror(errno));
1305 break;
1306 } else if (!ret) {
1307 fio_server_check_conns(&conn_list);
1308 continue;
1309 }
1310
1311 if (pfd.revents & POLLIN)
1312 break;
1313 } while (!exit_backend);
1314
1315 fio_server_check_conns(&conn_list);
1316
1317 if (exit_backend || ret < 0)
1318 break;
1319
1320 if (use_ipv6)
1321 sk = accept(listen_sk, (struct sockaddr *) &addr6, &len);
1322 else
1323 sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
1324
1325 if (sk < 0) {
1326 log_err("fio: accept: %s\n", strerror(errno));
1327 return -1;
1328 }
1329
1330 if (use_ipv6)
1331 from = inet_ntop(AF_INET6, (struct sockaddr *) &addr6.sin6_addr, buf, sizeof(buf));
1332 else
1333 from = inet_ntop(AF_INET, (struct sockaddr *) &addr.sin_addr, buf, sizeof(buf));
1334
1335 dprint(FD_NET, "server: connect from %s\n", from);
1336
1337 sk_out = smalloc(sizeof(*sk_out));
1338 sk_out->sk = sk;
1339 INIT_FLIST_HEAD(&sk_out->list);
1340 __fio_mutex_init(&sk_out->lock, FIO_MUTEX_UNLOCKED);
1341 __fio_mutex_init(&sk_out->wait, FIO_MUTEX_LOCKED);
1342 __fio_mutex_init(&sk_out->xmit, FIO_MUTEX_UNLOCKED);
1343
1344 pid = fork();
1345 if (pid) {
1346 close(sk);
1347 fio_server_add_conn_pid(&conn_list, pid);
1348 continue;
1349 }
1350
1351 /* if error, it's already logged, non-fatal */
1352 get_my_addr_str(sk);
1353
1354 /*
1355 * Assign sk_out here, it'll be dropped in handle_connection()
1356 * since that function calls _exit() when done
1357 */
1358 sk_out_assign(sk_out);
1359 handle_connection(sk_out);
1360 }
1361
1362 return exitval;
1363}
1364
1365int fio_server_text_output(int level, const char *buf, size_t len)
1366{
1367 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1368 struct cmd_text_pdu *pdu;
1369 unsigned int tlen;
1370 struct timeval tv;
1371
1372 if (!sk_out || sk_out->sk == -1)
1373 return -1;
1374
1375 tlen = sizeof(*pdu) + len;
1376 pdu = malloc(tlen);
1377
1378 pdu->level = __cpu_to_le32(level);
1379 pdu->buf_len = __cpu_to_le32(len);
1380
1381 gettimeofday(&tv, NULL);
1382 pdu->log_sec = __cpu_to_le64(tv.tv_sec);
1383 pdu->log_usec = __cpu_to_le64(tv.tv_usec);
1384
1385 memcpy(pdu->buf, buf, len);
1386
1387 fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY);
1388 free(pdu);
1389 return len;
1390}
1391
1392static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
1393{
1394 dst->max_val = cpu_to_le64(src->max_val);
1395 dst->min_val = cpu_to_le64(src->min_val);
1396 dst->samples = cpu_to_le64(src->samples);
1397
1398 /*
1399 * Encode to IEEE 754 for network transfer
1400 */
1401 dst->mean.u.i = cpu_to_le64(fio_double_to_uint64(src->mean.u.f));
1402 dst->S.u.i = cpu_to_le64(fio_double_to_uint64(src->S.u.f));
1403}
1404
1405static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
1406{
1407 int i;
1408
1409 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1410 dst->max_run[i] = cpu_to_le64(src->max_run[i]);
1411 dst->min_run[i] = cpu_to_le64(src->min_run[i]);
1412 dst->max_bw[i] = cpu_to_le64(src->max_bw[i]);
1413 dst->min_bw[i] = cpu_to_le64(src->min_bw[i]);
1414 dst->io_kb[i] = cpu_to_le64(src->io_kb[i]);
1415 dst->agg[i] = cpu_to_le64(src->agg[i]);
1416 }
1417
1418 dst->kb_base = cpu_to_le32(src->kb_base);
1419 dst->unit_base = cpu_to_le32(src->unit_base);
1420 dst->groupid = cpu_to_le32(src->groupid);
1421 dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
1422}
1423
1424/*
1425 * Send a CMD_TS, which packs struct thread_stat and group_run_stats
1426 * into a single payload.
1427 */
1428void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs)
1429{
1430 struct cmd_ts_pdu p;
1431 int i, j;
1432
1433 dprint(FD_NET, "server sending end stats\n");
1434
1435 memset(&p, 0, sizeof(p));
1436
1437 strncpy(p.ts.name, ts->name, FIO_JOBNAME_SIZE - 1);
1438 strncpy(p.ts.verror, ts->verror, FIO_VERROR_SIZE - 1);
1439 strncpy(p.ts.description, ts->description, FIO_JOBDESC_SIZE - 1);
1440
1441 p.ts.error = cpu_to_le32(ts->error);
1442 p.ts.thread_number = cpu_to_le32(ts->thread_number);
1443 p.ts.groupid = cpu_to_le32(ts->groupid);
1444 p.ts.pid = cpu_to_le32(ts->pid);
1445 p.ts.members = cpu_to_le32(ts->members);
1446 p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
1447
1448 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1449 convert_io_stat(&p.ts.clat_stat[i], &ts->clat_stat[i]);
1450 convert_io_stat(&p.ts.slat_stat[i], &ts->slat_stat[i]);
1451 convert_io_stat(&p.ts.lat_stat[i], &ts->lat_stat[i]);
1452 convert_io_stat(&p.ts.bw_stat[i], &ts->bw_stat[i]);
1453 }
1454
1455 p.ts.usr_time = cpu_to_le64(ts->usr_time);
1456 p.ts.sys_time = cpu_to_le64(ts->sys_time);
1457 p.ts.ctx = cpu_to_le64(ts->ctx);
1458 p.ts.minf = cpu_to_le64(ts->minf);
1459 p.ts.majf = cpu_to_le64(ts->majf);
1460 p.ts.clat_percentiles = cpu_to_le64(ts->clat_percentiles);
1461 p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision);
1462
1463 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1464 fio_fp64_t *src = &ts->percentile_list[i];
1465 fio_fp64_t *dst = &p.ts.percentile_list[i];
1466
1467 dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f));
1468 }
1469
1470 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
1471 p.ts.io_u_map[i] = cpu_to_le32(ts->io_u_map[i]);
1472 p.ts.io_u_submit[i] = cpu_to_le32(ts->io_u_submit[i]);
1473 p.ts.io_u_complete[i] = cpu_to_le32(ts->io_u_complete[i]);
1474 }
1475
1476 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
1477 p.ts.io_u_lat_u[i] = cpu_to_le32(ts->io_u_lat_u[i]);
1478 p.ts.io_u_lat_m[i] = cpu_to_le32(ts->io_u_lat_m[i]);
1479 }
1480
1481 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1482 for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
1483 p.ts.io_u_plat[i][j] = cpu_to_le32(ts->io_u_plat[i][j]);
1484
1485 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1486 p.ts.total_io_u[i] = cpu_to_le64(ts->total_io_u[i]);
1487 p.ts.short_io_u[i] = cpu_to_le64(ts->short_io_u[i]);
1488 p.ts.drop_io_u[i] = cpu_to_le64(ts->drop_io_u[i]);
1489 }
1490
1491 p.ts.total_submit = cpu_to_le64(ts->total_submit);
1492 p.ts.total_complete = cpu_to_le64(ts->total_complete);
1493
1494 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1495 p.ts.io_bytes[i] = cpu_to_le64(ts->io_bytes[i]);
1496 p.ts.runtime[i] = cpu_to_le64(ts->runtime[i]);
1497 }
1498
1499 p.ts.total_run_time = cpu_to_le64(ts->total_run_time);
1500 p.ts.continue_on_error = cpu_to_le16(ts->continue_on_error);
1501 p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
1502 p.ts.first_error = cpu_to_le32(ts->first_error);
1503 p.ts.kb_base = cpu_to_le32(ts->kb_base);
1504 p.ts.unit_base = cpu_to_le32(ts->unit_base);
1505
1506 p.ts.latency_depth = cpu_to_le32(ts->latency_depth);
1507 p.ts.latency_target = cpu_to_le64(ts->latency_target);
1508 p.ts.latency_window = cpu_to_le64(ts->latency_window);
1509 p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
1510
1511 p.ts.nr_block_infos = le64_to_cpu(ts->nr_block_infos);
1512 for (i = 0; i < p.ts.nr_block_infos; i++)
1513 p.ts.block_infos[i] = le32_to_cpu(ts->block_infos[i]);
1514
1515 convert_gs(&p.rs, rs);
1516
1517 fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
1518}
1519
1520void fio_server_send_gs(struct group_run_stats *rs)
1521{
1522 struct group_run_stats gs;
1523
1524 dprint(FD_NET, "server sending group run stats\n");
1525
1526 convert_gs(&gs, rs);
1527 fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY);
1528}
1529
1530void fio_server_send_job_options(struct flist_head *opt_list,
1531 unsigned int groupid)
1532{
1533 struct cmd_job_option pdu;
1534 struct flist_head *entry;
1535
1536 if (flist_empty(opt_list))
1537 return;
1538
1539 flist_for_each(entry, opt_list) {
1540 struct print_option *p;
1541 size_t len;
1542
1543 p = flist_entry(entry, struct print_option, list);
1544 memset(&pdu, 0, sizeof(pdu));
1545
1546 if (groupid == -1U) {
1547 pdu.global = __cpu_to_le16(1);
1548 pdu.groupid = 0;
1549 } else {
1550 pdu.global = 0;
1551 pdu.groupid = cpu_to_le32(groupid);
1552 }
1553 len = strlen(p->name);
1554 if (len >= sizeof(pdu.name)) {
1555 len = sizeof(pdu.name) - 1;
1556 pdu.truncated = __cpu_to_le16(1);
1557 }
1558 memcpy(pdu.name, p->name, len);
1559 if (p->value) {
1560 len = strlen(p->value);
1561 if (len >= sizeof(pdu.value)) {
1562 len = sizeof(pdu.value) - 1;
1563 pdu.truncated = __cpu_to_le16(1);
1564 }
1565 memcpy(pdu.value, p->value, len);
1566 }
1567 fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1568 }
1569}
1570
1571static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
1572{
1573 int i;
1574
1575 for (i = 0; i < 2; i++) {
1576 dst->ios[i] = cpu_to_le64(src->ios[i]);
1577 dst->merges[i] = cpu_to_le64(src->merges[i]);
1578 dst->sectors[i] = cpu_to_le64(src->sectors[i]);
1579 dst->ticks[i] = cpu_to_le64(src->ticks[i]);
1580 }
1581
1582 dst->io_ticks = cpu_to_le64(src->io_ticks);
1583 dst->time_in_queue = cpu_to_le64(src->time_in_queue);
1584 dst->slavecount = cpu_to_le32(src->slavecount);
1585 dst->max_util.u.i = cpu_to_le64(fio_double_to_uint64(src->max_util.u.f));
1586}
1587
1588static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src)
1589{
1590 int i;
1591
1592 dst->name[FIO_DU_NAME_SZ - 1] = '\0';
1593 strncpy((char *) dst->name, (char *) src->name, FIO_DU_NAME_SZ - 1);
1594
1595 for (i = 0; i < 2; i++) {
1596 dst->s.ios[i] = cpu_to_le64(src->s.ios[i]);
1597 dst->s.merges[i] = cpu_to_le64(src->s.merges[i]);
1598 dst->s.sectors[i] = cpu_to_le64(src->s.sectors[i]);
1599 dst->s.ticks[i] = cpu_to_le64(src->s.ticks[i]);
1600 }
1601
1602 dst->s.io_ticks = cpu_to_le64(src->s.io_ticks);
1603 dst->s.time_in_queue = cpu_to_le64(src->s.time_in_queue);
1604 dst->s.msec = cpu_to_le64(src->s.msec);
1605}
1606
1607void fio_server_send_du(void)
1608{
1609 struct disk_util *du;
1610 struct flist_head *entry;
1611 struct cmd_du_pdu pdu;
1612
1613 dprint(FD_NET, "server: sending disk_util %d\n", !flist_empty(&disk_list));
1614
1615 memset(&pdu, 0, sizeof(pdu));
1616
1617 flist_for_each(entry, &disk_list) {
1618 du = flist_entry(entry, struct disk_util, list);
1619
1620 convert_dus(&pdu.dus, &du->dus);
1621 convert_agg(&pdu.agg, &du->agg);
1622
1623 fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1624 }
1625}
1626
1627static int fio_send_iolog_gz(struct sk_entry *first, struct io_log *log)
1628{
1629 int ret = 0;
1630#ifdef CONFIG_ZLIB
1631 struct sk_entry *entry;
1632 z_stream stream;
1633 void *out_pdu;
1634
1635 /*
1636 * Dirty - since the log is potentially huge, compress it into
1637 * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
1638 * side defragment it.
1639 */
1640 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1641
1642 stream.zalloc = Z_NULL;
1643 stream.zfree = Z_NULL;
1644 stream.opaque = Z_NULL;
1645
1646 if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
1647 ret = 1;
1648 goto err;
1649 }
1650
1651 stream.next_in = (void *) log->log;
1652 stream.avail_in = log->nr_samples * log_entry_sz(log);
1653
1654 do {
1655 unsigned int this_len;
1656
1657 stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1658 stream.next_out = out_pdu;
1659 ret = deflate(&stream, Z_FINISH);
1660 /* may be Z_OK, or Z_STREAM_END */
1661 if (ret < 0)
1662 goto err_zlib;
1663
1664 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
1665
1666 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1667 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1668 out_pdu = NULL;
1669 flist_add_tail(&entry->list, &first->next);
1670 } while (stream.avail_in);
1671
1672err_zlib:
1673 deflateEnd(&stream);
1674err:
1675 free(out_pdu);
1676#endif
1677 return ret;
1678}
1679
1680static int fio_send_gz_chunks(struct sk_entry *first, struct io_log *log)
1681{
1682 struct sk_entry *entry;
1683 struct flist_head *node;
1684
1685 pthread_mutex_lock(&log->chunk_lock);
1686 flist_for_each(node, &log->chunk_list) {
1687 struct iolog_compress *c;
1688
1689 c = flist_entry(node, struct iolog_compress, list);
1690 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len,
1691 NULL, SK_F_VEC | SK_F_INLINE);
1692 flist_add_tail(&entry->list, &first->next);
1693 }
1694 pthread_mutex_unlock(&log->chunk_lock);
1695
1696 return 0;
1697}
1698
1699int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
1700{
1701 struct cmd_iolog_pdu pdu;
1702 struct sk_entry *first;
1703 int i, ret = 0;
1704
1705 pdu.nr_samples = cpu_to_le64(log->nr_samples);
1706 pdu.thread_number = cpu_to_le32(td->thread_number);
1707 pdu.log_type = cpu_to_le32(log->log_type);
1708
1709 if (!flist_empty(&log->chunk_list))
1710 pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
1711 else if (use_zlib)
1712 pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED);
1713 else
1714 pdu.compressed = 0;
1715
1716 strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX);
1717 pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
1718
1719 /*
1720 * We can't do this for a pre-compressed log, but for that case,
1721 * log->nr_samples is zero anyway.
1722 */
1723 for (i = 0; i < log->nr_samples; i++) {
1724 struct io_sample *s = get_sample(log, i);
1725
1726 s->time = cpu_to_le64(s->time);
1727 s->val = cpu_to_le64(s->val);
1728 s->__ddir = cpu_to_le32(s->__ddir);
1729 s->bs = cpu_to_le32(s->bs);
1730
1731 if (log->log_offset) {
1732 struct io_sample_offset *so = (void *) s;
1733
1734 so->offset = cpu_to_le64(so->offset);
1735 }
1736 }
1737
1738 /*
1739 * Assemble header entry first
1740 */
1741 first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY);
1742
1743 /*
1744 * Now append actual log entries. Compress if we can, otherwise just
1745 * plain text output.
1746 */
1747 if (!flist_empty(&log->chunk_list))
1748 ret = fio_send_gz_chunks(first, log);
1749 else if (use_zlib)
1750 ret = fio_send_iolog_gz(first, log);
1751 else {
1752 struct sk_entry *entry;
1753 size_t size = log->nr_samples * log_entry_sz(log);
1754
1755 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, log->log, size,
1756 NULL, SK_F_VEC | SK_F_INLINE);
1757 flist_add_tail(&entry->list, &first->next);
1758 }
1759
1760 fio_net_queue_entry(first);
1761 return ret;
1762}
1763
1764void fio_server_send_add_job(struct thread_data *td)
1765{
1766 struct cmd_add_job_pdu pdu;
1767
1768 memset(&pdu, 0, sizeof(pdu));
1769 pdu.thread_number = cpu_to_le32(td->thread_number);
1770 pdu.groupid = cpu_to_le32(td->groupid);
1771 convert_thread_options_to_net(&pdu.top, &td->o);
1772
1773 fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1774}
1775
1776void fio_server_send_start(struct thread_data *td)
1777{
1778 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1779
1780 assert(sk_out->sk != -1);
1781
1782 fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, 0, SK_F_SIMPLE);
1783}
1784
1785int fio_server_get_verify_state(const char *name, int threadnumber,
1786 void **datap, int *version)
1787{
1788 struct thread_io_list *s;
1789 struct cmd_sendfile out;
1790 struct cmd_reply *rep;
1791 uint64_t tag;
1792 void *data;
1793
1794 dprint(FD_NET, "server: request verify state\n");
1795
1796 rep = smalloc(sizeof(*rep));
1797 if (!rep) {
1798 log_err("fio: smalloc pool too small\n");
1799 return 1;
1800 }
1801
1802 __fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED);
1803 rep->data = NULL;
1804 rep->error = 0;
1805
1806 verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
1807 threadnumber);
1808 tag = (uint64_t) (uintptr_t) rep;
1809 fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag, SK_F_COPY);
1810
1811 /*
1812 * Wait for the backend to receive the reply
1813 */
1814 if (fio_mutex_down_timeout(&rep->lock, 10000)) {
1815 log_err("fio: timed out waiting for reply\n");
1816 goto fail;
1817 }
1818
1819 if (rep->error) {
1820 log_err("fio: failure on receiving state file: %s\n", strerror(rep->error));
1821fail:
1822 *datap = NULL;
1823 sfree(rep);
1824 fio_net_queue_quit();
1825 return 1;
1826 }
1827
1828 /*
1829 * The format is verify_state_hdr, then thread_io_list. Verify
1830 * the header, and the thread_io_list checksum
1831 */
1832 s = rep->data + sizeof(struct verify_state_hdr);
1833 if (verify_state_hdr(rep->data, s, version))
1834 goto fail;
1835
1836 /*
1837 * Don't need the header from now, copy just the thread_io_list
1838 */
1839 rep->size -= sizeof(struct verify_state_hdr);
1840 data = malloc(rep->size);
1841 memcpy(data, s, rep->size);
1842 *datap = data;
1843
1844 sfree(rep->data);
1845 __fio_mutex_remove(&rep->lock);
1846 sfree(rep);
1847 return 0;
1848}
1849
1850static int fio_init_server_ip(void)
1851{
1852 struct sockaddr *addr;
1853 socklen_t socklen;
1854 char buf[80];
1855 const char *str;
1856 int sk, opt;
1857
1858 if (use_ipv6)
1859 sk = socket(AF_INET6, SOCK_STREAM, 0);
1860 else
1861 sk = socket(AF_INET, SOCK_STREAM, 0);
1862
1863 if (sk < 0) {
1864 log_err("fio: socket: %s\n", strerror(errno));
1865 return -1;
1866 }
1867
1868 opt = 1;
1869 if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) {
1870 log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno));
1871 close(sk);
1872 return -1;
1873 }
1874#ifdef SO_REUSEPORT
1875 if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
1876 log_err("fio: setsockopt(REUSEPORT): %s\n", strerror(errno));
1877 close(sk);
1878 return -1;
1879 }
1880#endif
1881
1882 if (use_ipv6) {
1883 const void *src = &saddr_in6.sin6_addr;
1884
1885 addr = (struct sockaddr *) &saddr_in6;
1886 socklen = sizeof(saddr_in6);
1887 saddr_in6.sin6_family = AF_INET6;
1888 str = inet_ntop(AF_INET6, src, buf, sizeof(buf));
1889 } else {
1890 const void *src = &saddr_in.sin_addr;
1891
1892 addr = (struct sockaddr *) &saddr_in;
1893 socklen = sizeof(saddr_in);
1894 saddr_in.sin_family = AF_INET;
1895 str = inet_ntop(AF_INET, src, buf, sizeof(buf));
1896 }
1897
1898 if (bind(sk, addr, socklen) < 0) {
1899 log_err("fio: bind: %s\n", strerror(errno));
1900 log_info("fio: failed with IPv%c %s\n", use_ipv6 ? '6' : '4', str);
1901 close(sk);
1902 return -1;
1903 }
1904
1905 return sk;
1906}
1907
1908static int fio_init_server_sock(void)
1909{
1910 struct sockaddr_un addr;
1911 socklen_t len;
1912 mode_t mode;
1913 int sk;
1914
1915 sk = socket(AF_UNIX, SOCK_STREAM, 0);
1916 if (sk < 0) {
1917 log_err("fio: socket: %s\n", strerror(errno));
1918 return -1;
1919 }
1920
1921 mode = umask(000);
1922
1923 memset(&addr, 0, sizeof(addr));
1924 addr.sun_family = AF_UNIX;
1925 strncpy(addr.sun_path, bind_sock, sizeof(addr.sun_path) - 1);
1926
1927 len = sizeof(addr.sun_family) + strlen(bind_sock) + 1;
1928
1929 if (bind(sk, (struct sockaddr *) &addr, len) < 0) {
1930 log_err("fio: bind: %s\n", strerror(errno));
1931 close(sk);
1932 return -1;
1933 }
1934
1935 umask(mode);
1936 return sk;
1937}
1938
1939static int fio_init_server_connection(void)
1940{
1941 char bind_str[128];
1942 int sk;
1943
1944 dprint(FD_NET, "starting server\n");
1945
1946 if (!bind_sock)
1947 sk = fio_init_server_ip();
1948 else
1949 sk = fio_init_server_sock();
1950
1951 if (sk < 0)
1952 return sk;
1953
1954 memset(bind_str, 0, sizeof(bind_str));
1955
1956 if (!bind_sock) {
1957 char *p, port[16];
1958 const void *src;
1959 int af;
1960
1961 if (use_ipv6) {
1962 af = AF_INET6;
1963 src = &saddr_in6.sin6_addr;
1964 } else {
1965 af = AF_INET;
1966 src = &saddr_in.sin_addr;
1967 }
1968
1969 p = (char *) inet_ntop(af, src, bind_str, sizeof(bind_str));
1970
1971 sprintf(port, ",%u", fio_net_port);
1972 if (p)
1973 strcat(p, port);
1974 else
1975 strncpy(bind_str, port, sizeof(bind_str) - 1);
1976 } else
1977 strncpy(bind_str, bind_sock, sizeof(bind_str) - 1);
1978
1979 log_info("fio: server listening on %s\n", bind_str);
1980
1981 if (listen(sk, 4) < 0) {
1982 log_err("fio: listen: %s\n", strerror(errno));
1983 close(sk);
1984 return -1;
1985 }
1986
1987 return sk;
1988}
1989
1990int fio_server_parse_host(const char *host, int ipv6, struct in_addr *inp,
1991 struct in6_addr *inp6)
1992
1993{
1994 int ret = 0;
1995
1996 if (ipv6)
1997 ret = inet_pton(AF_INET6, host, inp6);
1998 else
1999 ret = inet_pton(AF_INET, host, inp);
2000
2001 if (ret != 1) {
2002 struct addrinfo hints, *res;
2003
2004 memset(&hints, 0, sizeof(hints));
2005 hints.ai_family = ipv6 ? AF_INET6 : AF_INET;
2006 hints.ai_socktype = SOCK_STREAM;
2007
2008 ret = getaddrinfo(host, NULL, &hints, &res);
2009 if (ret) {
2010 log_err("fio: failed to resolve <%s> (%s)\n", host,
2011 gai_strerror(ret));
2012 return 1;
2013 }
2014
2015 if (ipv6)
2016 memcpy(inp6, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr, sizeof(*inp6));
2017 else
2018 memcpy(inp, &((struct sockaddr_in *) res->ai_addr)->sin_addr, sizeof(*inp));
2019
2020 ret = 1;
2021 freeaddrinfo(res);
2022 }
2023
2024 return !(ret == 1);
2025}
2026
2027/*
2028 * Parse a host/ip/port string. Reads from 'str'.
2029 *
2030 * Outputs:
2031 *
2032 * For IPv4:
2033 * *ptr is the host, *port is the port, inp is the destination.
2034 * For IPv6:
2035 * *ptr is the host, *port is the port, inp6 is the dest, and *ipv6 is 1.
2036 * For local domain sockets:
2037 * *ptr is the filename, *is_sock is 1.
2038 */
2039int fio_server_parse_string(const char *str, char **ptr, int *is_sock,
2040 int *port, struct in_addr *inp,
2041 struct in6_addr *inp6, int *ipv6)
2042{
2043 const char *host = str;
2044 char *portp;
2045 int lport = 0;
2046
2047 *ptr = NULL;
2048 *is_sock = 0;
2049 *port = fio_net_port;
2050 *ipv6 = 0;
2051
2052 if (!strncmp(str, "sock:", 5)) {
2053 *ptr = strdup(str + 5);
2054 *is_sock = 1;
2055
2056 return 0;
2057 }
2058
2059 /*
2060 * Is it ip:<ip or host>:port
2061 */
2062 if (!strncmp(host, "ip:", 3))
2063 host += 3;
2064 else if (!strncmp(host, "ip4:", 4))
2065 host += 4;
2066 else if (!strncmp(host, "ip6:", 4)) {
2067 host += 4;
2068 *ipv6 = 1;
2069 } else if (host[0] == ':') {
2070 /* String is :port */
2071 host++;
2072 lport = atoi(host);
2073 if (!lport || lport > 65535) {
2074 log_err("fio: bad server port %u\n", lport);
2075 return 1;
2076 }
2077 /* no hostname given, we are done */
2078 *port = lport;
2079 return 0;
2080 }
2081
2082 /*
2083 * If no port seen yet, check if there's a last ',' at the end
2084 */
2085 if (!lport) {
2086 portp = strchr(host, ',');
2087 if (portp) {
2088 *portp = '\0';
2089 portp++;
2090 lport = atoi(portp);
2091 if (!lport || lport > 65535) {
2092 log_err("fio: bad server port %u\n", lport);
2093 return 1;
2094 }
2095 }
2096 }
2097
2098 if (lport)
2099 *port = lport;
2100
2101 if (!strlen(host))
2102 return 0;
2103
2104 *ptr = strdup(host);
2105
2106 if (fio_server_parse_host(*ptr, *ipv6, inp, inp6)) {
2107 free(*ptr);
2108 *ptr = NULL;
2109 return 1;
2110 }
2111
2112 if (*port == 0)
2113 *port = fio_net_port;
2114
2115 return 0;
2116}
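/*
 * Worked examples of the accepted forms, given the parsing above
 * (hypothetical inputs):
 *
 *	"sock:/tmp/fio.sk"	-> *is_sock = 1, *ptr = "/tmp/fio.sk"
 *	":9000"			-> *port = 9000, no host
 *	"ip:10.0.0.1,8765"	-> *port = 8765, inp = 10.0.0.1
 *	"ip6:::1"		-> *ipv6 = 1, inp6 = ::1, *port = fio_net_port
 */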
2117
2118/*
2119 * Server arg should be one of:
2120 *
2121 * sock:/path/to/socket
2122 * ip:1.2.3.4
2123 * 1.2.3.4
2124 *
2125 * Where sock uses unix domain sockets, and ip binds the server to
2126 * a specific interface. If no arguments are given to the server, it
2127 * uses IP and binds to 0.0.0.0.
2128 *
2129 */
2130static int fio_handle_server_arg(void)
2131{
2132 int port = fio_net_port;
2133 int is_sock, ret = 0;
2134
2135 saddr_in.sin_addr.s_addr = htonl(INADDR_ANY);
2136
2137 if (!fio_server_arg)
2138 goto out;
2139
2140 ret = fio_server_parse_string(fio_server_arg, &bind_sock, &is_sock,
2141 &port, &saddr_in.sin_addr,
2142 &saddr_in6.sin6_addr, &use_ipv6);
2143
2144 if (!is_sock && bind_sock) {
2145 free(bind_sock);
2146 bind_sock = NULL;
2147 }
2148
2149out:
2150 fio_net_port = port;
2151 saddr_in.sin_port = htons(port);
2152 saddr_in6.sin6_port = htons(port);
2153 return ret;
2154}
2155
2156static void sig_int(int sig)
2157{
2158 if (bind_sock)
2159 unlink(bind_sock);
2160}
2161
2162static void set_sig_handlers(void)
2163{
2164 struct sigaction act;
2165
2166 memset(&act, 0, sizeof(act));
2167 act.sa_handler = sig_int;
2168 act.sa_flags = SA_RESTART;
2169 sigaction(SIGINT, &act, NULL);
2170}
2171
2172static int fio_server(void)
2173{
2174 int sk, ret;
2175
2176 if (pthread_key_create(&sk_out_key, NULL)) {
2177 log_err("fio: can't create sk_out backend key\n");
2178 return -1;
2179 }
2180
2181 pthread_setspecific(sk_out_key, NULL);
2182
2183 dprint(FD_NET, "starting server\n");
2184
2185 if (fio_handle_server_arg())
2186 return -1;
2187
2188 sk = fio_init_server_connection();
2189 if (sk < 0)
2190 return -1;
2191
2192 set_sig_handlers();
2193
2194 ret = accept_loop(sk);
2195
2196 close(sk);
2197
2198 if (fio_server_arg) {
2199 free(fio_server_arg);
2200 fio_server_arg = NULL;
2201 }
2202 if (bind_sock)
2203 free(bind_sock);
2204
2205 return ret;
2206}
2207
2208void fio_server_got_signal(int signal)
2209{
2210 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2211
2212 assert(sk_out);
2213
2214 if (signal == SIGPIPE)
2215 sk_out->sk = -1;
2216 else {
2217 log_info("\nfio: terminating on signal %d\n", signal);
2218 exit_backend = 1;
2219 }
2220}
2221
2222static int check_existing_pidfile(const char *pidfile)
2223{
2224 struct stat sb;
2225 char buf[16];
2226 pid_t pid;
2227 FILE *f;
2228
2229 if (stat(pidfile, &sb))
2230 return 0;
2231
2232 f = fopen(pidfile, "r");
2233 if (!f)
2234 return 0;
2235
2236 if (fread(buf, sb.st_size, 1, f) <= 0) {
2237 fclose(f);
2238 return 1;
2239 }
2240 fclose(f);
2241
2242 pid = atoi(buf);
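	/*
	 * Probe whether the recorded pid is still alive: if kill() fails
	 * with ESRCH the process is gone and the stale pidfile can be
	 * ignored; any other outcome is treated as a live server.
	 */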
2243 if (kill(pid, SIGCONT) < 0)
2244 return errno != ESRCH;
2245
2246 return 1;
2247}
2248
2249static int write_pid(pid_t pid, const char *pidfile)
2250{
2251 FILE *fpid;
2252
2253 fpid = fopen(pidfile, "w");
2254 if (!fpid) {
2255 log_err("fio: failed opening pid file %s\n", pidfile);
2256 return 1;
2257 }
2258
2259 fprintf(fpid, "%u\n", (unsigned int) pid);
2260 fclose(fpid);
2261 return 0;
2262}
2263
2264/*
2265 * If pidfile is specified, background us.
2266 */
2267int fio_start_server(char *pidfile)
2268{
2269 pid_t pid;
2270 int ret;
2271
2272#if defined(WIN32)
2273 WSADATA wsd;
2274 WSAStartup(MAKEWORD(2, 2), &wsd);
2275#endif
2276
2277 if (!pidfile)
2278 return fio_server();
2279
2280 if (check_existing_pidfile(pidfile)) {
2281 log_err("fio: pidfile %s exists and server appears alive\n",
2282 pidfile);
2283 free(pidfile);
2284 return -1;
2285 }
2286
2287 pid = fork();
2288 if (pid < 0) {
2289 log_err("fio: failed server fork: %s", strerror(errno));
2290 free(pidfile);
2291 return -1;
2292 } else if (pid) {
2293 ret = write_pid(pid, pidfile);
2294 free(pidfile);
2295 _exit(ret);
2296 }
2297
2298 setsid();
2299 openlog("fio", LOG_NDELAY|LOG_NOWAIT|LOG_PID, LOG_USER);
2300 log_syslog = 1;
2301 close(STDIN_FILENO);
2302 close(STDOUT_FILENO);
2303 close(STDERR_FILENO);
2304 f_out = NULL;
2305 f_err = NULL;
2306
2307 ret = fio_server();
2308
2309 closelog();
2310 unlink(pidfile);
2311 free(pidfile);
2312 return ret;
2313}
2314
2315void fio_server_set_arg(const char *arg)
2316{
2317 fio_server_arg = strdup(arg);
2318}