[fio.git] / server.c
1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4#include <unistd.h>
5#include <errno.h>
6#include <poll.h>
7#include <sys/types.h>
8#include <sys/wait.h>
9#include <sys/socket.h>
10#include <sys/stat.h>
11#include <sys/un.h>
12#include <sys/uio.h>
13#include <netinet/in.h>
14#include <arpa/inet.h>
15#include <netdb.h>
16#include <syslog.h>
17#include <signal.h>
18#ifdef CONFIG_ZLIB
19#include <zlib.h>
20#endif
21
22#include "fio.h"
23#include "options.h"
24#include "server.h"
25#include "crc/crc16.h"
26#include "lib/ieee754.h"
27#include "verify-state.h"
28#include "smalloc.h"
29
30int fio_net_port = FIO_NET_PORT;
31
32bool exit_backend = false;
33
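/*
 * sk_entry transmit flags: SK_F_FREE means the payload was heap allocated by
 * the caller and is free()'d after transmit; SK_F_COPY duplicates the payload
 * into smalloc'd storage at queue time so stack buffers may be passed;
 * SK_F_SIMPLE marks a command with no payload; SK_F_VEC marks an entry that
 * heads a chain of fragments sent as external PDUs; SK_F_INLINE transmits
 * immediately from the queueing context instead of deferring to the
 * connection's transmit loop.
 */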
34enum {
35 SK_F_FREE = 1,
36 SK_F_COPY = 2,
37 SK_F_SIMPLE = 4,
38 SK_F_VEC = 8,
39 SK_F_INLINE = 16,
40};
41
42struct sk_entry {
43 struct flist_head list; /* link on sk_out->list */
44 int flags; /* SK_F_* */
45 int opcode; /* Actual command fields */
46 void *buf;
47 off_t size;
48 uint64_t tag;
49 struct flist_head next; /* Other sk_entry's, if linked command */
50};
51
52static char *fio_server_arg;
53static char *bind_sock;
54static struct sockaddr_in saddr_in;
55static struct sockaddr_in6 saddr_in6;
56static int use_ipv6;
57#ifdef CONFIG_ZLIB
58static unsigned int has_zlib = 1;
59#else
60static unsigned int has_zlib = 0;
61#endif
62static unsigned int use_zlib;
63static char me[128];
64
65static pthread_key_t sk_out_key;
66
67#ifdef WIN32
68static char *fio_server_pipe_name = NULL;
69static HANDLE hjob = INVALID_HANDLE_VALUE;
70struct ffi_element {
71 union {
72 pthread_t thread;
73 HANDLE hProcess;
74 };
75 bool is_thread;
76};
77#endif
78
79struct fio_fork_item {
80 struct flist_head list;
81 int exitval;
82 int signal;
83 int exited;
84#ifdef WIN32
85 struct ffi_element element;
86#else
87 pid_t pid;
88#endif
89};
90
91struct cmd_reply {
92 struct fio_sem lock;
93 void *data;
94 size_t size;
95 int error;
96};
97
98static const char *fio_server_ops[FIO_NET_CMD_NR] = {
99 "",
100 "QUIT",
101 "EXIT",
102 "JOB",
103 "JOBLINE",
104 "TEXT",
105 "TS",
106 "GS",
107 "SEND_ETA",
108 "ETA",
109 "PROBE",
110 "START",
111 "STOP",
112 "DISK_UTIL",
113 "SERVER_START",
114 "ADD_JOB",
115 "RUN",
116 "IOLOG",
117 "UPDATE_JOB",
118 "LOAD_FILE",
119 "VTRIGGER",
120 "SENDFILE",
121 "JOB_OPT",
122};
123
124static void sk_lock(struct sk_out *sk_out)
125{
126 fio_sem_down(&sk_out->lock);
127}
128
129static void sk_unlock(struct sk_out *sk_out)
130{
131 fio_sem_up(&sk_out->lock);
132}
133
134void sk_out_assign(struct sk_out *sk_out)
135{
136 if (!sk_out)
137 return;
138
139 sk_lock(sk_out);
140 sk_out->refs++;
141 sk_unlock(sk_out);
142 pthread_setspecific(sk_out_key, sk_out);
143}
144
145static void sk_out_free(struct sk_out *sk_out)
146{
147 __fio_sem_remove(&sk_out->lock);
148 __fio_sem_remove(&sk_out->wait);
149 __fio_sem_remove(&sk_out->xmit);
150 sfree(sk_out);
151}
152
153static int __sk_out_drop(struct sk_out *sk_out)
154{
155 if (sk_out) {
156 int refs;
157
158 sk_lock(sk_out);
159 assert(sk_out->refs != 0);
160 refs = --sk_out->refs;
161 sk_unlock(sk_out);
162
163 if (!refs) {
164 sk_out_free(sk_out);
165 pthread_setspecific(sk_out_key, NULL);
166 return 0;
167 }
168 }
169
170 return 1;
171}
172
173void sk_out_drop(void)
174{
175 struct sk_out *sk_out;
176
177 sk_out = pthread_getspecific(sk_out_key);
178 __sk_out_drop(sk_out);
179}
180
181static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
182 uint32_t pdu_len, uint64_t tag)
183{
184 memset(cmd, 0, sizeof(*cmd));
185
186 cmd->version = __cpu_to_le16(FIO_SERVER_VER);
187 cmd->opcode = cpu_to_le16(opcode);
188 cmd->tag = cpu_to_le64(tag);
189 cmd->pdu_len = cpu_to_le32(pdu_len);
190}
191
192
193static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
194 const void *pdu, uint32_t pdu_len, uint64_t tag)
195{
196 __fio_init_net_cmd(cmd, opcode, pdu_len, tag);
197
198 if (pdu)
199 memcpy(&cmd->payload, pdu, pdu_len);
200}
201
202const char *fio_server_op(unsigned int op)
203{
204 static char buf[32];
205
206 if (op < FIO_NET_CMD_NR)
207 return fio_server_ops[op];
208
209 sprintf(buf, "UNKNOWN/%d", op);
210 return buf;
211}
212
213static ssize_t iov_total_len(const struct iovec *iov, int count)
214{
215 ssize_t ret = 0;
216
217 while (count--) {
218 ret += iov->iov_len;
219 iov++;
220 }
221
222 return ret;
223}
224
225static int fio_sendv_data(int sk, struct iovec *iov, int count)
226{
227 ssize_t total_len = iov_total_len(iov, count);
228 ssize_t ret;
229
230 do {
231 ret = writev(sk, iov, count);
232 if (ret > 0) {
233 total_len -= ret;
234 if (!total_len)
235 break;
236
237 while (ret) {
238 if (ret >= iov->iov_len) {
239 ret -= iov->iov_len;
240 iov++;
241 continue;
242 }
243 iov->iov_base += ret;
244 iov->iov_len -= ret;
245 ret = 0;
246 }
247 } else if (!ret)
248 break;
249 else if (errno == EAGAIN || errno == EINTR)
250 continue;
251 else
252 break;
253 } while (!exit_backend);
254
255 if (!total_len)
256 return 0;
257
258 return 1;
259}
260
261static int fio_send_data(int sk, const void *p, unsigned int len)
262{
263 struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
264
265 assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
266
267 return fio_sendv_data(sk, &iov, 1);
268}
269
270bool fio_server_poll_fd(int fd, short events, int timeout)
271{
272 struct pollfd pfd = {
273 .fd = fd,
274 .events = events,
275 };
276 int ret;
277
278 ret = poll(&pfd, 1, timeout);
279 if (ret < 0) {
280 if (errno == EINTR)
281 return false;
282 log_err("fio: poll: %s\n", strerror(errno));
283 return false;
284 } else if (!ret) {
285 return false;
286 }
287 if (pfd.revents & events)
288 return true;
289 return false;
290}
291
292static int fio_recv_data(int sk, void *buf, unsigned int len, bool wait)
293{
294 int flags;
295 char *p = buf;
296
297 if (wait)
298 flags = MSG_WAITALL;
299 else
300 flags = OS_MSG_DONTWAIT;
301
302 do {
303 int ret = recv(sk, p, len, flags);
304
305 if (ret > 0) {
306 len -= ret;
307 if (!len)
308 break;
309 p += ret;
310 continue;
311 } else if (!ret)
312 break;
313 else if (errno == EAGAIN || errno == EINTR) {
314 if (wait)
315 continue;
316 break;
317 } else
318 break;
319 } while (!exit_backend);
320
321 if (!len)
322 return 0;
323
324 return -1;
325}
326
327static int verify_convert_cmd(struct fio_net_cmd *cmd)
328{
329 uint16_t crc;
330
331 cmd->cmd_crc16 = le16_to_cpu(cmd->cmd_crc16);
332 cmd->pdu_crc16 = le16_to_cpu(cmd->pdu_crc16);
333
334 crc = fio_crc16(cmd, FIO_NET_CMD_CRC_SZ);
335 if (crc != cmd->cmd_crc16) {
336 log_err("fio: server bad crc on command (got %x, wanted %x)\n",
337 cmd->cmd_crc16, crc);
338 fprintf(f_err, "fio: server bad crc on command (got %x, wanted %x)\n",
339 cmd->cmd_crc16, crc);
340 return 1;
341 }
342
343 cmd->version = le16_to_cpu(cmd->version);
344 cmd->opcode = le16_to_cpu(cmd->opcode);
345 cmd->flags = le32_to_cpu(cmd->flags);
346 cmd->tag = le64_to_cpu(cmd->tag);
347 cmd->pdu_len = le32_to_cpu(cmd->pdu_len);
348
349 switch (cmd->version) {
350 case FIO_SERVER_VER:
351 break;
352 default:
353 log_err("fio: bad server cmd version %d\n", cmd->version);
354 fprintf(f_err, "fio: client/server version mismatch (%d != %d)\n",
355 cmd->version, FIO_SERVER_VER);
356 return 1;
357 }
358
359 if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
360 log_err("fio: command payload too large: %u\n", cmd->pdu_len);
361 return 1;
362 }
363
364 return 0;
365}
366
367/*
368 * Read (and defragment, if necessary) incoming commands
369 */
370struct fio_net_cmd *fio_net_recv_cmd(int sk, bool wait)
371{
372 struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
373 size_t cmd_size = 0, pdu_offset = 0;
374 uint16_t crc;
375 int ret, first = 1;
376 void *pdu = NULL;
377
378 do {
379 ret = fio_recv_data(sk, &cmd, sizeof(cmd), wait);
380 if (ret)
381 break;
382
383 /* We have a command, verify it and swap if need be */
384 ret = verify_convert_cmd(&cmd);
385 if (ret)
386 break;
387
388 if (first) {
389 /* if this is text, add room for \0 at the end */
390 cmd_size = sizeof(cmd) + cmd.pdu_len + 1;
391 assert(!cmdret);
392 } else
393 cmd_size += cmd.pdu_len;
394
395 if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) {
396 log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size);
397 ret = 1;
398 break;
399 }
400
401 tmp = realloc(cmdret, cmd_size);
402 if (!tmp) {
403 log_err("fio: server failed allocating cmd\n");
404 ret = 1;
405 break;
406 }
407 cmdret = tmp;
408
409 if (first)
410 memcpy(cmdret, &cmd, sizeof(cmd));
411 else if (cmdret->opcode != cmd.opcode) {
412 log_err("fio: fragment opcode mismatch (%d != %d)\n",
413 cmdret->opcode, cmd.opcode);
414 ret = 1;
415 break;
416 }
417
418 if (!cmd.pdu_len)
419 break;
420
421 /* There's payload, get it */
422 pdu = (char *) cmdret->payload + pdu_offset;
423 ret = fio_recv_data(sk, pdu, cmd.pdu_len, wait);
424 if (ret)
425 break;
426
427 /* Verify payload crc */
428 crc = fio_crc16(pdu, cmd.pdu_len);
429 if (crc != cmd.pdu_crc16) {
430 log_err("fio: server bad crc on payload ");
431 log_err("(got %x, wanted %x)\n", cmd.pdu_crc16, crc);
432 ret = 1;
433 break;
434 }
435
436 pdu_offset += cmd.pdu_len;
437 if (!first)
438 cmdret->pdu_len += cmd.pdu_len;
439 first = 0;
440 } while (cmd.flags & FIO_NET_CMD_F_MORE);
441
442 if (ret) {
443 free(cmdret);
444 cmdret = NULL;
445 } else if (cmdret) {
446 /* zero-terminate text input */
447 if (cmdret->pdu_len) {
448 if (cmdret->opcode == FIO_NET_CMD_TEXT) {
449 struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload;
450 char *buf = (char *) __pdu->buf;
451 int len = le32_to_cpu(__pdu->buf_len);
452
453 buf[len] = '\0';
454 } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
455 struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload;
456 char *buf = (char *) __pdu->buf;
457 int len = le32_to_cpu(__pdu->buf_len);
458
459 buf[len] = '\0';
460 }
461 }
462
463 /* frag flag is internal */
464 cmdret->flags &= ~FIO_NET_CMD_F_MORE;
465 }
466
467 return cmdret;
468}
469
470static void add_reply(uint64_t tag, struct flist_head *list)
471{
472 struct fio_net_cmd_reply *reply;
473
474 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
475 flist_add_tail(&reply->list, list);
476}
477
478static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
479{
480 struct fio_net_cmd_reply *reply;
481
482 reply = calloc(1, sizeof(*reply));
483 INIT_FLIST_HEAD(&reply->list);
484 fio_gettime(&reply->ts, NULL);
485 reply->saved_tag = tag;
486 reply->opcode = opcode;
487
488 return (uintptr_t) reply;
489}
490
491static void free_reply(uint64_t tag)
492{
493 struct fio_net_cmd_reply *reply;
494
495 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
496 free(reply);
497}
498
499static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
500{
501 uint32_t pdu_len;
502
503 cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
504
505 pdu_len = le32_to_cpu(cmd->pdu_len);
506 cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
507}
508
509static void fio_net_cmd_crc(struct fio_net_cmd *cmd)
510{
511 fio_net_cmd_crc_pdu(cmd, cmd->payload);
512}
513
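/*
 * Send a command with the payload inlined after the header. Payloads larger
 * than FIO_SERVER_MAX_FRAGMENT_PDU are split into fragments, with
 * FIO_NET_CMD_F_MORE set on all but the last. If 'list' is non-NULL, a reply
 * record keyed by the tag is stored for later matching.
 */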
514int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
515 uint64_t *tagptr, struct flist_head *list)
516{
517 struct fio_net_cmd *cmd = NULL;
518 size_t this_len, cur_len = 0;
519 uint64_t tag;
520 int ret;
521
522 if (list) {
523 assert(tagptr);
524 tag = *tagptr = alloc_reply(*tagptr, opcode);
525 } else
526 tag = tagptr ? *tagptr : 0;
527
528 do {
529 this_len = size;
530 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
531 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
532
533 if (!cmd || cur_len < sizeof(*cmd) + this_len) {
534 if (cmd)
535 free(cmd);
536
537 cur_len = sizeof(*cmd) + this_len;
538 cmd = malloc(cur_len);
539 }
540
541 fio_init_net_cmd(cmd, opcode, buf, this_len, tag);
542
543 if (this_len < size)
544 cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);
545
546 fio_net_cmd_crc(cmd);
547
548 ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
549 size -= this_len;
550 buf += this_len;
551 } while (!ret && size);
552
553 if (list) {
554 if (ret)
555 free_reply(tag);
556 else
557 add_reply(tag, list);
558 }
559
560 if (cmd)
561 free(cmd);
562
563 return ret;
564}
565
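/*
 * Allocate an sk_entry for deferred transmission. With SK_F_COPY the payload
 * is duplicated into smalloc'd storage, so the caller's (possibly stack)
 * buffer need not outlive the queueing.
 */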
566static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf,
567 size_t size, uint64_t *tagptr,
568 int flags)
569{
570 struct sk_entry *entry;
571
572 entry = smalloc(sizeof(*entry));
573 if (!entry)
574 return NULL;
575
576 INIT_FLIST_HEAD(&entry->next);
577 entry->opcode = opcode;
578 if (flags & SK_F_COPY) {
579 entry->buf = smalloc(size);
580 memcpy(entry->buf, buf, size);
581 } else
582 entry->buf = buf;
583
584 entry->size = size;
585 if (tagptr)
586 entry->tag = *tagptr;
587 else
588 entry->tag = 0;
589 entry->flags = flags;
590 return entry;
591}
592
593static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry);
594
595static void fio_net_queue_entry(struct sk_entry *entry)
596{
597 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
598
599 if (entry->flags & SK_F_INLINE)
600 handle_sk_entry(sk_out, entry);
601 else {
602 sk_lock(sk_out);
603 flist_add_tail(&entry->list, &sk_out->list);
604 sk_unlock(sk_out);
605
606 fio_sem_up(&sk_out->wait);
607 }
608}
609
610static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
611 uint64_t *tagptr, int flags)
612{
613 struct sk_entry *entry;
614
615 entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
616 if (entry) {
617 fio_net_queue_entry(entry);
618 return 0;
619 }
620
621 return 1;
622}
623
624static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
625{
626 struct fio_net_cmd cmd;
627
628 fio_init_net_cmd(&cmd, opcode, NULL, 0, tag);
629 fio_net_cmd_crc(&cmd);
630
631 return fio_send_data(sk, &cmd, sizeof(cmd));
632}
633
634/*
635 * If 'list' is non-NULL, then allocate and store the sent command for
636 * later verification.
637 */
638int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
639 struct flist_head *list)
640{
641 int ret;
642
643 if (list)
644 tag = alloc_reply(tag, opcode);
645
646 ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
647 if (ret) {
648 if (list)
649 free_reply(tag);
650
651 return ret;
652 }
653
654 if (list)
655 add_reply(tag, list);
656
657 return 0;
658}
659
660static int fio_net_queue_quit(void)
661{
662 dprint(FD_NET, "server: sending quit\n");
663
664 return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE);
665}
666
667int fio_net_send_quit(int sk)
668{
669 dprint(FD_NET, "server: sending quit\n");
670
671 return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
672}
673
674static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal)
675{
676 struct cmd_end_pdu epdu;
677 uint64_t tag = 0;
678
679 if (cmd)
680 tag = cmd->tag;
681
682 epdu.error = __cpu_to_le32(error);
683 epdu.signal = __cpu_to_le32(signal);
684 return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY);
685}
686
687static int fio_net_queue_stop(int error, int signal)
688{
689 dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
690 return fio_net_send_ack(NULL, error, signal);
691}
692
693#ifdef WIN32
694static void fio_server_add_fork_item(struct ffi_element *element, struct flist_head *list)
695{
696 struct fio_fork_item *ffi;
697
698 ffi = malloc(sizeof(*ffi));
699 ffi->exitval = 0;
700 ffi->signal = 0;
701 ffi->exited = 0;
702 ffi->element = *element;
703 flist_add_tail(&ffi->list, list);
704}
705
706static void fio_server_add_conn_pid(struct flist_head *conn_list, HANDLE hProcess)
707{
708 struct ffi_element element = {.hProcess = hProcess, .is_thread=FALSE};
709 dprint(FD_NET, "server: forked off connection job (tid=%u)\n", (int) element.thread);
710
711 fio_server_add_fork_item(&element, conn_list);
712}
713
714static void fio_server_add_job_pid(struct flist_head *job_list, pthread_t thread)
715{
716 struct ffi_element element = {.thread = thread, .is_thread=TRUE};
717 dprint(FD_NET, "server: forked off job job (tid=%u)\n", (int) element.thread);
718 fio_server_add_fork_item(&element, job_list);
719}
720
721static void fio_server_check_fork_item(struct fio_fork_item *ffi)
722{
723 int ret;
724
725 if (ffi->element.is_thread) {
726
727 ret = pthread_kill(ffi->element.thread, 0);
728 if (ret) {
729 int rev_val;
730 pthread_join(ffi->element.thread, (void**) &rev_val); /*if the thread is dead, then join it to get status*/
731
732 ffi->exitval = rev_val;
733 if (ffi->exitval)
734 log_err("thread (tid=%u) exited with %x\n", (int) ffi->element.thread, (int) ffi->exitval);
735 dprint(FD_PROCESS, "thread (tid=%u) exited with %x\n", (int) ffi->element.thread, (int) ffi->exitval);
736 ffi->exited = 1;
737 }
738 } else {
739 DWORD exit_val;
740 GetExitCodeProcess(ffi->element.hProcess, &exit_val);
741
742 if (exit_val != STILL_ACTIVE) {
743 dprint(FD_PROCESS, "process %u exited with %d\n", GetProcessId(ffi->element.hProcess), exit_val);
744 ffi->exited = 1;
745 ffi->exitval = exit_val;
746 }
747 }
748}
749#else
750static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
751{
752 struct fio_fork_item *ffi;
753
754 ffi = malloc(sizeof(*ffi));
755 ffi->exitval = 0;
756 ffi->signal = 0;
757 ffi->exited = 0;
758 ffi->pid = pid;
759 flist_add_tail(&ffi->list, list);
760}
761
762static void fio_server_add_conn_pid(struct flist_head *conn_list, pid_t pid)
763{
764 dprint(FD_NET, "server: forked off connection job (pid=%u)\n", (int) pid);
765 fio_server_add_fork_item(pid, conn_list);
766}
767
768static void fio_server_add_job_pid(struct flist_head *job_list, pid_t pid)
769{
770 dprint(FD_NET, "server: forked off job job (pid=%u)\n", (int) pid);
771 fio_server_add_fork_item(pid, job_list);
772}
773
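/*
 * Non-blocking check whether a forked connection/job child has exited,
 * recording its exit value or terminating signal.
 */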
774static void fio_server_check_fork_item(struct fio_fork_item *ffi)
775{
776 int ret, status;
777
778 ret = waitpid(ffi->pid, &status, WNOHANG);
779 if (ret < 0) {
780 if (errno == ECHILD) {
781 log_err("fio: connection pid %u disappeared\n", (int) ffi->pid);
782 ffi->exited = 1;
783 } else
784 log_err("fio: waitpid: %s\n", strerror(errno));
785 } else if (ret == ffi->pid) {
786 if (WIFSIGNALED(status)) {
787 ffi->signal = WTERMSIG(status);
788 ffi->exited = 1;
789 }
790 if (WIFEXITED(status)) {
791 if (WEXITSTATUS(status))
792 ffi->exitval = WEXITSTATUS(status);
793 ffi->exited = 1;
794 }
795 }
796}
797#endif
798
799static void fio_server_fork_item_done(struct fio_fork_item *ffi, bool stop)
800{
801#ifdef WIN32
802 if (ffi->element.is_thread)
803 dprint(FD_NET, "tid %u exited, sig=%u, exitval=%d\n", (int) ffi->element.thread, ffi->signal, ffi->exitval);
804 else {
805 dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) GetProcessId(ffi->element.hProcess), ffi->signal, ffi->exitval);
806 CloseHandle(ffi->element.hProcess);
807 ffi->element.hProcess = INVALID_HANDLE_VALUE;
808 }
809#else
810 dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval);
811#endif
812
813 /*
814 * Fold STOP and QUIT...
815 */
816 if (stop) {
817 fio_net_queue_stop(ffi->exitval, ffi->signal);
818 fio_net_queue_quit();
819 }
820
821 flist_del(&ffi->list);
822 free(ffi);
823}
824
825static void fio_server_check_fork_items(struct flist_head *list, bool stop)
826{
827 struct flist_head *entry, *tmp;
828 struct fio_fork_item *ffi;
829
830 flist_for_each_safe(entry, tmp, list) {
831 ffi = flist_entry(entry, struct fio_fork_item, list);
832
833 fio_server_check_fork_item(ffi);
834
835 if (ffi->exited)
836 fio_server_fork_item_done(ffi, stop);
837 }
838}
839
840static void fio_server_check_jobs(struct flist_head *job_list)
841{
842 fio_server_check_fork_items(job_list, true);
843}
844
845static void fio_server_check_conns(struct flist_head *conn_list)
846{
847 fio_server_check_fork_items(conn_list, false);
848}
849
850static int handle_load_file_cmd(struct fio_net_cmd *cmd)
851{
852 struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload;
853 void *file_name = pdu->file;
854 struct cmd_start_pdu spdu;
855
856 dprint(FD_NET, "server: loading local file %s\n", (char *) file_name);
857
858 pdu->name_len = le16_to_cpu(pdu->name_len);
859 pdu->client_type = le16_to_cpu(pdu->client_type);
860
861 if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) {
862 fio_net_queue_quit();
863 return -1;
864 }
865
866 spdu.jobs = cpu_to_le32(thread_number);
867 spdu.stat_outputs = cpu_to_le32(stat_number);
868 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
869 return 0;
870}
871
872#ifdef WIN32
873static void *fio_backend_thread(void *data)
874{
875 int ret;
876 struct sk_out *sk_out = (struct sk_out *) data;
877
878 sk_out_assign(sk_out);
879
880 ret = fio_backend(sk_out);
881 sk_out_drop();
882
883 pthread_exit((void*) (intptr_t) ret);
884 return NULL;
885}
886#endif
887
888static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list,
889 struct fio_net_cmd *cmd)
890{
891 int ret;
892
893 fio_time_init();
894 set_genesis_time();
895
896#ifdef WIN32
897 {
898 pthread_t thread;
 899 /* both this thread and backend_thread call sk_out_assign() to increment the
 900 * ref count twice. This ensures the struct stays valid until both threads are done with it
901 */
902 sk_out_assign(sk_out);
903 ret = pthread_create(&thread, NULL, fio_backend_thread, sk_out);
904 if (ret) {
905 log_err("pthread_create: %s\n", strerror(ret));
906 return ret;
907 }
908
909 fio_server_add_job_pid(job_list, thread);
910 return ret;
911 }
912#else
913 {
914 pid_t pid;
915 sk_out_assign(sk_out);
916 pid = fork();
917 if (pid) {
918 fio_server_add_job_pid(job_list, pid);
919 return 0;
920 }
921
922 ret = fio_backend(sk_out);
923 free_threads_shm();
924 sk_out_drop();
925 _exit(ret);
926 }
927#endif
928}
929
930static int handle_job_cmd(struct fio_net_cmd *cmd)
931{
932 struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
933 void *buf = pdu->buf;
934 struct cmd_start_pdu spdu;
935
936 pdu->buf_len = le32_to_cpu(pdu->buf_len);
937 pdu->client_type = le32_to_cpu(pdu->client_type);
938
939 if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
940 fio_net_queue_quit();
941 return -1;
942 }
943
944 spdu.jobs = cpu_to_le32(thread_number);
945 spdu.stat_outputs = cpu_to_le32(stat_number);
946
947 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
948 return 0;
949}
950
951static int handle_jobline_cmd(struct fio_net_cmd *cmd)
952{
953 void *pdu = cmd->payload;
954 struct cmd_single_line_pdu *cslp;
955 struct cmd_line_pdu *clp;
956 unsigned long offset;
957 struct cmd_start_pdu spdu;
958 char **argv;
959 int i;
960
961 clp = pdu;
962 clp->lines = le16_to_cpu(clp->lines);
963 clp->client_type = le16_to_cpu(clp->client_type);
964 argv = malloc(clp->lines * sizeof(char *));
965 offset = sizeof(*clp);
966
967 dprint(FD_NET, "server: %d command line args\n", clp->lines);
968
969 for (i = 0; i < clp->lines; i++) {
970 cslp = pdu + offset;
971 argv[i] = (char *) cslp->text;
972
973 offset += sizeof(*cslp) + le16_to_cpu(cslp->len);
974 dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
975 }
976
977 if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
978 fio_net_queue_quit();
979 free(argv);
980 return -1;
981 }
982
983 free(argv);
984
985 spdu.jobs = cpu_to_le32(thread_number);
986 spdu.stat_outputs = cpu_to_le32(stat_number);
987
988 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
989 return 0;
990}
991
992static int handle_probe_cmd(struct fio_net_cmd *cmd)
993{
994 struct cmd_client_probe_pdu *pdu = (struct cmd_client_probe_pdu *) cmd->payload;
995 uint64_t tag = cmd->tag;
996 struct cmd_probe_reply_pdu probe = {
997#ifdef CONFIG_BIG_ENDIAN
998 .bigendian = 1,
999#endif
1000 .os = FIO_OS,
1001 .arch = FIO_ARCH,
1002 .bpp = sizeof(void *),
1003 .cpus = __cpu_to_le32(cpus_configured()),
1004 };
1005
1006 dprint(FD_NET, "server: sending probe reply\n");
1007
1008 strcpy(me, (char *) pdu->server);
1009
1010 gethostname((char *) probe.hostname, sizeof(probe.hostname));
1011 snprintf((char *) probe.fio_version, sizeof(probe.fio_version), "%s",
1012 fio_version_string);
1013
1014 /*
1015 * If the client supports compression and we do too, then enable it
1016 */
1017 if (has_zlib && le64_to_cpu(pdu->flags) & FIO_PROBE_FLAG_ZLIB) {
1018 probe.flags = __cpu_to_le64(FIO_PROBE_FLAG_ZLIB);
1019 use_zlib = 1;
1020 } else {
1021 probe.flags = 0;
1022 use_zlib = 0;
1023 }
1024
1025 return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY);
1026}
1027
1028static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
1029{
1030 struct jobs_eta *je;
1031 uint64_t tag = cmd->tag;
1032 size_t size;
1033 int i;
1034
1035 dprint(FD_NET, "server sending status\n");
1036
1037 /*
1038 * Fake ETA return if we don't have a local one, otherwise the client
1039 * will end up timing out waiting for a response to the ETA request
1040 */
1041 je = get_jobs_eta(true, &size);
1042 if (!je) {
1043 size = sizeof(*je);
1044 je = calloc(1, size);
1045 } else {
1046 je->nr_running = cpu_to_le32(je->nr_running);
1047 je->nr_ramp = cpu_to_le32(je->nr_ramp);
1048 je->nr_pending = cpu_to_le32(je->nr_pending);
1049 je->nr_setting_up = cpu_to_le32(je->nr_setting_up);
1050 je->files_open = cpu_to_le32(je->files_open);
1051
1052 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1053 je->m_rate[i] = cpu_to_le64(je->m_rate[i]);
1054 je->t_rate[i] = cpu_to_le64(je->t_rate[i]);
1055 je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
1056 je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
1057 je->rate[i] = cpu_to_le64(je->rate[i]);
1058 je->iops[i] = cpu_to_le32(je->iops[i]);
1059 }
1060
1061 je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
1062 je->eta_sec = cpu_to_le64(je->eta_sec);
1063 je->nr_threads = cpu_to_le32(je->nr_threads);
1064 je->is_pow2 = cpu_to_le32(je->is_pow2);
1065 je->unit_base = cpu_to_le32(je->unit_base);
1066 }
1067
1068 fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE);
1069 return 0;
1070}
1071
1072static int send_update_job_reply(uint64_t __tag, int error)
1073{
1074 uint64_t tag = __tag;
1075 uint32_t pdu_error;
1076
1077 pdu_error = __cpu_to_le32(error);
1078 return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY);
1079}
1080
1081static int handle_update_job_cmd(struct fio_net_cmd *cmd)
1082{
1083 struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
1084 struct thread_data *td;
1085 uint32_t tnumber;
1086 int ret;
1087
1088 tnumber = le32_to_cpu(pdu->thread_number);
1089
1090 dprint(FD_NET, "server: updating options for job %u\n", tnumber);
1091
1092 if (!tnumber || tnumber > thread_number) {
1093 send_update_job_reply(cmd->tag, ENODEV);
1094 return 0;
1095 }
1096
1097 td = tnumber_to_td(tnumber);
1098 ret = convert_thread_options_to_cpu(&td->o, &pdu->top,
1099 cmd->pdu_len - offsetof(struct cmd_add_job_pdu, top));
1100 send_update_job_reply(cmd->tag, ret);
1101 return 0;
1102}
1103
1104static int handle_trigger_cmd(struct fio_net_cmd *cmd, struct flist_head *job_list)
1105{
1106 struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload;
1107 char *buf = (char *) pdu->cmd;
1108 struct all_io_list *rep;
1109 size_t sz;
1110
1111 pdu->len = le16_to_cpu(pdu->len);
1112 buf[pdu->len] = '\0';
1113
1114 rep = get_all_io_list(IO_LIST_ALL, &sz);
1115 if (!rep) {
1116 struct all_io_list state;
1117
1118 state.threads = cpu_to_le64((uint64_t) 0);
1119 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE);
1120 } else
1121 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE);
1122
1123 fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
1124 fio_server_check_jobs(job_list);
1125 exec_trigger(buf);
1126 return 0;
1127}
1128
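/*
 * Dispatch one received command. A non-zero return ends the connection loop;
 * FIO_NET_CMD_EXIT additionally sets exit_backend to stop the whole server.
 */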
1129static int handle_command(struct sk_out *sk_out, struct flist_head *job_list,
1130 struct fio_net_cmd *cmd)
1131{
1132 int ret;
1133
1134 dprint(FD_NET, "server: got op [%s], pdu=%u, tag=%llx\n",
1135 fio_server_op(cmd->opcode), cmd->pdu_len,
1136 (unsigned long long) cmd->tag);
1137
1138 switch (cmd->opcode) {
1139 case FIO_NET_CMD_QUIT:
1140 fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
1141 ret = 0;
1142 break;
1143 case FIO_NET_CMD_EXIT:
1144 exit_backend = true;
1145 return -1;
1146 case FIO_NET_CMD_LOAD_FILE:
1147 ret = handle_load_file_cmd(cmd);
1148 break;
1149 case FIO_NET_CMD_JOB:
1150 ret = handle_job_cmd(cmd);
1151 break;
1152 case FIO_NET_CMD_JOBLINE:
1153 ret = handle_jobline_cmd(cmd);
1154 break;
1155 case FIO_NET_CMD_PROBE:
1156 ret = handle_probe_cmd(cmd);
1157 break;
1158 case FIO_NET_CMD_SEND_ETA:
1159 ret = handle_send_eta_cmd(cmd);
1160 break;
1161 case FIO_NET_CMD_RUN:
1162 ret = handle_run_cmd(sk_out, job_list, cmd);
1163 break;
1164 case FIO_NET_CMD_UPDATE_JOB:
1165 ret = handle_update_job_cmd(cmd);
1166 break;
1167 case FIO_NET_CMD_VTRIGGER:
1168 ret = handle_trigger_cmd(cmd, job_list);
1169 break;
1170 case FIO_NET_CMD_SENDFILE: {
1171 struct cmd_sendfile_reply *in;
1172 struct cmd_reply *rep;
1173
1174 rep = (struct cmd_reply *) (uintptr_t) cmd->tag;
1175
1176 in = (struct cmd_sendfile_reply *) cmd->payload;
1177 in->size = le32_to_cpu(in->size);
1178 in->error = le32_to_cpu(in->error);
1179 if (in->error) {
1180 ret = 1;
1181 rep->error = in->error;
1182 } else {
1183 ret = 0;
1184 rep->data = smalloc(in->size);
1185 if (!rep->data) {
1186 ret = 1;
1187 rep->error = ENOMEM;
1188 } else {
1189 rep->size = in->size;
1190 memcpy(rep->data, in->data, in->size);
1191 }
1192 }
1193 fio_sem_up(&rep->lock);
1194 break;
1195 }
1196 default:
1197 log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
1198 ret = 1;
1199 }
1200
1201 return ret;
1202}
1203
1204/*
1205 * Send a command with a separate PDU, not inlined in the command
1206 */
1207static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
1208 off_t size, uint64_t tag, uint32_t flags)
1209{
1210 struct fio_net_cmd cmd;
1211 struct iovec iov[2];
1212 size_t this_len;
1213 int ret;
1214
1215 iov[0].iov_base = (void *) &cmd;
1216 iov[0].iov_len = sizeof(cmd);
1217
1218 do {
1219 uint32_t this_flags = flags;
1220
1221 this_len = size;
1222 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
1223 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
1224
1225 if (this_len < size)
1226 this_flags |= FIO_NET_CMD_F_MORE;
1227
1228 __fio_init_net_cmd(&cmd, opcode, this_len, tag);
1229 cmd.flags = __cpu_to_le32(this_flags);
1230 fio_net_cmd_crc_pdu(&cmd, buf);
1231
1232 iov[1].iov_base = (void *) buf;
1233 iov[1].iov_len = this_len;
1234
1235 ret = fio_sendv_data(sk, iov, 2);
1236 size -= this_len;
1237 buf += this_len;
1238 } while (!ret && size);
1239
1240 return ret;
1241}
1242
1243static void finish_entry(struct sk_entry *entry)
1244{
1245 if (entry->flags & SK_F_FREE)
1246 free(entry->buf);
1247 else if (entry->flags & SK_F_COPY)
1248 sfree(entry->buf);
1249
1250 sfree(entry);
1251}
1252
1253static void entry_set_flags(struct sk_entry *entry, struct flist_head *list,
1254 unsigned int *flags)
1255{
1256 if (!flist_empty(list))
1257 *flags = FIO_NET_CMD_F_MORE;
1258 else
1259 *flags = 0;
1260}
1261
1262static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
1263{
1264 unsigned int flags;
1265 int ret;
1266
1267 entry_set_flags(first, &first->next, &flags);
1268
1269 ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf,
1270 first->size, first->tag, flags);
1271
1272 while (!flist_empty(&first->next)) {
1273 struct sk_entry *next;
1274
1275 next = flist_first_entry(&first->next, struct sk_entry, list);
1276 flist_del_init(&next->list);
1277
1278 entry_set_flags(next, &first->next, &flags);
1279
1280 ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf,
1281 next->size, next->tag, flags);
1282 finish_entry(next);
1283 }
1284
1285 return ret;
1286}
1287
1288static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry)
1289{
1290 int ret;
1291
1292 fio_sem_down(&sk_out->xmit);
1293
1294 if (entry->flags & SK_F_VEC)
1295 ret = send_vec_entry(sk_out, entry);
1296 else if (entry->flags & SK_F_SIMPLE) {
1297 ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode,
1298 entry->tag, NULL);
1299 } else {
1300 ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf,
1301 entry->size, &entry->tag, NULL);
1302 }
1303
1304 fio_sem_up(&sk_out->xmit);
1305
1306 if (ret)
1307 log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
1308
1309 finish_entry(entry);
1310 return ret;
1311}
1312
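/*
 * Flush queued sk_entries: splice sk_out->list onto a private list under the
 * lock, then transmit each entry without holding it.
 */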
1313static int handle_xmits(struct sk_out *sk_out)
1314{
1315 struct sk_entry *entry;
1316 FLIST_HEAD(list);
1317 int ret = 0;
1318
1319 sk_lock(sk_out);
1320 if (flist_empty(&sk_out->list)) {
1321 sk_unlock(sk_out);
1322 return 0;
1323 }
1324
1325 flist_splice_init(&sk_out->list, &list);
1326 sk_unlock(sk_out);
1327
1328 while (!flist_empty(&list)) {
1329 entry = flist_first_entry(&list, struct sk_entry, list);
1330 flist_del(&entry->list);
1331 ret += handle_sk_entry(sk_out, entry);
1332 }
1333
1334 return ret;
1335}
1336
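/*
 * Per-connection loop, run in the forked child (its own process on Windows).
 * Alternates between flushing queued transmissions and polling the socket for
 * incoming commands, reaping forked jobs along the way, and exits the process
 * when the peer disconnects or a command handler signals an end.
 */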
1337static int handle_connection(struct sk_out *sk_out)
1338{
1339 struct fio_net_cmd *cmd = NULL;
1340 FLIST_HEAD(job_list);
1341 int ret = 0;
1342
1343 reset_fio_state();
1344
1345 /* read forever */
1346 while (!exit_backend) {
1347 struct pollfd pfd = {
1348 .fd = sk_out->sk,
1349 .events = POLLIN,
1350 };
1351
1352 do {
1353 int timeout = 1000;
1354
1355 if (!flist_empty(&job_list))
1356 timeout = 100;
1357
1358 handle_xmits(sk_out);
1359
1360 ret = poll(&pfd, 1, 0);
1361 if (ret < 0) {
1362 if (errno == EINTR)
1363 break;
1364 log_err("fio: poll: %s\n", strerror(errno));
1365 break;
1366 } else if (!ret) {
1367 fio_server_check_jobs(&job_list);
1368 fio_sem_down_timeout(&sk_out->wait, timeout);
1369 continue;
1370 }
1371
1372 if (pfd.revents & POLLIN)
1373 break;
1374 if (pfd.revents & (POLLERR|POLLHUP)) {
1375 ret = 1;
1376 break;
1377 }
1378 } while (!exit_backend);
1379
1380 fio_server_check_jobs(&job_list);
1381
1382 if (ret < 0)
1383 break;
1384
1385 if (pfd.revents & POLLIN)
1386 cmd = fio_net_recv_cmd(sk_out->sk, true);
1387 if (!cmd) {
1388 ret = -1;
1389 break;
1390 }
1391
1392 ret = handle_command(sk_out, &job_list, cmd);
1393 if (ret)
1394 break;
1395
1396 free(cmd);
1397 cmd = NULL;
1398 }
1399
1400 if (cmd)
1401 free(cmd);
1402
1403 handle_xmits(sk_out);
1404
1405 close(sk_out->sk);
1406 sk_out->sk = -1;
1407 __sk_out_drop(sk_out);
1408 _exit(ret);
1409}
1410
1411/* get the address on this host bound by the input socket,
1412 * whether it is ipv6 or ipv4 */
1413
1414static int get_my_addr_str(int sk)
1415{
1416 struct sockaddr_in6 myaddr6 = { 0, };
1417 struct sockaddr_in myaddr4 = { 0, };
1418 struct sockaddr *sockaddr_p;
1419 char *net_addr;
1420 socklen_t len;
1421 int ret;
1422
1423 if (use_ipv6) {
1424 len = sizeof(myaddr6);
1425 sockaddr_p = (struct sockaddr * )&myaddr6;
1426 net_addr = (char * )&myaddr6.sin6_addr;
1427 } else {
1428 len = sizeof(myaddr4);
1429 sockaddr_p = (struct sockaddr * )&myaddr4;
1430 net_addr = (char * )&myaddr4.sin_addr;
1431 }
1432
1433 ret = getsockname(sk, sockaddr_p, &len);
1434 if (ret) {
1435 log_err("fio: getsockname: %s\n", strerror(errno));
1436 return -1;
1437 }
1438
1439 if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) {
1440 log_err("inet_ntop: failed to convert addr to string\n");
1441 return -1;
1442 }
1443
1444 dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str);
1445 return 0;
1446}
1447
1448#ifdef WIN32
1449static int handle_connection_process(void)
1450{
1451 WSAPROTOCOL_INFO protocol_info;
1452 DWORD bytes_read;
1453 HANDLE hpipe;
1454 int sk;
1455 struct sk_out *sk_out;
1456 int ret;
1457 char *msg = (char *) "connected";
1458
1459 log_info("server enter accept loop. ProcessID %d\n", GetCurrentProcessId());
1460
1461 hpipe = CreateFile(
1462 fio_server_pipe_name,
1463 GENERIC_READ | GENERIC_WRITE,
1464 0, NULL,
1465 OPEN_EXISTING,
1466 0, NULL);
1467
1468 if (hpipe == INVALID_HANDLE_VALUE) {
 1469 log_err("couldn't open pipe %s error %lu\n",
1470 fio_server_pipe_name, GetLastError());
1471 return -1;
1472 }
1473
1474 if (!ReadFile(hpipe, &protocol_info, sizeof(protocol_info), &bytes_read, NULL)) {
 1475 log_err("couldn't read protocol info from pipe %s error %lu\n", fio_server_pipe_name,
1476 GetLastError());
1477 }
1478
1479 if (use_ipv6) /* use protocol_info to create a duplicate of parents socket */
1480 sk = WSASocket(AF_INET6, SOCK_STREAM, 0, &protocol_info, 0, 0);
1481 else
1482 sk = WSASocket(AF_INET, SOCK_STREAM, 0, &protocol_info, 0, 0);
1483
1484 sk_out = scalloc(1, sizeof(*sk_out));
1485 if (!sk_out) {
1486 CloseHandle(hpipe);
1487 close(sk);
1488 return -1;
1489 }
1490
1491 sk_out->sk = sk;
1492 sk_out->hProcess = INVALID_HANDLE_VALUE;
1493 INIT_FLIST_HEAD(&sk_out->list);
1494 __fio_sem_init(&sk_out->lock, FIO_SEM_UNLOCKED);
1495 __fio_sem_init(&sk_out->wait, FIO_SEM_LOCKED);
1496 __fio_sem_init(&sk_out->xmit, FIO_SEM_UNLOCKED);
1497
1498 get_my_addr_str(sk);
1499
1500 if (!WriteFile(hpipe, msg, strlen(msg), NULL, NULL)) {
 1501 log_err("couldn't write to pipe\n");
1502 close(sk);
1503 return -1;
1504 }
1505 CloseHandle(hpipe);
1506
1507 sk_out_assign(sk_out);
1508
1509 ret = handle_connection(sk_out);
1510 __sk_out_drop(sk_out);
1511 return ret;
1512}
1513#endif
1514
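/*
 * Main accept loop. Polls the listening socket (1000ms timeout, 100ms while
 * connection children are outstanding) so finished connections can be reaped
 * between accepts; each accepted connection gets its own sk_out and is
 * handled in a forked child (or a separate process on Windows).
 */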
1515static int accept_loop(int listen_sk)
1516{
1517 struct sockaddr_in addr;
1518 struct sockaddr_in6 addr6;
1519 socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr);
1520 struct pollfd pfd;
1521 int ret = 0, sk, exitval = 0;
1522 FLIST_HEAD(conn_list);
1523
1524 dprint(FD_NET, "server enter accept loop\n");
1525
1526 fio_set_fd_nonblocking(listen_sk, "server");
1527
1528 while (!exit_backend) {
1529 struct sk_out *sk_out;
1530 const char *from;
1531 char buf[64];
1532#ifdef WIN32
1533 HANDLE hProcess;
1534#else
1535 pid_t pid;
1536#endif
1537 pfd.fd = listen_sk;
1538 pfd.events = POLLIN;
1539 do {
1540 int timeout = 1000;
1541
1542 if (!flist_empty(&conn_list))
1543 timeout = 100;
1544
1545 ret = poll(&pfd, 1, timeout);
1546 if (ret < 0) {
1547 if (errno == EINTR)
1548 break;
1549 log_err("fio: poll: %s\n", strerror(errno));
1550 break;
1551 } else if (!ret) {
1552 fio_server_check_conns(&conn_list);
1553 continue;
1554 }
1555
1556 if (pfd.revents & POLLIN)
1557 break;
1558 } while (!exit_backend);
1559
1560 fio_server_check_conns(&conn_list);
1561
1562 if (exit_backend || ret < 0)
1563 break;
1564
1565 if (use_ipv6)
1566 sk = accept(listen_sk, (struct sockaddr *) &addr6, &len);
1567 else
1568 sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
1569
1570 if (sk < 0) {
1571 log_err("fio: accept: %s\n", strerror(errno));
1572 return -1;
1573 }
1574
1575 if (use_ipv6)
1576 from = inet_ntop(AF_INET6, (struct sockaddr *) &addr6.sin6_addr, buf, sizeof(buf));
1577 else
1578 from = inet_ntop(AF_INET, (struct sockaddr *) &addr.sin_addr, buf, sizeof(buf));
1579
1580 dprint(FD_NET, "server: connect from %s\n", from);
1581
1582 sk_out = scalloc(1, sizeof(*sk_out));
1583 if (!sk_out) {
1584 close(sk);
1585 return -1;
1586 }
1587
1588 sk_out->sk = sk;
1589 INIT_FLIST_HEAD(&sk_out->list);
1590 __fio_sem_init(&sk_out->lock, FIO_SEM_UNLOCKED);
1591 __fio_sem_init(&sk_out->wait, FIO_SEM_LOCKED);
1592 __fio_sem_init(&sk_out->xmit, FIO_SEM_UNLOCKED);
1593
1594#ifdef WIN32
1595 hProcess = windows_handle_connection(hjob, sk);
1596 if (hProcess == INVALID_HANDLE_VALUE)
1597 return -1;
1598 sk_out->hProcess = hProcess;
1599 fio_server_add_conn_pid(&conn_list, hProcess);
1600#else
1601 pid = fork();
1602 if (pid) {
1603 close(sk);
1604 fio_server_add_conn_pid(&conn_list, pid);
1605 continue;
1606 }
1607
1608 /* if error, it's already logged, non-fatal */
1609 get_my_addr_str(sk);
1610
1611 /*
1612 * Assign sk_out here, it'll be dropped in handle_connection()
1613 * since that function calls _exit() when done
1614 */
1615 sk_out_assign(sk_out);
1616 handle_connection(sk_out);
1617#endif
1618 }
1619
1620 return exitval;
1621}
1622
1623int fio_server_text_output(int level, const char *buf, size_t len)
1624{
1625 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1626 struct cmd_text_pdu *pdu;
1627 unsigned int tlen;
1628 struct timeval tv;
1629
1630 if (!sk_out || sk_out->sk == -1)
1631 return -1;
1632
1633 tlen = sizeof(*pdu) + len;
1634 pdu = malloc(tlen);
1635
1636 pdu->level = __cpu_to_le32(level);
1637 pdu->buf_len = __cpu_to_le32(len);
1638
1639 gettimeofday(&tv, NULL);
1640 pdu->log_sec = __cpu_to_le64(tv.tv_sec);
1641 pdu->log_usec = __cpu_to_le64(tv.tv_usec);
1642
1643 memcpy(pdu->buf, buf, len);
1644
1645 fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY);
1646 free(pdu);
1647 return len;
1648}
1649
1650static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
1651{
1652 dst->max_val = cpu_to_le64(src->max_val);
1653 dst->min_val = cpu_to_le64(src->min_val);
1654 dst->samples = cpu_to_le64(src->samples);
1655
1656 /*
1657 * Encode to IEEE 754 for network transfer
1658 */
1659 dst->mean.u.i = cpu_to_le64(fio_double_to_uint64(src->mean.u.f));
1660 dst->S.u.i = cpu_to_le64(fio_double_to_uint64(src->S.u.f));
1661}
1662
1663static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
1664{
1665 int i;
1666
1667 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1668 dst->max_run[i] = cpu_to_le64(src->max_run[i]);
1669 dst->min_run[i] = cpu_to_le64(src->min_run[i]);
1670 dst->max_bw[i] = cpu_to_le64(src->max_bw[i]);
1671 dst->min_bw[i] = cpu_to_le64(src->min_bw[i]);
1672 dst->iobytes[i] = cpu_to_le64(src->iobytes[i]);
1673 dst->agg[i] = cpu_to_le64(src->agg[i]);
1674 }
1675
1676 dst->kb_base = cpu_to_le32(src->kb_base);
1677 dst->unit_base = cpu_to_le32(src->unit_base);
1678 dst->groupid = cpu_to_le32(src->groupid);
1679 dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
1680 dst->sig_figs = cpu_to_le32(src->sig_figs);
1681}
1682
1683/*
1684 * Send a CMD_TS, which packs struct thread_stat and group_run_stats
1685 * into a single payload.
1686 */
1687void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs)
1688{
1689 struct cmd_ts_pdu p;
1690 int i, j, k;
1691 size_t clat_prio_stats_extra_size = 0;
1692 size_t ss_extra_size = 0;
1693 size_t extended_buf_size = 0;
1694 void *extended_buf;
1695 void *extended_buf_wp;
1696
1697 dprint(FD_NET, "server sending end stats\n");
1698
1699 memset(&p, 0, sizeof(p));
1700
1701 snprintf(p.ts.name, sizeof(p.ts.name), "%s", ts->name);
1702 snprintf(p.ts.verror, sizeof(p.ts.verror), "%s", ts->verror);
1703 snprintf(p.ts.description, sizeof(p.ts.description), "%s",
1704 ts->description);
1705
1706 p.ts.error = cpu_to_le32(ts->error);
1707 p.ts.thread_number = cpu_to_le32(ts->thread_number);
1708 p.ts.groupid = cpu_to_le32(ts->groupid);
1709 p.ts.job_start = cpu_to_le64(ts->job_start);
1710 p.ts.pid = cpu_to_le32(ts->pid);
1711 p.ts.members = cpu_to_le32(ts->members);
1712 p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
1713 p.ts.ioprio = cpu_to_le32(ts->ioprio);
1714 p.ts.disable_prio_stat = cpu_to_le32(ts->disable_prio_stat);
1715
1716 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1717 convert_io_stat(&p.ts.clat_stat[i], &ts->clat_stat[i]);
1718 convert_io_stat(&p.ts.slat_stat[i], &ts->slat_stat[i]);
1719 convert_io_stat(&p.ts.lat_stat[i], &ts->lat_stat[i]);
1720 convert_io_stat(&p.ts.bw_stat[i], &ts->bw_stat[i]);
1721 convert_io_stat(&p.ts.iops_stat[i], &ts->iops_stat[i]);
1722 }
1723 convert_io_stat(&p.ts.sync_stat, &ts->sync_stat);
1724
1725 p.ts.usr_time = cpu_to_le64(ts->usr_time);
1726 p.ts.sys_time = cpu_to_le64(ts->sys_time);
1727 p.ts.ctx = cpu_to_le64(ts->ctx);
1728 p.ts.minf = cpu_to_le64(ts->minf);
1729 p.ts.majf = cpu_to_le64(ts->majf);
1730 p.ts.clat_percentiles = cpu_to_le32(ts->clat_percentiles);
1731 p.ts.lat_percentiles = cpu_to_le32(ts->lat_percentiles);
1732 p.ts.slat_percentiles = cpu_to_le32(ts->slat_percentiles);
1733 p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision);
1734
1735 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1736 fio_fp64_t *src = &ts->percentile_list[i];
1737 fio_fp64_t *dst = &p.ts.percentile_list[i];
1738
1739 dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f));
1740 }
1741
1742 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
1743 p.ts.io_u_map[i] = cpu_to_le64(ts->io_u_map[i]);
1744 p.ts.io_u_submit[i] = cpu_to_le64(ts->io_u_submit[i]);
1745 p.ts.io_u_complete[i] = cpu_to_le64(ts->io_u_complete[i]);
1746 }
1747
1748 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
1749 p.ts.io_u_lat_n[i] = cpu_to_le64(ts->io_u_lat_n[i]);
1750 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1751 p.ts.io_u_lat_u[i] = cpu_to_le64(ts->io_u_lat_u[i]);
1752 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1753 p.ts.io_u_lat_m[i] = cpu_to_le64(ts->io_u_lat_m[i]);
1754
1755 for (i = 0; i < FIO_LAT_CNT; i++)
1756 for (j = 0; j < DDIR_RWDIR_CNT; j++)
1757 for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
1758 p.ts.io_u_plat[i][j][k] = cpu_to_le64(ts->io_u_plat[i][j][k]);
1759
1760 for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
1761 p.ts.io_u_sync_plat[j] = cpu_to_le64(ts->io_u_sync_plat[j]);
1762
1763 for (i = 0; i < DDIR_RWDIR_SYNC_CNT; i++)
1764 p.ts.total_io_u[i] = cpu_to_le64(ts->total_io_u[i]);
1765
1766 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1767 p.ts.short_io_u[i] = cpu_to_le64(ts->short_io_u[i]);
1768 p.ts.drop_io_u[i] = cpu_to_le64(ts->drop_io_u[i]);
1769 }
1770
1771 p.ts.total_submit = cpu_to_le64(ts->total_submit);
1772 p.ts.total_complete = cpu_to_le64(ts->total_complete);
1773 p.ts.nr_zone_resets = cpu_to_le64(ts->nr_zone_resets);
1774
1775 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1776 p.ts.io_bytes[i] = cpu_to_le64(ts->io_bytes[i]);
1777 p.ts.runtime[i] = cpu_to_le64(ts->runtime[i]);
1778 }
1779
1780 p.ts.total_run_time = cpu_to_le64(ts->total_run_time);
1781 p.ts.continue_on_error = cpu_to_le16(ts->continue_on_error);
1782 p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
1783 p.ts.first_error = cpu_to_le32(ts->first_error);
1784 p.ts.kb_base = cpu_to_le32(ts->kb_base);
1785 p.ts.unit_base = cpu_to_le32(ts->unit_base);
1786
1787 p.ts.latency_depth = cpu_to_le32(ts->latency_depth);
1788 p.ts.latency_target = cpu_to_le64(ts->latency_target);
1789 p.ts.latency_window = cpu_to_le64(ts->latency_window);
1790 p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
1791
1792 p.ts.sig_figs = cpu_to_le32(ts->sig_figs);
1793
1794 p.ts.nr_block_infos = cpu_to_le64(ts->nr_block_infos);
1795 for (i = 0; i < p.ts.nr_block_infos; i++)
1796 p.ts.block_infos[i] = cpu_to_le32(ts->block_infos[i]);
1797
1798 p.ts.ss_dur = cpu_to_le64(ts->ss_dur);
1799 p.ts.ss_state = cpu_to_le32(ts->ss_state);
1800 p.ts.ss_head = cpu_to_le32(ts->ss_head);
1801 p.ts.ss_limit.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_limit.u.f));
1802 p.ts.ss_slope.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_slope.u.f));
1803 p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
1804 p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
1805
1806 p.ts.cachehit = cpu_to_le64(ts->cachehit);
1807 p.ts.cachemiss = cpu_to_le64(ts->cachemiss);
1808
1809 convert_gs(&p.rs, rs);
1810
1811 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1812 if (ts->nr_clat_prio[i])
1813 clat_prio_stats_extra_size += ts->nr_clat_prio[i] * sizeof(*ts->clat_prio[i]);
1814 }
1815 extended_buf_size += clat_prio_stats_extra_size;
1816
1817 dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
1818 if (ts->ss_state & FIO_SS_DATA)
1819 ss_extra_size = 2 * ts->ss_dur * sizeof(uint64_t);
1820
1821 extended_buf_size += ss_extra_size;
1822 if (!extended_buf_size) {
1823 fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
1824 return;
1825 }
1826
1827 extended_buf_size += sizeof(p);
1828 extended_buf = calloc(1, extended_buf_size);
1829 if (!extended_buf) {
1830 log_err("fio: failed to allocate FIO_NET_CMD_TS buffer\n");
1831 return;
1832 }
1833
1834 memcpy(extended_buf, &p, sizeof(p));
1835 extended_buf_wp = (struct cmd_ts_pdu *)extended_buf + 1;
1836
1837 if (clat_prio_stats_extra_size) {
1838 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1839 struct clat_prio_stat *prio = (struct clat_prio_stat *) extended_buf_wp;
1840
1841 for (j = 0; j < ts->nr_clat_prio[i]; j++) {
1842 for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
1843 prio->io_u_plat[k] =
1844 cpu_to_le64(ts->clat_prio[i][j].io_u_plat[k]);
1845 convert_io_stat(&prio->clat_stat,
1846 &ts->clat_prio[i][j].clat_stat);
1847 prio->ioprio = cpu_to_le32(ts->clat_prio[i][j].ioprio);
1848 prio++;
1849 }
1850
1851 if (ts->nr_clat_prio[i]) {
1852 uint64_t offset = (char *)extended_buf_wp - (char *)extended_buf;
1853 struct cmd_ts_pdu *ptr = extended_buf;
1854
1855 ptr->ts.clat_prio_offset[i] = cpu_to_le64(offset);
1856 ptr->ts.nr_clat_prio[i] = cpu_to_le32(ts->nr_clat_prio[i]);
1857 }
1858
1859 extended_buf_wp = prio;
1860 }
1861 }
1862
1863 if (ss_extra_size) {
1864 uint64_t *ss_iops, *ss_bw;
1865 uint64_t offset;
1866 struct cmd_ts_pdu *ptr = extended_buf;
1867
1868 dprint(FD_NET, "server sending steadystate ring buffers\n");
1869
1870 /* ss iops */
1871 ss_iops = (uint64_t *) extended_buf_wp;
1872 for (i = 0; i < ts->ss_dur; i++)
1873 ss_iops[i] = cpu_to_le64(ts->ss_iops_data[i]);
1874
1875 offset = (char *)extended_buf_wp - (char *)extended_buf;
1876 ptr->ts.ss_iops_data_offset = cpu_to_le64(offset);
1877 extended_buf_wp = ss_iops + (int) ts->ss_dur;
1878
1879 /* ss bw */
1880 ss_bw = extended_buf_wp;
1881 for (i = 0; i < ts->ss_dur; i++)
1882 ss_bw[i] = cpu_to_le64(ts->ss_bw_data[i]);
1883
1884 offset = (char *)extended_buf_wp - (char *)extended_buf;
1885 ptr->ts.ss_bw_data_offset = cpu_to_le64(offset);
1886 }
1887
1888 fio_net_queue_cmd(FIO_NET_CMD_TS, extended_buf, extended_buf_size, NULL, SK_F_COPY);
1889 free(extended_buf);
1890}
1891
1892void fio_server_send_gs(struct group_run_stats *rs)
1893{
1894 struct group_run_stats gs;
1895
1896 dprint(FD_NET, "server sending group run stats\n");
1897
1898 convert_gs(&gs, rs);
1899 fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY);
1900}
1901
1902void fio_server_send_job_options(struct flist_head *opt_list,
1903 unsigned int gid)
1904{
1905 struct cmd_job_option pdu;
1906 struct flist_head *entry;
1907
1908 if (flist_empty(opt_list))
1909 return;
1910
1911 flist_for_each(entry, opt_list) {
1912 struct print_option *p;
1913 size_t len;
1914
1915 p = flist_entry(entry, struct print_option, list);
1916 memset(&pdu, 0, sizeof(pdu));
1917
1918 if (gid == -1U) {
1919 pdu.global = __cpu_to_le16(1);
1920 pdu.groupid = 0;
1921 } else {
1922 pdu.global = 0;
1923 pdu.groupid = cpu_to_le32(gid);
1924 }
1925 len = strlen(p->name);
1926 if (len >= sizeof(pdu.name)) {
1927 len = sizeof(pdu.name) - 1;
1928 pdu.truncated = __cpu_to_le16(1);
1929 }
1930 memcpy(pdu.name, p->name, len);
1931 if (p->value) {
1932 len = strlen(p->value);
1933 if (len >= sizeof(pdu.value)) {
1934 len = sizeof(pdu.value) - 1;
1935 pdu.truncated = __cpu_to_le16(1);
1936 }
1937 memcpy(pdu.value, p->value, len);
1938 }
1939 fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1940 }
1941}
1942
1943static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
1944{
1945 int i;
1946
1947 for (i = 0; i < 2; i++) {
1948 dst->ios[i] = cpu_to_le64(src->ios[i]);
1949 dst->merges[i] = cpu_to_le64(src->merges[i]);
1950 dst->sectors[i] = cpu_to_le64(src->sectors[i]);
1951 dst->ticks[i] = cpu_to_le64(src->ticks[i]);
1952 }
1953
1954 dst->io_ticks = cpu_to_le64(src->io_ticks);
1955 dst->time_in_queue = cpu_to_le64(src->time_in_queue);
1956 dst->slavecount = cpu_to_le32(src->slavecount);
1957 dst->max_util.u.i = cpu_to_le64(fio_double_to_uint64(src->max_util.u.f));
1958}
1959
1960static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src)
1961{
1962 int i;
1963
1964 snprintf((char *) dst->name, sizeof(dst->name), "%s", src->name);
1965
1966 for (i = 0; i < 2; i++) {
1967 dst->s.ios[i] = cpu_to_le64(src->s.ios[i]);
1968 dst->s.merges[i] = cpu_to_le64(src->s.merges[i]);
1969 dst->s.sectors[i] = cpu_to_le64(src->s.sectors[i]);
1970 dst->s.ticks[i] = cpu_to_le64(src->s.ticks[i]);
1971 }
1972
1973 dst->s.io_ticks = cpu_to_le64(src->s.io_ticks);
1974 dst->s.time_in_queue = cpu_to_le64(src->s.time_in_queue);
1975 dst->s.msec = cpu_to_le64(src->s.msec);
1976}
1977
1978void fio_server_send_du(void)
1979{
1980 struct disk_util *du;
1981 struct flist_head *entry;
1982 struct cmd_du_pdu pdu;
1983
1984 dprint(FD_NET, "server: sending disk_util %d\n", !flist_empty(&disk_list));
1985
1986 memset(&pdu, 0, sizeof(pdu));
1987
1988 flist_for_each(entry, &disk_list) {
1989 du = flist_entry(entry, struct disk_util, list);
1990
1991 convert_dus(&pdu.dus, &du->dus);
1992 convert_agg(&pdu.agg, &du->agg);
1993
1994 fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1995 }
1996}
1997
1998#ifdef CONFIG_ZLIB
1999
2000static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu,
2001 struct sk_entry **last_entry,
2002 struct sk_entry *first)
2003{
2004 unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
2005
2006 *last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
2007 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
2008 if (*last_entry)
2009 flist_add_tail(&(*last_entry)->list, &first->next);
2010}
2011
2012/*
2013 * Deflates the next input given, creating as many new packets in the
2014 * linked list as necessary.
2015 */
2016static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu,
2017 struct sk_entry **last_entry, z_stream *stream,
2018 struct sk_entry *first)
2019{
2020 int ret;
2021
2022 stream->next_in = next_in;
2023 stream->avail_in = next_sz;
2024 do {
2025 if (!stream->avail_out) {
2026 __fio_net_prep_tail(stream, *out_pdu, last_entry, first);
2027 if (*last_entry == NULL)
2028 return 1;
2029
2030 *out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
2031
2032 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
2033 stream->next_out = *out_pdu;
2034 }
2035
2036 ret = deflate(stream, Z_BLOCK);
2037
2038 if (ret < 0) {
2039 free(*out_pdu);
2040 return 1;
2041 }
2042 } while (stream->avail_in);
2043
2044 return 0;
2045}
2046
2047static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log,
2048 struct io_logs *cur_log, z_stream *stream)
2049{
2050 struct sk_entry *entry;
2051 void *out_pdu;
2052 int ret, i, j;
2053 int sample_sz = log_entry_sz(log);
2054
2055 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
2056 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
2057 stream->next_out = out_pdu;
2058
2059 for (i = 0; i < cur_log->nr_samples; i++) {
2060 struct io_sample *s;
2061 struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry;
2062 uint64_t *cur_plat, *prev_plat;
2063
2064 s = get_sample(log, cur_log, i);
2065 ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first);
2066 if (ret)
2067 return ret;
2068
2069 /* Do the subtraction on server side so that client doesn't have to
2070 * reconstruct our linked list from packets.
2071 */
2072 cur_plat_entry = s->data.plat_entry;
2073 prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list);
2074 cur_plat = cur_plat_entry->io_u_plat;
2075 prev_plat = prev_plat_entry->io_u_plat;
2076
2077 for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
2078 cur_plat[j] -= prev_plat[j];
2079 }
2080
2081 flist_del(&prev_plat_entry->list);
2082 free(prev_plat_entry);
2083
2084 ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry),
2085 &out_pdu, &entry, stream, first);
2086
2087 if (ret)
2088 return ret;
2089 }
2090
2091 __fio_net_prep_tail(stream, out_pdu, &entry, first);
2092 return entry == NULL;
2093}
2094
2095static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log,
2096 struct io_logs *cur_log, z_stream *stream)
2097{
2098 unsigned int this_len;
2099 void *out_pdu;
2100 int ret;
2101
2102 if (log->log_type == IO_LOG_TYPE_HIST)
2103 return __fio_append_iolog_gz_hist(first, log, cur_log, stream);
2104
2105 stream->next_in = (void *) cur_log->log;
2106 stream->avail_in = cur_log->nr_samples * log_entry_sz(log);
2107
2108 do {
2109 struct sk_entry *entry;
2110
2111 /*
2112 * Dirty - since the log is potentially huge, compress it into
2113 * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
2114 * side defragment it.
2115 */
2116 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
2117
2118 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
2119 stream->next_out = out_pdu;
2120 ret = deflate(stream, Z_BLOCK);
2121 /* may be Z_OK, or Z_STREAM_END */
2122 if (ret < 0) {
2123 free(out_pdu);
2124 return 1;
2125 }
2126
2127 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
2128
2129 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
2130 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
2131 if (!entry) {
2132 free(out_pdu);
2133 return 1;
2134 }
2135 flist_add_tail(&entry->list, &first->next);
2136 } while (stream->avail_in);
2137
2138 return 0;
2139}
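
/*
 * Illustrative sketch, not taken from the client code: one way the receiving
 * side could inflate the IOLOG fragments produced above, assuming it has
 * already reassembled them into one contiguous buffer. The consume()
 * callback is hypothetical.
 */
static int inflate_iolog_fragments(void *in, unsigned int in_len,
				   int (*consume)(void *buf, size_t len))
{
	unsigned char out[FIO_SERVER_MAX_FRAGMENT_PDU];
	z_stream stream = {
		.zalloc = Z_NULL,
		.zfree = Z_NULL,
		.opaque = Z_NULL,
	};
	int ret;

	if (inflateInit(&stream) != Z_OK)
		return 1;

	stream.next_in = in;
	stream.avail_in = in_len;

	do {
		stream.next_out = out;
		stream.avail_out = sizeof(out);

		ret = inflate(&stream, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		if (consume(out, sizeof(out) - stream.avail_out)) {
			ret = Z_ERRNO;
			break;
		}
	} while (ret != Z_STREAM_END);

	inflateEnd(&stream);
	return ret != Z_STREAM_END;
}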
2140
2141static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
2142{
2143 z_stream stream = {
2144 .zalloc = Z_NULL,
2145 .zfree = Z_NULL,
2146 .opaque = Z_NULL,
2147 };
2148 int ret = 0;
2149
2150 if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
2151 return 1;
2152
2153 while (!flist_empty(&log->io_logs)) {
2154 struct io_logs *cur_log;
2155
2156 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
2157 flist_del_init(&cur_log->list);
2158
2159 ret = __fio_append_iolog_gz(first, log, cur_log, &stream);
2160 if (ret)
2161 break;
2162 }
2163
2164 ret = deflate(&stream, Z_FINISH);
2165
2166 while (ret != Z_STREAM_END) {
2167 struct sk_entry *entry;
2168 unsigned int this_len;
2169 void *out_pdu;
2170
2171 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
2172 stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
2173 stream.next_out = out_pdu;
2174
2175 ret = deflate(&stream, Z_FINISH);
2176 /* may be Z_OK, or Z_STREAM_END */
2177 if (ret < 0) {
2178 free(out_pdu);
2179 break;
2180 }
2181
2182 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
2183
2184 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
2185 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
2186 if (!entry) {
2187 free(out_pdu);
2188 break;
2189 }
2190 flist_add_tail(&entry->list, &first->next);
2191 }
2192
2193 ret = deflateEnd(&stream);
2194 if (ret == Z_OK)
2195 return 0;
2196
2197 return 1;
2198}
2199#else
2200static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
2201{
2202 return 1;
2203}
2204#endif
2205
2206static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log)
2207{
2208 struct sk_entry *entry;
2209 struct flist_head *node;
2210 int ret = 0;
2211
2212 pthread_mutex_lock(&log->chunk_lock);
2213 flist_for_each(node, &log->chunk_list) {
2214 struct iolog_compress *c;
2215
2216 c = flist_entry(node, struct iolog_compress, list);
2217 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len,
2218 NULL, SK_F_VEC | SK_F_INLINE);
2219 if (!entry) {
2220 ret = 1;
2221 break;
2222 }
2223 flist_add_tail(&entry->list, &first->next);
2224 }
2225 pthread_mutex_unlock(&log->chunk_lock);
2226 return ret;
2227}
2228
2229static int fio_append_text_log(struct sk_entry *first, struct io_log *log)
2230{
2231 struct sk_entry *entry;
2232 int ret = 0;
2233
2234 while (!flist_empty(&log->io_logs)) {
2235 struct io_logs *cur_log;
2236 size_t size;
2237
2238 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
2239 flist_del_init(&cur_log->list);
2240
2241 size = cur_log->nr_samples * log_entry_sz(log);
2242
2243 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size,
2244 NULL, SK_F_VEC | SK_F_INLINE);
2245 if (!entry) {
2246 ret = 1;
2247 break;
2248 }
2249 flist_add_tail(&entry->list, &first->next);
2250 }
2251
2252 return ret;
2253}
2254
2255int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
2256{
2257 struct cmd_iolog_pdu pdu = {
2258 .nr_samples = cpu_to_le64(iolog_nr_samples(log)),
2259 .thread_number = cpu_to_le32(td->thread_number),
2260 .log_type = cpu_to_le32(log->log_type),
2261 .log_hist_coarseness = cpu_to_le32(log->hist_coarseness),
2262 .per_job_logs = cpu_to_le32(td->o.per_job_logs),
2263 };
2264 struct sk_entry *first;
2265 struct flist_head *entry;
2266 int ret = 0;
2267
2268 if (!flist_empty(&log->chunk_list))
2269 pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
2270 else if (use_zlib)
2271 pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED);
2272 else
2273 pdu.compressed = 0;
2274
2275 snprintf((char *) pdu.name, sizeof(pdu.name), "%s", name);
2276
2277 /*
2278 * We can't do this for a pre-compressed log, but for that case,
2279 * log->nr_samples is zero anyway.
2280 */
2281 flist_for_each(entry, &log->io_logs) {
2282 struct io_logs *cur_log;
2283 int i;
2284
2285 cur_log = flist_entry(entry, struct io_logs, list);
2286
2287 for (i = 0; i < cur_log->nr_samples; i++) {
2288 struct io_sample *s = get_sample(log, cur_log, i);
2289
2290 s->time = cpu_to_le64(s->time);
2291 if (log->log_type != IO_LOG_TYPE_HIST) {
2292 s->data.val.val0 = cpu_to_le64(s->data.val.val0);
2293 s->data.val.val1 = cpu_to_le64(s->data.val.val1);
2294 }
2295 s->__ddir = __cpu_to_le32(s->__ddir);
2296 s->bs = cpu_to_le64(s->bs);
2297
2298 if (log->log_offset) {
2299 struct io_sample_offset *so = (void *) s;
2300
2301 so->offset = cpu_to_le64(so->offset);
2302 }
2303 }
2304 }
2305
2306 /*
2307 * Assemble header entry first
2308 */
2309 first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY);
2310 if (!first)
2311 return 1;
2312
2313 /*
2314 * Now append actual log entries. If log compression was enabled on
2315 * the job, just send out the compressed chunks directly. If we
2316 * have a plain log, compress if we can, then send. Otherwise, send
2317 * the plain text output.
2318 */
2319 if (!flist_empty(&log->chunk_list))
2320 ret = fio_append_gz_chunks(first, log);
2321 else if (use_zlib)
2322 ret = fio_append_iolog_gz(first, log);
2323 else
2324 ret = fio_append_text_log(first, log);
2325
2326 fio_net_queue_entry(first);
2327 return ret;
2328}
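
/*
 * Illustrative sketch, not code from this file: the receiving side undoes
 * the cpu_to_le*() conversions performed in fio_send_iolog(). This mirrors
 * those calls for a single non-histogram sample; the real conversion lives
 * in the client.
 */
static void sample_to_cpu(struct io_sample *s, bool has_offset)
{
	s->time = le64_to_cpu(s->time);
	s->data.val.val0 = le64_to_cpu(s->data.val.val0);
	s->data.val.val1 = le64_to_cpu(s->data.val.val1);
	s->__ddir = le32_to_cpu(s->__ddir);
	s->bs = le64_to_cpu(s->bs);

	if (has_offset) {
		struct io_sample_offset *so = (void *) s;

		so->offset = le64_to_cpu(so->offset);
	}
}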
2329
2330void fio_server_send_add_job(struct thread_data *td)
2331{
2332 struct cmd_add_job_pdu *pdu;
2333 size_t cmd_sz = offsetof(struct cmd_add_job_pdu, top) +
2334 thread_options_pack_size(&td->o);
2335
2336 pdu = malloc(cmd_sz);
2337 pdu->thread_number = cpu_to_le32(td->thread_number);
2338 pdu->groupid = cpu_to_le32(td->groupid);
2339
2340 convert_thread_options_to_net(&pdu->top, &td->o);
2341
2342 fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, pdu, cmd_sz, NULL, SK_F_COPY);
2343 free(pdu);
2344}
2345
2346void fio_server_send_start(struct thread_data *td)
2347{
2348 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2349
2350 if (sk_out->sk == -1) {
		log_err("fio: server: connection socket is invalid (sk_out %p, sk %i), err %i: %s\n",
			sk_out, sk_out->sk, errno, strerror(errno));
2353 abort();
2354 }
2355
2356 fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
2357}
2358
2359int fio_server_get_verify_state(const char *name, int threadnumber,
2360 void **datap)
2361{
2362 struct thread_io_list *s;
2363 struct cmd_sendfile out;
2364 struct cmd_reply *rep;
2365 uint64_t tag;
2366 void *data;
2367 int ret;
2368
2369 dprint(FD_NET, "server: request verify state\n");
2370
2371 rep = smalloc(sizeof(*rep));
2372 if (!rep)
2373 return ENOMEM;
2374
2375 __fio_sem_init(&rep->lock, FIO_SEM_LOCKED);
2376 rep->data = NULL;
2377 rep->error = 0;
2378
2379 verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
2380 threadnumber);
2381 tag = (uint64_t) (uintptr_t) rep;
2382 fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag,
2383 SK_F_COPY);
2384
2385 /*
2386 * Wait for the backend to receive the reply
2387 */
2388 if (fio_sem_down_timeout(&rep->lock, 10000)) {
2389 log_err("fio: timed out waiting for reply\n");
2390 ret = ETIMEDOUT;
2391 goto fail;
2392 }
2393
2394 if (rep->error) {
2395 log_err("fio: failure on receiving state file %s: %s\n",
2396 out.path, strerror(rep->error));
2397 ret = rep->error;
2398fail:
2399 *datap = NULL;
2400 sfree(rep);
2401 fio_net_queue_quit();
2402 return ret;
2403 }
2404
2405 /*
2406 * The format is verify_state_hdr, then thread_io_list. Verify
2407 * the header, and the thread_io_list checksum
2408 */
2409 s = rep->data + sizeof(struct verify_state_hdr);
2410 if (verify_state_hdr(rep->data, s)) {
2411 ret = EILSEQ;
2412 goto fail;
2413 }
2414
	/*
	 * We don't need the header anymore, so copy out just the
	 * thread_io_list.
	 */
2418 ret = 0;
2419 rep->size -= sizeof(struct verify_state_hdr);
2420 data = malloc(rep->size);
2421 memcpy(data, s, rep->size);
2422 *datap = data;
2423
2424 sfree(rep->data);
2425 __fio_sem_remove(&rep->lock);
2426 sfree(rep);
2427 return ret;
2428}
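
/*
 * Illustrative sketch of the completion side of the pattern above; the real
 * reply handling lives elsewhere. The tag sent with FIO_NET_CMD_SENDFILE is
 * simply the cmd_reply pointer, so whoever processes the reply can recover
 * it, fill in the result and wake the waiter blocked in
 * fio_server_get_verify_state().
 */
static void complete_cmd_reply(uint64_t tag, void *data, size_t size, int error)
{
	struct cmd_reply *rep = (struct cmd_reply *) (uintptr_t) tag;

	rep->data = data;
	rep->size = size;
	rep->error = error;
	fio_sem_up(&rep->lock);
}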
2429
2430static int fio_init_server_ip(void)
2431{
2432 struct sockaddr *addr;
2433 socklen_t socklen;
2434 char buf[80];
2435 const char *str;
2436 int sk, opt;
2437
2438 if (use_ipv6)
2439 sk = socket(AF_INET6, SOCK_STREAM, 0);
2440 else
2441 sk = socket(AF_INET, SOCK_STREAM, 0);
2442
2443 if (sk < 0) {
2444 log_err("fio: socket: %s\n", strerror(errno));
2445 return -1;
2446 }
2447
2448 opt = 1;
2449 if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) {
2450 log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno));
2451 close(sk);
2452 return -1;
2453 }
2454#ifdef SO_REUSEPORT
	/*
	 * Not fatal if this fails, so just ignore any error.
	 */
2458 if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt))) {
2459 }
2460#endif
2461
2462 if (use_ipv6) {
2463 void *src = &saddr_in6.sin6_addr;
2464
2465 addr = (struct sockaddr *) &saddr_in6;
2466 socklen = sizeof(saddr_in6);
2467 saddr_in6.sin6_family = AF_INET6;
2468 str = inet_ntop(AF_INET6, src, buf, sizeof(buf));
2469 } else {
2470 void *src = &saddr_in.sin_addr;
2471
2472 addr = (struct sockaddr *) &saddr_in;
2473 socklen = sizeof(saddr_in);
2474 saddr_in.sin_family = AF_INET;
2475 str = inet_ntop(AF_INET, src, buf, sizeof(buf));
2476 }
2477
2478 if (bind(sk, addr, socklen) < 0) {
2479 log_err("fio: bind: %s\n", strerror(errno));
2480 log_info("fio: failed with IPv%c %s\n", use_ipv6 ? '6' : '4', str);
2481 close(sk);
2482 return -1;
2483 }
2484
2485 return sk;
2486}
2487
2488static int fio_init_server_sock(void)
2489{
2490 struct sockaddr_un addr;
2491 socklen_t len;
2492 mode_t mode;
2493 int sk;
2494
2495 sk = socket(AF_UNIX, SOCK_STREAM, 0);
2496 if (sk < 0) {
2497 log_err("fio: socket: %s\n", strerror(errno));
2498 return -1;
2499 }
2500
2501 mode = umask(000);
2502
2503 addr.sun_family = AF_UNIX;
2504 snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", bind_sock);
2505
2506 len = sizeof(addr.sun_family) + strlen(bind_sock) + 1;
2507
2508 if (bind(sk, (struct sockaddr *) &addr, len) < 0) {
2509 log_err("fio: bind: %s\n", strerror(errno));
2510 close(sk);
2511 return -1;
2512 }
2513
2514 umask(mode);
2515 return sk;
2516}
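
/*
 * Illustrative sketch, not part of this file: the connecting side of the
 * unix domain socket bound above. The path is whatever was passed via the
 * sock: argument.
 */
static int connect_unix_sock(const char *path)
{
	struct sockaddr_un addr;
	int sk;

	sk = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", path);

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}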
2517
2518static int fio_init_server_connection(void)
2519{
2520 char bind_str[128];
2521 int sk;
2522
2523 dprint(FD_NET, "starting server\n");
2524
2525 if (!bind_sock)
2526 sk = fio_init_server_ip();
2527 else
2528 sk = fio_init_server_sock();
2529
2530 if (sk < 0)
2531 return sk;
2532
2533 memset(bind_str, 0, sizeof(bind_str));
2534
2535 if (!bind_sock) {
2536 char *p, port[16];
2537 void *src;
2538 int af;
2539
2540 if (use_ipv6) {
2541 af = AF_INET6;
2542 src = &saddr_in6.sin6_addr;
2543 } else {
2544 af = AF_INET;
2545 src = &saddr_in.sin_addr;
2546 }
2547
2548 p = (char *) inet_ntop(af, src, bind_str, sizeof(bind_str));
2549
2550 sprintf(port, ",%u", fio_net_port);
2551 if (p)
2552 strcat(p, port);
2553 else
2554 snprintf(bind_str, sizeof(bind_str), "%s", port);
2555 } else
2556 snprintf(bind_str, sizeof(bind_str), "%s", bind_sock);
2557
2558 log_info("fio: server listening on %s\n", bind_str);
2559
2560 if (listen(sk, 4) < 0) {
2561 log_err("fio: listen: %s\n", strerror(errno));
2562 close(sk);
2563 return -1;
2564 }
2565
2566 return sk;
2567}
2568
2569int fio_server_parse_host(const char *host, int ipv6, struct in_addr *inp,
			  struct in6_addr *inp6)
{
2573 int ret = 0;
2574
2575 if (ipv6)
2576 ret = inet_pton(AF_INET6, host, inp6);
2577 else
2578 ret = inet_pton(AF_INET, host, inp);
2579
2580 if (ret != 1) {
2581 struct addrinfo *res, hints = {
2582 .ai_family = ipv6 ? AF_INET6 : AF_INET,
2583 .ai_socktype = SOCK_STREAM,
2584 };
2585
2586 ret = getaddrinfo(host, NULL, &hints, &res);
2587 if (ret) {
2588 log_err("fio: failed to resolve <%s> (%s)\n", host,
2589 gai_strerror(ret));
2590 return 1;
2591 }
2592
2593 if (ipv6)
2594 memcpy(inp6, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr, sizeof(*inp6));
2595 else
2596 memcpy(inp, &((struct sockaddr_in *) res->ai_addr)->sin_addr, sizeof(*inp));
2597
2598 ret = 1;
2599 freeaddrinfo(res);
2600 }
2601
2602 return !(ret == 1);
2603}
2604
2605/*
2606 * Parse a host/ip/port string. Reads from 'str'.
2607 *
2608 * Outputs:
2609 *
2610 * For IPv4:
2611 * *ptr is the host, *port is the port, inp is the destination.
2612 * For IPv6:
2613 * *ptr is the host, *port is the port, inp6 is the dest, and *ipv6 is 1.
2614 * For local domain sockets:
2615 * *ptr is the filename, *is_sock is 1.
2616 */
2617int fio_server_parse_string(const char *str, char **ptr, bool *is_sock,
2618 int *port, struct in_addr *inp,
2619 struct in6_addr *inp6, int *ipv6)
2620{
2621 const char *host = str;
2622 char *portp;
2623 int lport = 0;
2624
2625 *ptr = NULL;
2626 *is_sock = false;
2627 *port = fio_net_port;
2628 *ipv6 = 0;
2629
2630 if (!strncmp(str, "sock:", 5)) {
2631 *ptr = strdup(str + 5);
2632 *is_sock = true;
2633
2634 return 0;
2635 }
2636
2637 /*
2638 * Is it ip:<ip or host>:port
2639 */
2640 if (!strncmp(host, "ip:", 3))
2641 host += 3;
2642 else if (!strncmp(host, "ip4:", 4))
2643 host += 4;
2644 else if (!strncmp(host, "ip6:", 4)) {
2645 host += 4;
2646 *ipv6 = 1;
2647 } else if (host[0] == ':') {
2648 /* String is :port */
2649 host++;
2650 lport = atoi(host);
2651 if (!lport || lport > 65535) {
2652 log_err("fio: bad server port %u\n", lport);
2653 return 1;
2654 }
2655 /* no hostname given, we are done */
2656 *port = lport;
2657 return 0;
2658 }
2659
2660 /*
2661 * If no port seen yet, check if there's a last ',' at the end
2662 */
2663 if (!lport) {
2664 portp = strchr(host, ',');
2665 if (portp) {
2666 *portp = '\0';
2667 portp++;
2668 lport = atoi(portp);
2669 if (!lport || lport > 65535) {
2670 log_err("fio: bad server port %u\n", lport);
2671 return 1;
2672 }
2673 }
2674 }
2675
2676 if (lport)
2677 *port = lport;
2678
2679 if (!strlen(host))
2680 return 0;
2681
2682 *ptr = strdup(host);
2683
2684 if (fio_server_parse_host(*ptr, *ipv6, inp, inp6)) {
2685 free(*ptr);
2686 *ptr = NULL;
2687 return 1;
2688 }
2689
2690 if (*port == 0)
2691 *port = fio_net_port;
2692
2693 return 0;
2694}
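
/*
 * Illustrative sketch, not fio code: what the parser above produces for a
 * couple of typical --server strings. The inputs are writable buffers on
 * purpose, since the port separator is split in place.
 */
static void parse_string_example(void)
{
	char sock_arg[] = "sock:/tmp/fio.sock";
	char ip6_arg[] = "ip6:::1,8765";
	struct in_addr in;
	struct in6_addr in6;
	char *host = NULL;
	bool is_sock;
	int port, ipv6;

	/* -> is_sock = true, host = "/tmp/fio.sock" */
	if (!fio_server_parse_string(sock_arg, &host, &is_sock, &port,
				     &in, &in6, &ipv6))
		free(host);

	/* -> ipv6 = 1, port = 8765, host = "::1", in6 = ::1 */
	if (!fio_server_parse_string(ip6_arg, &host, &is_sock, &port,
				     &in, &in6, &ipv6))
		free(host);
}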
2695
/*
 * The server arg should be one of:
 *
 * sock:/path/to/socket
 * ip:1.2.3.4
 * 1.2.3.4
 *
 * where "sock" uses a unix domain socket and "ip" binds the server to a
 * specific address. If no argument is given, the server uses IPv4 and
 * binds to 0.0.0.0 on the default port.
 */
2708static int fio_handle_server_arg(void)
2709{
2710 int port = fio_net_port;
2711 bool is_sock;
2712 int ret = 0;
2713
2714 saddr_in.sin_addr.s_addr = htonl(INADDR_ANY);
2715
2716 if (!fio_server_arg)
2717 goto out;
2718
2719 ret = fio_server_parse_string(fio_server_arg, &bind_sock, &is_sock,
2720 &port, &saddr_in.sin_addr,
2721 &saddr_in6.sin6_addr, &use_ipv6);
2722
2723 if (!is_sock && bind_sock) {
2724 free(bind_sock);
2725 bind_sock = NULL;
2726 }
2727
2728out:
2729 fio_net_port = port;
2730 saddr_in.sin_port = htons(port);
2731 saddr_in6.sin6_port = htons(port);
2732 return ret;
2733}
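
/*
 * For reference, an illustrative set of command lines that end up in
 * fio_handle_server_arg() above (see the fio documentation for the full
 * list):
 *
 *	fio --server				backend on 0.0.0.0, default port
 *	fio --server=ip:10.0.0.1,8765		bind a specific IPv4 address/port
 *	fio --server=ip6:::1,8765		bind the IPv6 loopback address
 *	fio --server=sock:/tmp/fio.sock		use a local unix domain socket
 *
 * The command-line parser stores the argument with fio_server_set_arg(),
 * defined at the bottom of this file.
 */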
2734
2735static void sig_int(int sig)
2736{
2737 if (bind_sock)
2738 unlink(bind_sock);
2739}
2740
2741static void set_sig_handlers(void)
2742{
2743 struct sigaction act = {
2744 .sa_handler = sig_int,
2745 .sa_flags = SA_RESTART,
2746 };
2747
2748 sigaction(SIGINT, &act, NULL);
2749
2750 /* Windows uses SIGBREAK as a quit signal from other applications */
2751#ifdef WIN32
2752 sigaction(SIGBREAK, &act, NULL);
2753#endif
2754}
2755
2756void fio_server_destroy_sk_key(void)
2757{
2758 pthread_key_delete(sk_out_key);
2759}
2760
2761int fio_server_create_sk_key(void)
2762{
2763 if (pthread_key_create(&sk_out_key, NULL)) {
2764 log_err("fio: can't create sk_out backend key\n");
2765 return 1;
2766 }
2767
2768 pthread_setspecific(sk_out_key, NULL);
2769 return 0;
2770}
2771
2772static int fio_server(void)
2773{
2774 int sk, ret;
2775
2776 dprint(FD_NET, "starting server\n");
2777
2778 if (fio_handle_server_arg())
2779 return -1;
2780
2781 set_sig_handlers();
2782
2783#ifdef WIN32
2784 /* if this is a child process, go handle the connection */
2785 if (fio_server_pipe_name != NULL) {
2786 ret = handle_connection_process();
2787 return ret;
2788 }
2789
2790 /* job to link child processes so they terminate together */
2791 hjob = windows_create_job();
2792 if (hjob == INVALID_HANDLE_VALUE)
2793 return -1;
2794#endif
2795
2796 sk = fio_init_server_connection();
2797 if (sk < 0)
2798 return -1;
2799
2800 ret = accept_loop(sk);
2801
2802 close(sk);
2803
2804 if (fio_server_arg) {
2805 free(fio_server_arg);
2806 fio_server_arg = NULL;
2807 }
2808 if (bind_sock)
2809 free(bind_sock);
2810
2811 return ret;
2812}
2813
2814void fio_server_got_signal(int signal)
2815{
2816 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2817
2818 assert(sk_out);
2819
2820 if (signal == SIGPIPE)
2821 sk_out->sk = -1;
2822 else {
2823 log_info("\nfio: terminating on signal %d\n", signal);
2824 exit_backend = true;
2825 }
2826}
2827
2828static int check_existing_pidfile(const char *pidfile)
2829{
2830 struct stat sb;
2831 char buf[16];
2832 pid_t pid;
2833 FILE *f;
2834
2835 if (stat(pidfile, &sb))
2836 return 0;
2837
2838 f = fopen(pidfile, "r");
2839 if (!f)
2840 return 0;
2841
	/* bound the read to the buffer and NUL terminate before atoi() */
	if (sb.st_size >= (off_t) sizeof(buf))
		sb.st_size = sizeof(buf) - 1;

	if (fread(buf, sb.st_size, 1, f) <= 0) {
		fclose(f);
		return 1;
	}
	fclose(f);
	buf[sb.st_size] = '\0';
2847
2848 pid = atoi(buf);
2849 if (kill(pid, SIGCONT) < 0)
2850 return errno != ESRCH;
2851
2852 return 1;
2853}
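
/*
 * Aside, illustrative only: the probe above uses SIGCONT, which is harmless
 * to a running server. The more conventional liveness check uses signal 0,
 * which performs only the existence/permission test, as sketched below.
 */
static int pid_is_alive(pid_t pid)
{
	return kill(pid, 0) == 0 || errno != ESRCH;
}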
2854
2855static int write_pid(pid_t pid, const char *pidfile)
2856{
2857 FILE *fpid;
2858
2859 fpid = fopen(pidfile, "w");
2860 if (!fpid) {
2861 log_err("fio: failed opening pid file %s\n", pidfile);
2862 return 1;
2863 }
2864
2865 fprintf(fpid, "%u\n", (unsigned int) pid);
2866 fclose(fpid);
2867 return 0;
2868}
2869
2870/*
2871 * If pidfile is specified, background us.
2872 */
2873int fio_start_server(char *pidfile)
2874{
2875 FILE *file;
2876 pid_t pid;
2877 int ret;
2878
2879#if defined(WIN32)
2880 WSADATA wsd;
2881 WSAStartup(MAKEWORD(2, 2), &wsd);
2882#endif
2883
2884 if (!pidfile)
2885 return fio_server();
2886
2887 if (check_existing_pidfile(pidfile)) {
2888 log_err("fio: pidfile %s exists and server appears alive\n",
2889 pidfile);
2890 free(pidfile);
2891 return -1;
2892 }
2893
2894 pid = fork();
2895 if (pid < 0) {
2896 log_err("fio: failed server fork: %s\n", strerror(errno));
2897 free(pidfile);
2898 return -1;
2899 } else if (pid) {
2900 ret = write_pid(pid, pidfile);
2901 free(pidfile);
2902 _exit(ret);
2903 }
2904
2905 setsid();
2906 openlog("fio", LOG_NDELAY|LOG_NOWAIT|LOG_PID, LOG_USER);
2907 log_syslog = true;
2908
2909 file = freopen("/dev/null", "r", stdin);
2910 if (!file)
2911 perror("freopen");
2912
2913 file = freopen("/dev/null", "w", stdout);
2914 if (!file)
2915 perror("freopen");
2916
2917 file = freopen("/dev/null", "w", stderr);
2918 if (!file)
2919 perror("freopen");
2920
2921 f_out = NULL;
2922 f_err = NULL;
2923
2924 ret = fio_server();
2925
2926 fclose(stdin);
2927 fclose(stdout);
2928 fclose(stderr);
2929
2930 closelog();
2931 unlink(pidfile);
2932 free(pidfile);
2933 return ret;
2934}
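
/*
 * Illustrative sketch, not fio code: a backgrounded server is typically
 * started with "fio --server --daemonize=/path/to/pidfile". The helper below
 * shows how a C wrapper could read that pidfile back and terminate the
 * server; error handling is deliberately minimal.
 */
static int stop_daemonized_server(const char *pidfile)
{
	char buf[16];
	pid_t pid;
	FILE *f;

	f = fopen(pidfile, "r");
	if (!f)
		return -1;

	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	pid = atoi(buf);
	if (pid <= 0)
		return -1;

	return kill(pid, SIGTERM);
}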
2935
2936void fio_server_set_arg(const char *arg)
2937{
2938 fio_server_arg = strdup(arg);
2939}
2940
2941#ifdef WIN32
2942void fio_server_internal_set(const char *arg)
2943{
2944 fio_server_pipe_name = strdup(arg);
2945}
2946#endif