Ensure that command line options also end up in json output
[fio.git] / server.c
1#include <stdio.h>
2#include <stdlib.h>
3#include <stdarg.h>
4#include <unistd.h>
5#include <limits.h>
6#include <errno.h>
7#include <sys/poll.h>
8#include <sys/types.h>
9#include <sys/wait.h>
10#include <sys/socket.h>
11#include <sys/stat.h>
12#include <sys/un.h>
13#include <sys/uio.h>
14#include <netinet/in.h>
15#include <arpa/inet.h>
16#include <netdb.h>
17#include <syslog.h>
18#include <signal.h>
19#ifdef CONFIG_ZLIB
20#include <zlib.h>
21#endif
22
23#include "fio.h"
24#include "options.h"
25#include "server.h"
26#include "crc/crc16.h"
27#include "lib/ieee754.h"
28#include "verify.h"
29#include "smalloc.h"
30
31int fio_net_port = FIO_NET_PORT;
32
33int exit_backend = 0;
34
35enum {
36 SK_F_FREE = 1,
37 SK_F_COPY = 2,
38 SK_F_SIMPLE = 4,
39 SK_F_VEC = 8,
40};
41
42struct sk_entry {
43 struct flist_head list; /* link on sk_out->list */
44 int flags; /* SK_F_* */
45 int opcode; /* Actual command fields */
46 void *buf;
47 off_t size;
48 uint64_t *tagptr;
49 struct flist_head next; /* Other sk_entry's, if linked command */
50};
51
52struct sk_out {
53 unsigned int refs; /* frees sk_out when it drops to zero.
54 * protected by below ->lock */
55
56 int sk; /* socket fd to talk to client */
57 struct fio_mutex *lock; /* protects ref and below list */
58 struct flist_head list; /* list of pending transmit work */
59 struct fio_mutex *wait; /* wake backend when items added to list */
60};
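/*
 * Note on struct sk_out: worker threads queue struct sk_entry items on
 * ->list (under ->lock) via fio_net_queue_cmd() and then post ->wait;
 * the per-connection process drains the list in handle_xmits() and is
 * the only context that actually writes to ->sk.
 */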
61
62static char *fio_server_arg;
63static char *bind_sock;
64static struct sockaddr_in saddr_in;
65static struct sockaddr_in6 saddr_in6;
66static int use_ipv6;
67#ifdef CONFIG_ZLIB
68static unsigned int has_zlib = 1;
69#else
70static unsigned int has_zlib = 0;
71#endif
72static unsigned int use_zlib;
73static char me[128];
74
75static pthread_key_t sk_out_key;
76
77struct fio_fork_item {
78 struct flist_head list;
79 int exitval;
80 int signal;
81 int exited;
82 pid_t pid;
83};
84
85struct cmd_reply {
86 struct fio_mutex lock;
87 void *data;
88 size_t size;
89 int error;
90};
91
92static const char *fio_server_ops[FIO_NET_CMD_NR] = {
93 "",
94 "QUIT",
95 "EXIT",
96 "JOB",
97 "JOBLINE",
98 "TEXT",
99 "TS",
100 "GS",
101 "SEND_ETA",
102 "ETA",
103 "PROBE",
104 "START",
105 "STOP",
106 "DISK_UTIL",
107 "SERVER_START",
108 "ADD_JOB",
109 "RUN",
110 "IOLOG",
111 "UPDATE_JOB",
112 "LOAD_FILE",
113 "VTRIGGER",
114 "SENDFILE",
115};
116
117static void sk_lock(struct sk_out *sk_out)
118{
119 fio_mutex_down(sk_out->lock);
120}
121
122static void sk_unlock(struct sk_out *sk_out)
123{
124 fio_mutex_up(sk_out->lock);
125}
126
127void sk_out_assign(struct sk_out *sk_out)
128{
129 if (!sk_out)
130 return;
131
132 sk_lock(sk_out);
133 sk_out->refs++;
134 sk_unlock(sk_out);
135 pthread_setspecific(sk_out_key, sk_out);
136}
137
138static void sk_out_free(struct sk_out *sk_out)
139{
140 fio_mutex_remove(sk_out->lock);
141 fio_mutex_remove(sk_out->wait);
142 sfree(sk_out);
143}
144
145static int __sk_out_drop(struct sk_out *sk_out)
146{
147 if (sk_out) {
148 int refs;
149
150 sk_lock(sk_out);
151 refs = --sk_out->refs;
152 sk_unlock(sk_out);
153
154 if (!refs) {
155 sk_out_free(sk_out);
156 return 0;
157 }
158 }
159
160 return 1;
161}
162
163void sk_out_drop(void)
164{
165 struct sk_out *sk_out;
166
167 sk_out = pthread_getspecific(sk_out_key);
168 if (!__sk_out_drop(sk_out))
169 pthread_setspecific(sk_out_key, NULL);
170}
171
172static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
173 uint32_t pdu_len, uint64_t tag)
174{
175 memset(cmd, 0, sizeof(*cmd));
176
177 cmd->version = __cpu_to_le16(FIO_SERVER_VER);
178 cmd->opcode = cpu_to_le16(opcode);
179 cmd->tag = cpu_to_le64(tag);
180 cmd->pdu_len = cpu_to_le32(pdu_len);
181}
182
183
184static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
185 const void *pdu, uint32_t pdu_len, uint64_t tag)
186{
187 __fio_init_net_cmd(cmd, opcode, pdu_len, tag);
188
189 if (pdu)
190 memcpy(&cmd->payload, pdu, pdu_len);
191}
192
193const char *fio_server_op(unsigned int op)
194{
195 static char buf[32];
196
197 if (op < FIO_NET_CMD_NR)
198 return fio_server_ops[op];
199
200 sprintf(buf, "UNKNOWN/%d", op);
201 return buf;
202}
203
204static ssize_t iov_total_len(const struct iovec *iov, int count)
205{
206 ssize_t ret = 0;
207
208 while (count--) {
209 ret += iov->iov_len;
210 iov++;
211 }
212
213 return ret;
214}
215
216static int fio_sendv_data(int sk, struct iovec *iov, int count)
217{
218 ssize_t total_len = iov_total_len(iov, count);
219 ssize_t ret;
220
221 do {
222 ret = writev(sk, iov, count);
223 if (ret > 0) {
224 total_len -= ret;
225 if (!total_len)
226 break;
227
228 while (ret) {
229 if (ret >= iov->iov_len) {
230 ret -= iov->iov_len;
231 iov++;
232 continue;
233 }
234 iov->iov_base += ret;
235 iov->iov_len -= ret;
236 ret = 0;
237 }
238 } else if (!ret)
239 break;
240 else if (errno == EAGAIN || errno == EINTR)
241 continue;
242 else
243 break;
244 } while (!exit_backend);
245
246 if (!total_len)
247 return 0;
248
249 return 1;
250}
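/*
 * Note on fio_sendv_data() above: short writes are handled by advancing
 * the iovec array in place, so callers must not reuse the iovecs
 * afterwards. Returns 0 if the full payload was sent, 1 otherwise.
 */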
251
252static int fio_send_data(int sk, const void *p, unsigned int len)
253{
254 struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
255
256 assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
257
258 return fio_sendv_data(sk, &iov, 1);
259}
260
261static int fio_recv_data(int sk, void *p, unsigned int len)
262{
263 do {
264 int ret = recv(sk, p, len, MSG_WAITALL);
265
266 if (ret > 0) {
267 len -= ret;
268 if (!len)
269 break;
270 p += ret;
271 continue;
272 } else if (!ret)
273 break;
274 else if (errno == EAGAIN || errno == EINTR)
275 continue;
276 else
277 break;
278 } while (!exit_backend);
279
280 if (!len)
281 return 0;
282
283 return -1;
284}
285
286static int verify_convert_cmd(struct fio_net_cmd *cmd)
287{
288 uint16_t crc;
289
290 cmd->cmd_crc16 = le16_to_cpu(cmd->cmd_crc16);
291 cmd->pdu_crc16 = le16_to_cpu(cmd->pdu_crc16);
292
293 crc = fio_crc16(cmd, FIO_NET_CMD_CRC_SZ);
294 if (crc != cmd->cmd_crc16) {
295 log_err("fio: server bad crc on command (got %x, wanted %x)\n",
296 cmd->cmd_crc16, crc);
297 return 1;
298 }
299
300 cmd->version = le16_to_cpu(cmd->version);
301 cmd->opcode = le16_to_cpu(cmd->opcode);
302 cmd->flags = le32_to_cpu(cmd->flags);
303 cmd->tag = le64_to_cpu(cmd->tag);
304 cmd->pdu_len = le32_to_cpu(cmd->pdu_len);
305
306 switch (cmd->version) {
307 case FIO_SERVER_VER:
308 break;
309 default:
310 log_err("fio: bad server cmd version %d\n", cmd->version);
311 return 1;
312 }
313
314 if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
315 log_err("fio: command payload too large: %u\n", cmd->pdu_len);
316 return 1;
317 }
318
319 return 0;
320}
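/*
 * Wire format note for verify_convert_cmd() above: all header fields are
 * little endian. cmd_crc16 covers the first FIO_NET_CMD_CRC_SZ bytes of
 * the header and is verified before the remaining fields are byte swapped
 * to host order; pdu_crc16 is checked against the payload by the caller
 * once the payload has actually been received.
 */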
321
322/*
323 * Read (and defragment, if necessary) incoming commands
324 */
325struct fio_net_cmd *fio_net_recv_cmd(int sk)
326{
327 struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
328 size_t cmd_size = 0, pdu_offset = 0;
329 uint16_t crc;
330 int ret, first = 1;
331 void *pdu = NULL;
332
333 do {
334 ret = fio_recv_data(sk, &cmd, sizeof(cmd));
335 if (ret)
336 break;
337
338 /* We have a command, verify it and swap if need be */
339 ret = verify_convert_cmd(&cmd);
340 if (ret)
341 break;
342
343 if (first) {
344 /* if this is text, add room for \0 at the end */
345 cmd_size = sizeof(cmd) + cmd.pdu_len + 1;
346 assert(!cmdret);
347 } else
348 cmd_size += cmd.pdu_len;
349
350 if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) {
351 log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size);
352 ret = 1;
353 break;
354 }
355
356 tmp = realloc(cmdret, cmd_size);
357 if (!tmp) {
358 log_err("fio: server failed allocating cmd\n");
359 ret = 1;
360 break;
361 }
362 cmdret = tmp;
363
364 if (first)
365 memcpy(cmdret, &cmd, sizeof(cmd));
366 else if (cmdret->opcode != cmd.opcode) {
367 log_err("fio: fragment opcode mismatch (%d != %d)\n",
368 cmdret->opcode, cmd.opcode);
369 ret = 1;
370 break;
371 }
372
373 if (!cmd.pdu_len)
374 break;
375
376 /* There's payload, get it */
377 pdu = (void *) cmdret->payload + pdu_offset;
378 ret = fio_recv_data(sk, pdu, cmd.pdu_len);
379 if (ret)
380 break;
381
382 /* Verify payload crc */
383 crc = fio_crc16(pdu, cmd.pdu_len);
384 if (crc != cmd.pdu_crc16) {
385 log_err("fio: server bad crc on payload ");
386 log_err("(got %x, wanted %x)\n", cmd.pdu_crc16, crc);
387 ret = 1;
388 break;
389 }
390
391 pdu_offset += cmd.pdu_len;
392 if (!first)
393 cmdret->pdu_len += cmd.pdu_len;
394 first = 0;
395 } while (cmd.flags & FIO_NET_CMD_F_MORE);
396
397 if (ret) {
398 free(cmdret);
399 cmdret = NULL;
400 } else if (cmdret) {
401 /* zero-terminate text input */
402 if (cmdret->pdu_len) {
403 if (cmdret->opcode == FIO_NET_CMD_TEXT) {
404 struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload;
405 char *buf = (char *) __pdu->buf;
406
407 buf[__pdu->buf_len] = '\0';
408 } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
409 struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload;
410 char *buf = (char *) __pdu->buf;
411 int len = le32_to_cpu(__pdu->buf_len);
412
413 buf[len] = '\0';
414 }
415 }
416
417 /* frag flag is internal */
418 cmdret->flags &= ~FIO_NET_CMD_F_MORE;
419 }
420
421 return cmdret;
422}
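/*
 * Minimal usage sketch for fio_net_recv_cmd() above (see
 * handle_connection() below for the real loop):
 *
 *	struct fio_net_cmd *cmd = fio_net_recv_cmd(sk);
 *
 *	if (cmd) {
 *		handle_command(sk_out, &job_list, cmd);
 *		free(cmd);
 *	}
 *
 * The returned command is a single allocation holding header plus
 * payload, with fragments already merged and text payloads zero
 * terminated.
 */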
423
424static void add_reply(uint64_t tag, struct flist_head *list)
425{
426 struct fio_net_cmd_reply *reply;
427
428 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
429 flist_add_tail(&reply->list, list);
430}
431
432static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
433{
434 struct fio_net_cmd_reply *reply;
435
436 reply = calloc(1, sizeof(*reply));
437 INIT_FLIST_HEAD(&reply->list);
438 fio_gettime(&reply->tv, NULL);
439 reply->saved_tag = tag;
440 reply->opcode = opcode;
441
442 return (uintptr_t) reply;
443}
444
445static void free_reply(uint64_t tag)
446{
447 struct fio_net_cmd_reply *reply;
448
449 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
450 free(reply);
451}
452
453static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
454{
455 uint32_t pdu_len;
456
457 cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
458
459 pdu_len = le32_to_cpu(cmd->pdu_len);
460 cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
461}
462
463static void fio_net_cmd_crc(struct fio_net_cmd *cmd)
464{
465 fio_net_cmd_crc_pdu(cmd, cmd->payload);
466}
467
468int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
469 uint64_t *tagptr, struct flist_head *list)
470{
471 struct fio_net_cmd *cmd = NULL;
472 size_t this_len, cur_len = 0;
473 uint64_t tag;
474 int ret;
475
476 if (list) {
477 assert(tagptr);
478 tag = *tagptr = alloc_reply(*tagptr, opcode);
479 } else
480 tag = tagptr ? *tagptr : 0;
481
482 do {
483 this_len = size;
484 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
485 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
486
487 if (!cmd || cur_len < sizeof(*cmd) + this_len) {
488 if (cmd)
489 free(cmd);
490
491 cur_len = sizeof(*cmd) + this_len;
492 cmd = malloc(cur_len);
493 }
494
495 fio_init_net_cmd(cmd, opcode, buf, this_len, tag);
496
497 if (this_len < size)
498 cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);
499
500 fio_net_cmd_crc(cmd);
501
502 ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
503 size -= this_len;
504 buf += this_len;
505 } while (!ret && size);
506
507 if (list) {
508 if (ret)
509 free_reply(tag);
510 else
511 add_reply(tag, list);
512 }
513
514 if (cmd)
515 free(cmd);
516
517 return ret;
518}
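/*
 * Note on fio_net_send_cmd() above: payloads larger than
 * FIO_SERVER_MAX_FRAGMENT_PDU are split into multiple commands. Every
 * fragment but the last carries FIO_NET_CMD_F_MORE, and
 * fio_net_recv_cmd() on the receiving end stitches them back together.
 */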
519
520static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf, off_t size,
521 uint64_t *tagptr, int flags)
522{
523 struct sk_entry *entry;
524
525 entry = smalloc(sizeof(*entry));
526 INIT_FLIST_HEAD(&entry->next);
527 entry->opcode = opcode;
528 if (flags & SK_F_COPY) {
529 entry->buf = smalloc(size);
530 memcpy(entry->buf, buf, size);
531 } else
532 entry->buf = buf;
533 entry->size = size;
534 entry->tagptr = tagptr;
535 entry->flags = flags;
536
537 return entry;
538}
539
540static void fio_net_queue_entry(struct sk_entry *entry)
541{
542 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
543
544 sk_lock(sk_out);
545 flist_add_tail(&entry->list, &sk_out->list);
546 sk_unlock(sk_out);
547
548 fio_mutex_up(sk_out->wait);
549}
550
551static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
552 uint64_t *tagptr, int flags)
553{
554 struct sk_entry *entry;
555
556 entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
557 fio_net_queue_entry(entry);
558 return 0;
559}
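/*
 * fio_net_queue_cmd() above is the deferred-send path: the command is
 * only written to the socket once the connection loop runs
 * handle_xmits(). The SK_F_ flags control buffer ownership and the
 * transmit method:
 *
 *	SK_F_COPY	buf is copied into smalloc storage at queue time
 *	SK_F_FREE	buf is owned by the entry and free()d after send
 *	SK_F_SIMPLE	header-only command, no payload
 *	SK_F_VEC	header and payload sent as separate iovecs
 */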
560
561static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
562{
563 struct fio_net_cmd cmd;
564
565 fio_init_net_cmd(&cmd, opcode, NULL, 0, tag);
566 fio_net_cmd_crc(&cmd);
567
568 return fio_send_data(sk, &cmd, sizeof(cmd));
569}
570
571/*
572 * If 'list' is non-NULL, then allocate and store the sent command for
573 * later verification.
574 */
575int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
576 struct flist_head *list)
577{
578 int ret;
579
580 if (list)
581 tag = alloc_reply(tag, opcode);
582
583 ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
584 if (ret) {
585 if (list)
586 free_reply(tag);
587
588 return ret;
589 }
590
591 if (list)
592 add_reply(tag, list);
593
594 return 0;
595}
596
597static int fio_net_queue_quit(void)
598{
599 dprint(FD_NET, "server: sending quit\n");
600
601 return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, 0, SK_F_SIMPLE);
602}
603
604int fio_net_send_quit(int sk)
605{
606 dprint(FD_NET, "server: sending quit\n");
607
608 return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
609}
610
611static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal)
612{
613 struct cmd_end_pdu epdu;
614 uint64_t tag = 0;
615
616 if (cmd)
617 tag = cmd->tag;
618
619 epdu.error = __cpu_to_le32(error);
620 epdu.signal = __cpu_to_le32(signal);
621 return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY);
622}
623
624static int fio_net_queue_stop(int error, int signal)
625{
626 dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
627 return fio_net_send_ack(NULL, error, signal);
628}
629
630static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
631{
632 struct fio_fork_item *ffi;
633
634 ffi = malloc(sizeof(*ffi));
635 ffi->exitval = 0;
636 ffi->signal = 0;
637 ffi->exited = 0;
638 ffi->pid = pid;
639 flist_add_tail(&ffi->list, list);
640}
641
642static void fio_server_add_conn_pid(struct flist_head *conn_list, pid_t pid)
643{
644 dprint(FD_NET, "server: forked off connection job (pid=%u)\n", (int) pid);
645 fio_server_add_fork_item(pid, conn_list);
646}
647
648static void fio_server_add_job_pid(struct flist_head *job_list, pid_t pid)
649{
650 dprint(FD_NET, "server: forked off job (pid=%u)\n", (int) pid);
651 fio_server_add_fork_item(pid, job_list);
652}
653
654static void fio_server_check_fork_item(struct fio_fork_item *ffi)
655{
656 int ret, status;
657
658 ret = waitpid(ffi->pid, &status, WNOHANG);
659 if (ret < 0) {
660 if (errno == ECHILD) {
661 log_err("fio: connection pid %u disappeared\n", (int) ffi->pid);
662 ffi->exited = 1;
663 } else
664 log_err("fio: waitpid: %s\n", strerror(errno));
665 } else if (ret == ffi->pid) {
666 if (WIFSIGNALED(status)) {
667 ffi->signal = WTERMSIG(status);
668 ffi->exited = 1;
669 }
670 if (WIFEXITED(status)) {
671 if (WEXITSTATUS(status))
672 ffi->exitval = WEXITSTATUS(status);
673 ffi->exited = 1;
674 }
675 }
676}
677
678static void fio_server_fork_item_done(struct fio_fork_item *ffi, bool stop)
679{
680 dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval);
681
682 /*
683 * Fold STOP and QUIT...
684 */
685 if (stop) {
686 fio_net_queue_stop(ffi->exitval, ffi->signal);
687 fio_net_queue_quit();
688 }
689
690 flist_del(&ffi->list);
691 free(ffi);
692}
693
694static void fio_server_check_fork_items(struct flist_head *list, bool stop)
695{
696 struct flist_head *entry, *tmp;
697 struct fio_fork_item *ffi;
698
699 flist_for_each_safe(entry, tmp, list) {
700 ffi = flist_entry(entry, struct fio_fork_item, list);
701
702 fio_server_check_fork_item(ffi);
703
704 if (ffi->exited)
705 fio_server_fork_item_done(ffi, stop);
706 }
707}
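/*
 * Two fork item lists are kept: one for connection handler pids
 * (checked with stop == false) and one for job pids (stop == true).
 * Only job exits trigger the STOP + QUIT sequence back to the client.
 */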
708
709static void fio_server_check_jobs(struct flist_head *job_list)
710{
711 fio_server_check_fork_items(job_list, true);
712}
713
714static void fio_server_check_conns(struct flist_head *conn_list)
715{
716 fio_server_check_fork_items(conn_list, false);
717}
718
719static int handle_load_file_cmd(struct fio_net_cmd *cmd)
720{
721 struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload;
722 void *file_name = pdu->file;
723 struct cmd_start_pdu spdu;
724
725 dprint(FD_NET, "server: loading local file %s\n", (char *) file_name);
726
727 pdu->name_len = le16_to_cpu(pdu->name_len);
728 pdu->client_type = le16_to_cpu(pdu->client_type);
729
730 if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) {
731 fio_net_queue_quit();
732 return -1;
733 }
734
735 spdu.jobs = cpu_to_le32(thread_number);
736 spdu.stat_outputs = cpu_to_le32(stat_number);
737 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
738 return 0;
739}
740
741static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list,
742 struct fio_net_cmd *cmd)
743{
744 pid_t pid;
745 int ret;
746
747 fio_time_init();
748 set_genesis_time();
749
750 pid = fork();
751 if (pid) {
752 fio_server_add_job_pid(job_list, pid);
753 return 0;
754 }
755
756 ret = fio_backend(sk_out);
757 free_threads_shm();
758 _exit(ret);
759}
760
761static int handle_job_cmd(struct fio_net_cmd *cmd)
762{
763 struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
764 void *buf = pdu->buf;
765 struct cmd_start_pdu spdu;
766
767 pdu->buf_len = le32_to_cpu(pdu->buf_len);
768 pdu->client_type = le32_to_cpu(pdu->client_type);
769
770 if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
771 fio_net_queue_quit();
772 return -1;
773 }
774
775 spdu.jobs = cpu_to_le32(thread_number);
776 spdu.stat_outputs = cpu_to_le32(stat_number);
777
778 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
779 return 0;
780}
781
782static int handle_jobline_cmd(struct fio_net_cmd *cmd)
783{
784 void *pdu = cmd->payload;
785 struct cmd_single_line_pdu *cslp;
786 struct cmd_line_pdu *clp;
787 unsigned long offset;
788 struct cmd_start_pdu spdu;
789 char **argv;
790 int i;
791
792 clp = pdu;
793 clp->lines = le16_to_cpu(clp->lines);
794 clp->client_type = le16_to_cpu(clp->client_type);
795 argv = malloc(clp->lines * sizeof(char *));
796 offset = sizeof(*clp);
797
798 dprint(FD_NET, "server: %d command line args\n", clp->lines);
799
800 for (i = 0; i < clp->lines; i++) {
801 cslp = pdu + offset;
802 argv[i] = (char *) cslp->text;
803
804 offset += sizeof(*cslp) + le16_to_cpu(cslp->len);
805 dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
806 }
807
808 if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
809 fio_net_queue_quit();
810 free(argv);
811 return -1;
812 }
813
814 free(argv);
815
816 spdu.jobs = cpu_to_le32(thread_number);
817 spdu.stat_outputs = cpu_to_le32(stat_number);
818
819 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
820 return 0;
821}
822
823static int handle_probe_cmd(struct fio_net_cmd *cmd)
824{
825 struct cmd_client_probe_pdu *pdu = (struct cmd_client_probe_pdu *) cmd->payload;
826 struct cmd_probe_reply_pdu probe;
827 uint64_t tag = cmd->tag;
828
829 dprint(FD_NET, "server: sending probe reply\n");
830
831 strcpy(me, (char *) pdu->server);
832
833 memset(&probe, 0, sizeof(probe));
834 gethostname((char *) probe.hostname, sizeof(probe.hostname));
835#ifdef CONFIG_BIG_ENDIAN
836 probe.bigendian = 1;
837#endif
838 strncpy((char *) probe.fio_version, fio_version_string, sizeof(probe.fio_version));
839
840 probe.os = FIO_OS;
841 probe.arch = FIO_ARCH;
842 probe.bpp = sizeof(void *);
843 probe.cpus = __cpu_to_le32(cpus_online());
844
845 /*
846 * If the client supports compression and we do too, then enable it
847 */
848 if (has_zlib && le64_to_cpu(pdu->flags) & FIO_PROBE_FLAG_ZLIB) {
849 probe.flags = __cpu_to_le64(FIO_PROBE_FLAG_ZLIB);
850 use_zlib = 1;
851 } else {
852 probe.flags = 0;
853 use_zlib = 0;
854 }
855
856 return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY);
857}
858
859static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
860{
861 struct jobs_eta *je;
862 uint64_t tag = cmd->tag;
863 size_t size;
864 int i;
865
866 dprint(FD_NET, "server sending status\n");
867
868 /*
869 * Fake ETA return if we don't have a local one, otherwise the client
870 * will end up timing out waiting for a response to the ETA request
871 */
872 je = get_jobs_eta(true, &size);
873 if (!je) {
874 size = sizeof(*je);
875 je = calloc(1, size);
876 } else {
877 je->nr_running = cpu_to_le32(je->nr_running);
878 je->nr_ramp = cpu_to_le32(je->nr_ramp);
879 je->nr_pending = cpu_to_le32(je->nr_pending);
880 je->nr_setting_up = cpu_to_le32(je->nr_setting_up);
881 je->files_open = cpu_to_le32(je->files_open);
882
883 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
884 je->m_rate[i] = cpu_to_le32(je->m_rate[i]);
885 je->t_rate[i] = cpu_to_le32(je->t_rate[i]);
886 je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
887 je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
888 je->rate[i] = cpu_to_le32(je->rate[i]);
889 je->iops[i] = cpu_to_le32(je->iops[i]);
890 }
891
892 je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
893 je->eta_sec = cpu_to_le64(je->eta_sec);
894 je->nr_threads = cpu_to_le32(je->nr_threads);
895 je->is_pow2 = cpu_to_le32(je->is_pow2);
896 je->unit_base = cpu_to_le32(je->unit_base);
897 }
898
899 fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE);
900 return 0;
901}
902
903static int send_update_job_reply(uint64_t __tag, int error)
904{
905 uint64_t tag = __tag;
906 uint32_t pdu_error;
907
908 pdu_error = __cpu_to_le32(error);
909 return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY);
910}
911
912static int handle_update_job_cmd(struct fio_net_cmd *cmd)
913{
914 struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
915 struct thread_data *td;
916 uint32_t tnumber;
917
918 tnumber = le32_to_cpu(pdu->thread_number);
919
920 dprint(FD_NET, "server: updating options for job %u\n", tnumber);
921
922 if (!tnumber || tnumber > thread_number) {
923 send_update_job_reply(cmd->tag, ENODEV);
924 return 0;
925 }
926
927 td = &threads[tnumber - 1];
928 convert_thread_options_to_cpu(&td->o, &pdu->top);
929 send_update_job_reply(cmd->tag, 0);
930 return 0;
931}
932
933static int handle_trigger_cmd(struct fio_net_cmd *cmd)
934{
935 struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload;
936 char *buf = (char *) pdu->cmd;
937 struct all_io_list *rep;
938 size_t sz;
939
940 pdu->len = le16_to_cpu(pdu->len);
941 buf[pdu->len] = '\0';
942
943 rep = get_all_io_list(IO_LIST_ALL, &sz);
944 if (!rep) {
945 struct all_io_list state;
946
947 state.threads = cpu_to_le64((uint64_t) 0);
948 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY);
949 } else
950 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE);
951
952 exec_trigger(buf);
953 return 0;
954}
955
956static int handle_command(struct sk_out *sk_out, struct flist_head *job_list,
957 struct fio_net_cmd *cmd)
958{
959 int ret;
960
961 dprint(FD_NET, "server: got op [%s], pdu=%u, tag=%llx\n",
962 fio_server_op(cmd->opcode), cmd->pdu_len,
963 (unsigned long long) cmd->tag);
964
965 switch (cmd->opcode) {
966 case FIO_NET_CMD_QUIT:
967 fio_terminate_threads(TERMINATE_ALL);
968 ret = 0;
969 break;
970 case FIO_NET_CMD_EXIT:
971 exit_backend = 1;
972 return -1;
973 case FIO_NET_CMD_LOAD_FILE:
974 ret = handle_load_file_cmd(cmd);
975 break;
976 case FIO_NET_CMD_JOB:
977 ret = handle_job_cmd(cmd);
978 break;
979 case FIO_NET_CMD_JOBLINE:
980 ret = handle_jobline_cmd(cmd);
981 break;
982 case FIO_NET_CMD_PROBE:
983 ret = handle_probe_cmd(cmd);
984 break;
985 case FIO_NET_CMD_SEND_ETA:
986 ret = handle_send_eta_cmd(cmd);
987 break;
988 case FIO_NET_CMD_RUN:
989 ret = handle_run_cmd(sk_out, job_list, cmd);
990 break;
991 case FIO_NET_CMD_UPDATE_JOB:
992 ret = handle_update_job_cmd(cmd);
993 break;
994 case FIO_NET_CMD_VTRIGGER:
995 ret = handle_trigger_cmd(cmd);
996 break;
997 case FIO_NET_CMD_SENDFILE: {
998 struct cmd_sendfile_reply *in;
999 struct cmd_reply *rep;
1000
1001 rep = (struct cmd_reply *) (uintptr_t) cmd->tag;
1002
1003 in = (struct cmd_sendfile_reply *) cmd->payload;
1004 in->size = le32_to_cpu(in->size);
1005 in->error = le32_to_cpu(in->error);
1006 if (in->error) {
1007 ret = 1;
1008 rep->error = in->error;
1009 } else {
1010 ret = 0;
1011 rep->data = smalloc(in->size);
1012 if (!rep->data) {
1013 ret = 1;
1014 rep->error = ENOMEM;
1015 } else {
1016 rep->size = in->size;
1017 memcpy(rep->data, in->data, in->size);
1018 }
1019 }
1020 fio_mutex_up(&rep->lock);
1021 break;
1022 }
1023 default:
1024 log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
1025 ret = 1;
1026 }
1027
1028 return ret;
1029}
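/*
 * Note on handle_command() above: a non-zero return makes
 * handle_connection() drop out of its receive loop and close the
 * connection; FIO_NET_CMD_EXIT additionally sets exit_backend so the
 * accept loop terminates as well.
 */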
1030
1031/*
1032 * Send a command with a separate PDU, not inlined in the command
1033 */
1034static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
1035 off_t size, uint64_t tag, uint32_t flags)
1036{
1037 struct fio_net_cmd cmd;
1038 struct iovec iov[2];
1039
1040 iov[0].iov_base = (void *) &cmd;
1041 iov[0].iov_len = sizeof(cmd);
1042 iov[1].iov_base = (void *) buf;
1043 iov[1].iov_len = size;
1044
1045 __fio_init_net_cmd(&cmd, opcode, size, tag);
1046 cmd.flags = __cpu_to_le32(flags);
1047 fio_net_cmd_crc_pdu(&cmd, buf);
1048
1049 return fio_sendv_data(sk, iov, 2);
1050}
1051
1052static void finish_entry(struct sk_entry *entry)
1053{
1054 if (entry->flags & SK_F_FREE)
1055 free(entry->buf);
1056 else if (entry->flags & SK_F_COPY)
1057 sfree(entry->buf);
1058
1059 sfree(entry);
1060}
1061
1062static void entry_set_flags_tag(struct sk_entry *entry, struct flist_head *list,
1063 unsigned int *flags, uint64_t *tag)
1064{
1065 if (!flist_empty(list))
1066 *flags = FIO_NET_CMD_F_MORE;
1067 else
1068 *flags = 0;
1069
1070 if (entry->tagptr)
1071 *tag = *entry->tagptr;
1072 else
1073 *tag = 0;
1074}
1075
1076static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
1077{
1078 unsigned int flags;
1079 uint64_t tag;
1080 int ret;
1081
1082 entry_set_flags_tag(first, &first->next, &flags, &tag);
1083
1084 ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf, first->size, tag, flags);
1085
1086 while (!flist_empty(&first->next)) {
1087 struct sk_entry *next;
1088
1089 next = flist_first_entry(&first->next, struct sk_entry, list);
1090 flist_del_init(&next->list);
1091
1092 entry_set_flags_tag(next, &first->next, &flags, &tag);
1093
1094 ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf, next->size, tag, flags);
1095 finish_entry(next);
1096 }
1097
1098 return ret;
1099}
1100
1101static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry)
1102{
1103 int ret;
1104
1105 if (entry->flags & SK_F_VEC)
1106 ret = send_vec_entry(sk_out, entry);
1107 else if (entry->flags & SK_F_SIMPLE) {
1108 uint64_t tag = 0;
1109
1110 if (entry->tagptr)
1111 tag = *entry->tagptr;
1112
1113 ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode, tag, NULL);
1114 } else
1115 ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf, entry->size, entry->tagptr, NULL);
1116
1117 if (ret)
1118 log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
1119
1120 finish_entry(entry);
1121 return ret;
1122}
1123
1124static int handle_xmits(struct sk_out *sk_out)
1125{
1126 struct sk_entry *entry;
1127 FLIST_HEAD(list);
1128 int ret = 0;
1129
1130 sk_lock(sk_out);
1131 if (flist_empty(&sk_out->list)) {
1132 sk_unlock(sk_out);
1133 return 0;
1134 }
1135
1136 flist_splice_init(&sk_out->list, &list);
1137 sk_unlock(sk_out);
1138
1139 while (!flist_empty(&list)) {
1140 entry = flist_entry(list.next, struct sk_entry, list);
1141 flist_del(&entry->list);
1142 ret += handle_sk_entry(sk_out, entry);
1143 }
1144
1145 return ret;
1146}
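/*
 * Note on handle_xmits() above: the pending list is spliced to a
 * private list while holding the lock, then transmitted without it, so
 * jobs can keep queueing new entries while a large batch is being
 * written out.
 */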
1147
1148static int handle_connection(struct sk_out *sk_out)
1149{
1150 struct fio_net_cmd *cmd = NULL;
1151 FLIST_HEAD(job_list);
1152 int ret = 0;
1153
1154 reset_fio_state();
1155
1156 /* read forever */
1157 while (!exit_backend) {
1158 struct pollfd pfd = {
1159 .fd = sk_out->sk,
1160 .events = POLLIN,
1161 };
1162
1163 ret = 0;
1164 do {
1165 int timeout = 1000;
1166
1167 if (!flist_empty(&job_list))
1168 timeout = 100;
1169
1170 handle_xmits(sk_out);
1171
1172 ret = poll(&pfd, 1, 0);
1173 if (ret < 0) {
1174 if (errno == EINTR)
1175 break;
1176 log_err("fio: poll: %s\n", strerror(errno));
1177 break;
1178 } else if (!ret) {
1179 fio_server_check_jobs(&job_list);
1180 fio_mutex_down_timeout(sk_out->wait, timeout);
1181 continue;
1182 }
1183
1184 if (pfd.revents & POLLIN)
1185 break;
1186 if (pfd.revents & (POLLERR|POLLHUP)) {
1187 ret = 1;
1188 break;
1189 }
1190 } while (!exit_backend);
1191
1192 fio_server_check_jobs(&job_list);
1193
1194 if (ret < 0)
1195 break;
1196
1197 cmd = fio_net_recv_cmd(sk_out->sk);
1198 if (!cmd) {
1199 ret = -1;
1200 break;
1201 }
1202
1203 ret = handle_command(sk_out, &job_list, cmd);
1204 if (ret)
1205 break;
1206
1207 free(cmd);
1208 cmd = NULL;
1209 }
1210
1211 if (cmd)
1212 free(cmd);
1213
1214 handle_xmits(sk_out);
1215
1216 close(sk_out->sk);
1217 sk_out->sk = -1;
1218 __sk_out_drop(sk_out);
1219 _exit(ret);
1220}
1221
1222/* get the address on this host bound by the input socket,
1223 * whether it is ipv6 or ipv4 */
1224
1225static int get_my_addr_str(int sk)
1226{
1227 struct sockaddr_in6 myaddr6 = { 0, };
1228 struct sockaddr_in myaddr4 = { 0, };
1229 struct sockaddr *sockaddr_p;
1230 char *net_addr;
1231 socklen_t len;
1232 int ret;
1233
1234 if (use_ipv6) {
1235 len = sizeof(myaddr6);
1236 sockaddr_p = (struct sockaddr * )&myaddr6;
1237 net_addr = (char * )&myaddr6.sin6_addr;
1238 } else {
1239 len = sizeof(myaddr4);
1240 sockaddr_p = (struct sockaddr * )&myaddr4;
1241 net_addr = (char * )&myaddr4.sin_addr;
1242 }
1243
1244 ret = getsockname(sk, sockaddr_p, &len);
1245 if (ret) {
1246 log_err("fio: getsockname: %s\n", strerror(errno));
1247 return -1;
1248 }
1249
1250 if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) {
1251 log_err("inet_ntop: failed to convert addr to string\n");
1252 return -1;
1253 }
1254
1255 dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str);
1256 return 0;
1257}
1258
1259static int accept_loop(int listen_sk)
1260{
1261 struct sockaddr_in addr;
1262 struct sockaddr_in6 addr6;
1263 socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr);
1264 struct pollfd pfd;
1265 int ret = 0, sk, exitval = 0;
1266 FLIST_HEAD(conn_list);
1267
1268 dprint(FD_NET, "server enter accept loop\n");
1269
1270 fio_set_fd_nonblocking(listen_sk, "server");
1271
1272 while (!exit_backend) {
1273 struct sk_out *sk_out;
1274 const char *from;
1275 char buf[64];
1276 pid_t pid;
1277
1278 pfd.fd = listen_sk;
1279 pfd.events = POLLIN;
1280 do {
1281 int timeout = 1000;
1282
1283 if (!flist_empty(&conn_list))
1284 timeout = 100;
1285
1286 ret = poll(&pfd, 1, timeout);
1287 if (ret < 0) {
1288 if (errno == EINTR)
1289 break;
1290 log_err("fio: poll: %s\n", strerror(errno));
1291 break;
1292 } else if (!ret) {
1293 fio_server_check_conns(&conn_list);
1294 continue;
1295 }
1296
1297 if (pfd.revents & POLLIN)
1298 break;
1299 } while (!exit_backend);
1300
1301 fio_server_check_conns(&conn_list);
1302
1303 if (exit_backend || ret < 0)
1304 break;
1305
1306 if (use_ipv6)
1307 sk = accept(listen_sk, (struct sockaddr *) &addr6, &len);
1308 else
1309 sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
1310
1311 if (sk < 0) {
1312 log_err("fio: accept: %s\n", strerror(errno));
1313 return -1;
1314 }
1315
1316 if (use_ipv6)
1317 from = inet_ntop(AF_INET6, (struct sockaddr *) &addr6.sin6_addr, buf, sizeof(buf));
1318 else
1319 from = inet_ntop(AF_INET, (struct sockaddr *) &addr.sin_addr, buf, sizeof(buf));
1320
1321 dprint(FD_NET, "server: connect from %s\n", from);
1322
1323 sk_out = smalloc(sizeof(*sk_out));
1324 sk_out->sk = sk;
1325 INIT_FLIST_HEAD(&sk_out->list);
1326 sk_out->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1327 sk_out->wait = fio_mutex_init(FIO_MUTEX_LOCKED);
1328
1329 pid = fork();
1330 if (pid) {
1331 close(sk);
1332 fio_server_add_conn_pid(&conn_list, pid);
1333 continue;
1334 }
1335
1336 /* if error, it's already logged, non-fatal */
1337 get_my_addr_str(sk);
1338
1339 /*
1340 * Assign sk_out here, it'll be dropped in handle_connection()
1341 * since that function calls _exit() when done
1342 */
1343 sk_out_assign(sk_out);
1344 handle_connection(sk_out);
1345 }
1346
1347 return exitval;
1348}
1349
1350int fio_server_text_output(int level, const char *buf, size_t len)
1351{
1352 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1353 struct cmd_text_pdu *pdu;
1354 unsigned int tlen;
1355 struct timeval tv;
1356
1357 if (!sk_out || sk_out->sk == -1)
1358 return -1;
1359
1360 tlen = sizeof(*pdu) + len;
1361 pdu = malloc(tlen);
1362
1363 pdu->level = __cpu_to_le32(level);
1364 pdu->buf_len = __cpu_to_le32(len);
1365
1366 gettimeofday(&tv, NULL);
1367 pdu->log_sec = __cpu_to_le64(tv.tv_sec);
1368 pdu->log_usec = __cpu_to_le64(tv.tv_usec);
1369
1370 memcpy(pdu->buf, buf, len);
1371
1372 fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY);
1373 free(pdu);
1374 return len;
1375}
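/*
 * fio_server_text_output() above forwards regular text output to the
 * connected client as FIO_NET_CMD_TEXT packets, tagging each with the
 * log level and the originating timestamp; it returns -1 if there is no
 * connected client to send to.
 */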
1376
1377static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
1378{
1379 dst->max_val = cpu_to_le64(src->max_val);
1380 dst->min_val = cpu_to_le64(src->min_val);
1381 dst->samples = cpu_to_le64(src->samples);
1382
1383 /*
1384 * Encode to IEEE 754 for network transfer
1385 */
1386 dst->mean.u.i = cpu_to_le64(fio_double_to_uint64(src->mean.u.f));
1387 dst->S.u.i = cpu_to_le64(fio_double_to_uint64(src->S.u.f));
1388}
1389
1390static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
1391{
1392 int i;
1393
1394 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1395 dst->max_run[i] = cpu_to_le64(src->max_run[i]);
1396 dst->min_run[i] = cpu_to_le64(src->min_run[i]);
1397 dst->max_bw[i] = cpu_to_le64(src->max_bw[i]);
1398 dst->min_bw[i] = cpu_to_le64(src->min_bw[i]);
1399 dst->io_kb[i] = cpu_to_le64(src->io_kb[i]);
1400 dst->agg[i] = cpu_to_le64(src->agg[i]);
1401 }
1402
1403 dst->kb_base = cpu_to_le32(src->kb_base);
1404 dst->unit_base = cpu_to_le32(src->unit_base);
1405 dst->groupid = cpu_to_le32(src->groupid);
1406 dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
1407}
1408
1409/*
1410 * Send a CMD_TS, which packs struct thread_stat and group_run_stats
1411 * into a single payload.
1412 */
1413void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs)
1414{
1415 struct cmd_ts_pdu p;
1416 int i, j;
1417
1418 dprint(FD_NET, "server sending end stats\n");
1419
1420 memset(&p, 0, sizeof(p));
1421
1422 strncpy(p.ts.name, ts->name, FIO_JOBNAME_SIZE - 1);
1423 strncpy(p.ts.verror, ts->verror, FIO_VERROR_SIZE - 1);
1424 strncpy(p.ts.description, ts->description, FIO_JOBDESC_SIZE - 1);
1425
1426 p.ts.error = cpu_to_le32(ts->error);
1427 p.ts.thread_number = cpu_to_le32(ts->thread_number);
1428 p.ts.groupid = cpu_to_le32(ts->groupid);
1429 p.ts.pid = cpu_to_le32(ts->pid);
1430 p.ts.members = cpu_to_le32(ts->members);
1431 p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
1432
1433 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1434 convert_io_stat(&p.ts.clat_stat[i], &ts->clat_stat[i]);
1435 convert_io_stat(&p.ts.slat_stat[i], &ts->slat_stat[i]);
1436 convert_io_stat(&p.ts.lat_stat[i], &ts->lat_stat[i]);
1437 convert_io_stat(&p.ts.bw_stat[i], &ts->bw_stat[i]);
1438 }
1439
1440 p.ts.usr_time = cpu_to_le64(ts->usr_time);
1441 p.ts.sys_time = cpu_to_le64(ts->sys_time);
1442 p.ts.ctx = cpu_to_le64(ts->ctx);
1443 p.ts.minf = cpu_to_le64(ts->minf);
1444 p.ts.majf = cpu_to_le64(ts->majf);
1445 p.ts.clat_percentiles = cpu_to_le64(ts->clat_percentiles);
1446 p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision);
1447
1448 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1449 fio_fp64_t *src = &ts->percentile_list[i];
1450 fio_fp64_t *dst = &p.ts.percentile_list[i];
1451
1452 dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f));
1453 }
1454
1455 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
1456 p.ts.io_u_map[i] = cpu_to_le32(ts->io_u_map[i]);
1457 p.ts.io_u_submit[i] = cpu_to_le32(ts->io_u_submit[i]);
1458 p.ts.io_u_complete[i] = cpu_to_le32(ts->io_u_complete[i]);
1459 }
1460
1461 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
1462 p.ts.io_u_lat_u[i] = cpu_to_le32(ts->io_u_lat_u[i]);
1463 p.ts.io_u_lat_m[i] = cpu_to_le32(ts->io_u_lat_m[i]);
1464 }
1465
1466 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1467 for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
1468 p.ts.io_u_plat[i][j] = cpu_to_le32(ts->io_u_plat[i][j]);
1469
1470 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1471 p.ts.total_io_u[i] = cpu_to_le64(ts->total_io_u[i]);
1472 p.ts.short_io_u[i] = cpu_to_le64(ts->short_io_u[i]);
1473 p.ts.drop_io_u[i] = cpu_to_le64(ts->drop_io_u[i]);
1474 }
1475
1476 p.ts.total_submit = cpu_to_le64(ts->total_submit);
1477 p.ts.total_complete = cpu_to_le64(ts->total_complete);
1478
1479 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1480 p.ts.io_bytes[i] = cpu_to_le64(ts->io_bytes[i]);
1481 p.ts.runtime[i] = cpu_to_le64(ts->runtime[i]);
1482 }
1483
1484 p.ts.total_run_time = cpu_to_le64(ts->total_run_time);
1485 p.ts.continue_on_error = cpu_to_le16(ts->continue_on_error);
1486 p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
1487 p.ts.first_error = cpu_to_le32(ts->first_error);
1488 p.ts.kb_base = cpu_to_le32(ts->kb_base);
1489 p.ts.unit_base = cpu_to_le32(ts->unit_base);
1490
1491 p.ts.latency_depth = cpu_to_le32(ts->latency_depth);
1492 p.ts.latency_target = cpu_to_le64(ts->latency_target);
1493 p.ts.latency_window = cpu_to_le64(ts->latency_window);
1494 p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
1495
1496 p.ts.nr_block_infos = le64_to_cpu(ts->nr_block_infos);
1497 for (i = 0; i < p.ts.nr_block_infos; i++)
1498 p.ts.block_infos[i] = le32_to_cpu(ts->block_infos[i]);
1499
1500 convert_gs(&p.rs, rs);
1501
1502 fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
1503}
1504
1505void fio_server_send_gs(struct group_run_stats *rs)
1506{
1507 struct group_run_stats gs;
1508
1509 dprint(FD_NET, "server sending group run stats\n");
1510
1511 convert_gs(&gs, rs);
1512 fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY);
1513}
1514
1515void fio_server_send_job_options(struct flist_head *opt_list,
1516 unsigned int groupid)
1517{
1518 struct cmd_job_option pdu;
1519 struct flist_head *entry;
1520
1521 if (flist_empty(opt_list))
1522 return;
1523
1524 flist_for_each(entry, opt_list) {
1525 struct print_option *p;
1526 size_t len;
1527
1528 p = flist_entry(entry, struct print_option, list);
1529 memset(&pdu, 0, sizeof(pdu));
1530
1531 if (groupid == -1U) {
1532 pdu.global = __cpu_to_le16(1);
1533 pdu.groupid = 0;
1534 } else {
1535 pdu.global = 0;
1536 pdu.groupid = cpu_to_le32(groupid);
1537 }
1538 len = strlen(p->name);
1539 if (len >= sizeof(pdu.name)) {
1540 len = sizeof(pdu.name) - 1;
1541 pdu.truncated = __cpu_to_le16(1);
1542 }
1543 memcpy(pdu.name, p->name, len);
1544 if (p->value) {
1545 len = strlen(p->value);
1546 if (len >= sizeof(pdu.value)) {
1547 len = sizeof(pdu.value) - 1;
1548 pdu.truncated = __cpu_to_le16(1);
1549 }
1550 memcpy(pdu.value, p->value, len);
1551 }
1552 fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1553 }
1554}
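/*
 * Note on fio_server_send_job_options() above: each option in the list
 * is shipped as its own FIO_NET_CMD_JOB_OPT packet; groupid == -1U marks
 * options from the global section (pdu.global is set instead of a group
 * id). Forwarding these is what lets a connected client include the full
 * option list in its own output (e.g. JSON).
 */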
1555
1556static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
1557{
1558 int i;
1559
1560 for (i = 0; i < 2; i++) {
1561 dst->ios[i] = cpu_to_le64(src->ios[i]);
1562 dst->merges[i] = cpu_to_le64(src->merges[i]);
1563 dst->sectors[i] = cpu_to_le64(src->sectors[i]);
1564 dst->ticks[i] = cpu_to_le64(src->ticks[i]);
1565 }
1566
1567 dst->io_ticks = cpu_to_le64(src->io_ticks);
1568 dst->time_in_queue = cpu_to_le64(src->time_in_queue);
1569 dst->slavecount = cpu_to_le32(src->slavecount);
1570 dst->max_util.u.i = cpu_to_le64(fio_double_to_uint64(src->max_util.u.f));
1571}
1572
1573static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src)
1574{
1575 int i;
1576
1577 dst->name[FIO_DU_NAME_SZ - 1] = '\0';
1578 strncpy((char *) dst->name, (char *) src->name, FIO_DU_NAME_SZ - 1);
1579
1580 for (i = 0; i < 2; i++) {
1581 dst->s.ios[i] = cpu_to_le64(src->s.ios[i]);
1582 dst->s.merges[i] = cpu_to_le64(src->s.merges[i]);
1583 dst->s.sectors[i] = cpu_to_le64(src->s.sectors[i]);
1584 dst->s.ticks[i] = cpu_to_le64(src->s.ticks[i]);
1585 }
1586
1587 dst->s.io_ticks = cpu_to_le64(src->s.io_ticks);
1588 dst->s.time_in_queue = cpu_to_le64(src->s.time_in_queue);
1589 dst->s.msec = cpu_to_le64(src->s.msec);
1590}
1591
1592void fio_server_send_du(void)
1593{
1594 struct disk_util *du;
1595 struct flist_head *entry;
1596 struct cmd_du_pdu pdu;
1597
1598 dprint(FD_NET, "server: sending disk_util %d\n", !flist_empty(&disk_list));
1599
1600 memset(&pdu, 0, sizeof(pdu));
1601
1602 flist_for_each(entry, &disk_list) {
1603 du = flist_entry(entry, struct disk_util, list);
1604
1605 convert_dus(&pdu.dus, &du->dus);
1606 convert_agg(&pdu.agg, &du->agg);
1607
1608 fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1609 }
1610}
1611
1612static int fio_send_iolog_gz(struct sk_entry *first, struct io_log *log)
1613{
1614 int ret = 0;
1615#ifdef CONFIG_ZLIB
1616 struct sk_entry *entry;
1617 z_stream stream;
1618 void *out_pdu;
1619
1620 /*
1621 * Dirty - since the log is potentially huge, compress it into
1622 * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
1623 * side defragment it.
1624 */
1625 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1626
1627 stream.zalloc = Z_NULL;
1628 stream.zfree = Z_NULL;
1629 stream.opaque = Z_NULL;
1630
1631 if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
1632 ret = 1;
1633 goto err;
1634 }
1635
1636 stream.next_in = (void *) log->log;
1637 stream.avail_in = log->nr_samples * log_entry_sz(log);
1638
1639 do {
1640 unsigned int this_len;
1641
1642 stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1643 stream.next_out = out_pdu;
1644 ret = deflate(&stream, Z_FINISH);
1645 /* may be Z_OK, or Z_STREAM_END */
1646 if (ret < 0)
1647 goto err_zlib;
1648
1649 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
1650
1651 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1652 NULL, SK_F_FREE | SK_F_VEC);
1653 flist_add_tail(&entry->list, &first->next);
1654 } while (stream.avail_in);
1655
1656err_zlib:
1657 deflateEnd(&stream);
1658err:
1659 free(out_pdu);
1660#endif
1661 return ret;
1662}
1663
1664int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
1665{
1666 struct cmd_iolog_pdu pdu;
1667 struct sk_entry *first;
1668 int i, ret = 0;
1669
1670 pdu.nr_samples = cpu_to_le64(log->nr_samples);
1671 pdu.thread_number = cpu_to_le32(td->thread_number);
1672 pdu.log_type = cpu_to_le32(log->log_type);
1673 pdu.compressed = cpu_to_le32(use_zlib);
1674
1675 strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX);
1676 pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
1677
1678 for (i = 0; i < log->nr_samples; i++) {
1679 struct io_sample *s = get_sample(log, i);
1680
1681 s->time = cpu_to_le64(s->time);
1682 s->val = cpu_to_le64(s->val);
1683 s->__ddir = cpu_to_le32(s->__ddir);
1684 s->bs = cpu_to_le32(s->bs);
1685
1686 if (log->log_offset) {
1687 struct io_sample_offset *so = (void *) s;
1688
1689 so->offset = cpu_to_le64(so->offset);
1690 }
1691 }
1692
1693 /*
1694 * Assemble header entry first
1695 */
1696 first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_COPY | SK_F_VEC);
1697
1698 /*
1699 * Now append actual log entries. Compress if we can, otherwise just
1700 * plain text output.
1701 */
1702 if (use_zlib)
1703 ret = fio_send_iolog_gz(first, log);
1704 else {
1705 struct sk_entry *entry;
1706
1707 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, log->log,
1708 log->nr_samples * log_entry_sz(log),
1709 NULL, SK_F_FREE | SK_F_VEC);
1710 flist_add_tail(&entry->list, &first->next);
1711 }
1712
1713 return ret;
1714}
1715
1716void fio_server_send_add_job(struct thread_data *td)
1717{
1718 struct cmd_add_job_pdu pdu;
1719
1720 memset(&pdu, 0, sizeof(pdu));
1721 pdu.thread_number = cpu_to_le32(td->thread_number);
1722 pdu.groupid = cpu_to_le32(td->groupid);
1723 convert_thread_options_to_net(&pdu.top, &td->o);
1724
1725 fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1726}
1727
1728void fio_server_send_start(struct thread_data *td)
1729{
1730 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1731
1732 assert(sk_out->sk != -1);
1733
1734 fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, 0, SK_F_SIMPLE);
1735}
1736
1737int fio_server_get_verify_state(const char *name, int threadnumber,
1738 void **datap, int *version)
1739{
1740 struct thread_io_list *s;
1741 struct cmd_sendfile out;
1742 struct cmd_reply *rep;
1743 uint64_t tag;
1744 void *data;
1745
1746 dprint(FD_NET, "server: request verify state\n");
1747
1748 rep = smalloc(sizeof(*rep));
1749 if (!rep) {
1750 log_err("fio: smalloc pool too small\n");
1751 return 1;
1752 }
1753
1754 __fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED);
1755 rep->data = NULL;
1756 rep->error = 0;
1757
1758 verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
1759 threadnumber);
1760 tag = (uint64_t) (uintptr_t) rep;
1761 fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag, SK_F_COPY);
1762
1763 /*
1764 * Wait for the backend to receive the reply
1765 */
1766 if (fio_mutex_down_timeout(&rep->lock, 10000)) {
1767 log_err("fio: timed out waiting for reply\n");
1768 goto fail;
1769 }
1770
1771 if (rep->error) {
1772 log_err("fio: failure on receiving state file: %s\n", strerror(rep->error));
1773fail:
1774 *datap = NULL;
1775 sfree(rep);
1776 fio_net_queue_quit();
1777 return 1;
1778 }
1779
1780 /*
1781 * The format is verify_state_hdr, then thread_io_list. Verify
1782 * the header, and the thread_io_list checksum
1783 */
1784 s = rep->data + sizeof(struct verify_state_hdr);
1785 if (verify_state_hdr(rep->data, s, version))
1786 goto fail;
1787
1788 /*
1789 * The header is no longer needed; copy just the thread_io_list
1790 */
1791 rep->size -= sizeof(struct verify_state_hdr);
1792 data = malloc(rep->size);
1793 memcpy(data, s, rep->size);
1794 *datap = data;
1795
1796 sfree(rep->data);
1797 __fio_mutex_remove(&rep->lock);
1798 sfree(rep);
1799 return 0;
1800}
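/*
 * Round trip note for fio_server_get_verify_state() above: the SENDFILE
 * request carries a tag that is really a pointer to the cmd_reply. When
 * the client's reply arrives, handle_command()'s FIO_NET_CMD_SENDFILE
 * case fills in rep->data / rep->error and ups rep->lock, which is what
 * the timed mutex wait here blocks on.
 */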
1801
1802static int fio_init_server_ip(void)
1803{
1804 struct sockaddr *addr;
1805 socklen_t socklen;
1806 char buf[80];
1807 const char *str;
1808 int sk, opt;
1809
1810 if (use_ipv6)
1811 sk = socket(AF_INET6, SOCK_STREAM, 0);
1812 else
1813 sk = socket(AF_INET, SOCK_STREAM, 0);
1814
1815 if (sk < 0) {
1816 log_err("fio: socket: %s\n", strerror(errno));
1817 return -1;
1818 }
1819
1820 opt = 1;
1821 if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) {
1822 log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno));
1823 close(sk);
1824 return -1;
1825 }
1826#ifdef SO_REUSEPORT
1827 if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
1828 log_err("fio: setsockopt(REUSEPORT): %s\n", strerror(errno));
1829 close(sk);
1830 return -1;
1831 }
1832#endif
1833
1834 if (use_ipv6) {
1835 const void *src = &saddr_in6.sin6_addr;
1836
1837 addr = (struct sockaddr *) &saddr_in6;
1838 socklen = sizeof(saddr_in6);
1839 saddr_in6.sin6_family = AF_INET6;
1840 str = inet_ntop(AF_INET6, src, buf, sizeof(buf));
1841 } else {
1842 const void *src = &saddr_in.sin_addr;
1843
1844 addr = (struct sockaddr *) &saddr_in;
1845 socklen = sizeof(saddr_in);
1846 saddr_in.sin_family = AF_INET;
1847 str = inet_ntop(AF_INET, src, buf, sizeof(buf));
1848 }
1849
1850 if (bind(sk, addr, socklen) < 0) {
1851 log_err("fio: bind: %s\n", strerror(errno));
1852 log_info("fio: failed with IPv%c %s\n", use_ipv6 ? '6' : '4', str);
1853 close(sk);
1854 return -1;
1855 }
1856
1857 return sk;
1858}
1859
1860static int fio_init_server_sock(void)
1861{
1862 struct sockaddr_un addr;
1863 socklen_t len;
1864 mode_t mode;
1865 int sk;
1866
1867 sk = socket(AF_UNIX, SOCK_STREAM, 0);
1868 if (sk < 0) {
1869 log_err("fio: socket: %s\n", strerror(errno));
1870 return -1;
1871 }
1872
1873 mode = umask(000);
1874
1875 memset(&addr, 0, sizeof(addr));
1876 addr.sun_family = AF_UNIX;
1877 strncpy(addr.sun_path, bind_sock, sizeof(addr.sun_path) - 1);
1878
1879 len = sizeof(addr.sun_family) + strlen(bind_sock) + 1;
1880
1881 if (bind(sk, (struct sockaddr *) &addr, len) < 0) {
1882 log_err("fio: bind: %s\n", strerror(errno));
1883 close(sk);
1884 return -1;
1885 }
1886
1887 umask(mode);
1888 return sk;
1889}
1890
1891static int fio_init_server_connection(void)
1892{
1893 char bind_str[128];
1894 int sk;
1895
1896 dprint(FD_NET, "starting server\n");
1897
1898 if (!bind_sock)
1899 sk = fio_init_server_ip();
1900 else
1901 sk = fio_init_server_sock();
1902
1903 if (sk < 0)
1904 return sk;
1905
1906 memset(bind_str, 0, sizeof(bind_str));
1907
1908 if (!bind_sock) {
1909 char *p, port[16];
1910 const void *src;
1911 int af;
1912
1913 if (use_ipv6) {
1914 af = AF_INET6;
1915 src = &saddr_in6.sin6_addr;
1916 } else {
1917 af = AF_INET;
1918 src = &saddr_in.sin_addr;
1919 }
1920
1921 p = (char *) inet_ntop(af, src, bind_str, sizeof(bind_str));
1922
1923 sprintf(port, ",%u", fio_net_port);
1924 if (p)
1925 strcat(p, port);
1926 else
1927 strncpy(bind_str, port, sizeof(bind_str) - 1);
1928 } else
1929 strncpy(bind_str, bind_sock, sizeof(bind_str) - 1);
1930
1931 log_info("fio: server listening on %s\n", bind_str);
1932
1933 if (listen(sk, 4) < 0) {
1934 log_err("fio: listen: %s\n", strerror(errno));
1935 close(sk);
1936 return -1;
1937 }
1938
1939 return sk;
1940}
1941
1942int fio_server_parse_host(const char *host, int ipv6, struct in_addr *inp,
1943 struct in6_addr *inp6)
1944
1945{
1946 int ret = 0;
1947
1948 if (ipv6)
1949 ret = inet_pton(AF_INET6, host, inp6);
1950 else
1951 ret = inet_pton(AF_INET, host, inp);
1952
1953 if (ret != 1) {
1954 struct addrinfo hints, *res;
1955
1956 memset(&hints, 0, sizeof(hints));
1957 hints.ai_family = ipv6 ? AF_INET6 : AF_INET;
1958 hints.ai_socktype = SOCK_STREAM;
1959
1960 ret = getaddrinfo(host, NULL, &hints, &res);
1961 if (ret) {
1962 log_err("fio: failed to resolve <%s> (%s)\n", host,
1963 gai_strerror(ret));
1964 return 1;
1965 }
1966
1967 if (ipv6)
1968 memcpy(inp6, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr, sizeof(*inp6));
1969 else
1970 memcpy(inp, &((struct sockaddr_in *) res->ai_addr)->sin_addr, sizeof(*inp));
1971
1972 ret = 1;
1973 freeaddrinfo(res);
1974 }
1975
1976 return !(ret == 1);
1977}
1978
1979/*
1980 * Parse a host/ip/port string. Reads from 'str'.
1981 *
1982 * Outputs:
1983 *
1984 * For IPv4:
1985 * *ptr is the host, *port is the port, inp is the destination.
1986 * For IPv6:
1987 * *ptr is the host, *port is the port, inp6 is the dest, and *ipv6 is 1.
1988 * For local domain sockets:
1989 * *ptr is the filename, *is_sock is 1.
1990 */
1991int fio_server_parse_string(const char *str, char **ptr, int *is_sock,
1992 int *port, struct in_addr *inp,
1993 struct in6_addr *inp6, int *ipv6)
1994{
1995 const char *host = str;
1996 char *portp;
1997 int lport = 0;
1998
1999 *ptr = NULL;
2000 *is_sock = 0;
2001 *port = fio_net_port;
2002 *ipv6 = 0;
2003
2004 if (!strncmp(str, "sock:", 5)) {
2005 *ptr = strdup(str + 5);
2006 *is_sock = 1;
2007
2008 return 0;
2009 }
2010
2011 /*
2012 * Is it ip:<ip or host>,port
2013 */
2014 if (!strncmp(host, "ip:", 3))
2015 host += 3;
2016 else if (!strncmp(host, "ip4:", 4))
2017 host += 4;
2018 else if (!strncmp(host, "ip6:", 4)) {
2019 host += 4;
2020 *ipv6 = 1;
2021 } else if (host[0] == ':') {
2022 /* String is :port */
2023 host++;
2024 lport = atoi(host);
2025 if (!lport || lport > 65535) {
2026 log_err("fio: bad server port %u\n", lport);
2027 return 1;
2028 }
2029 /* no hostname given, we are done */
2030 *port = lport;
2031 return 0;
2032 }
2033
2034 /*
2035 * If no port seen yet, check if there's a last ',' at the end
2036 */
2037 if (!lport) {
2038 portp = strchr(host, ',');
2039 if (portp) {
2040 *portp = '\0';
2041 portp++;
2042 lport = atoi(portp);
2043 if (!lport || lport > 65535) {
2044 log_err("fio: bad server port %u\n", lport);
2045 return 1;
2046 }
2047 }
2048 }
2049
2050 if (lport)
2051 *port = lport;
2052
2053 if (!strlen(host))
2054 return 0;
2055
2056 *ptr = strdup(host);
2057
2058 if (fio_server_parse_host(*ptr, *ipv6, inp, inp6)) {
2059 free(*ptr);
2060 *ptr = NULL;
2061 return 1;
2062 }
2063
2064 if (*port == 0)
2065 *port = fio_net_port;
2066
2067 return 0;
2068}
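/*
 * Examples of strings accepted by fio_server_parse_string() above
 * (a non-exhaustive sketch):
 *
 *	"sock:/tmp/fio.sock"	-> *is_sock = 1, *ptr = "/tmp/fio.sock"
 *	":8765"			-> *port = 8765, no host
 *	"ip6:::1,8765"		-> *ipv6 = 1, host "::1", port 8765
 *	"hostname,4000"		-> host resolved via DNS, port 4000
 */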
2069
2070/*
2071 * Server arg should be one of:
2072 *
2073 * sock:/path/to/socket
2074 * ip:1.2.3.4
2075 * 1.2.3.4
2076 *
2077 * Where sock uses unix domain sockets, and ip binds the server to
2078 * a specific interface. If no arguments are given to the server, it
2079 * uses IP and binds to 0.0.0.0.
2080 *
2081 */
2082static int fio_handle_server_arg(void)
2083{
2084 int port = fio_net_port;
2085 int is_sock, ret = 0;
2086
2087 saddr_in.sin_addr.s_addr = htonl(INADDR_ANY);
2088
2089 if (!fio_server_arg)
2090 goto out;
2091
2092 ret = fio_server_parse_string(fio_server_arg, &bind_sock, &is_sock,
2093 &port, &saddr_in.sin_addr,
2094 &saddr_in6.sin6_addr, &use_ipv6);
2095
2096 if (!is_sock && bind_sock) {
2097 free(bind_sock);
2098 bind_sock = NULL;
2099 }
2100
2101out:
2102 fio_net_port = port;
2103 saddr_in.sin_port = htons(port);
2104 saddr_in6.sin6_port = htons(port);
2105 return ret;
2106}
2107
2108static void sig_int(int sig)
2109{
2110 if (bind_sock)
2111 unlink(bind_sock);
2112}
2113
2114static void set_sig_handlers(void)
2115{
2116 struct sigaction act;
2117
2118 memset(&act, 0, sizeof(act));
2119 act.sa_handler = sig_int;
2120 act.sa_flags = SA_RESTART;
2121 sigaction(SIGINT, &act, NULL);
2122}
2123
2124static int fio_server(void)
2125{
2126 int sk, ret;
2127
2128 if (pthread_key_create(&sk_out_key, NULL)) {
2129 log_err("fio: can't create sk_out backend key\n");
2130 return -1;
2131 }
2132
2133 pthread_setspecific(sk_out_key, NULL);
2134
2135 dprint(FD_NET, "starting server\n");
2136
2137 if (fio_handle_server_arg())
2138 return -1;
2139
2140 sk = fio_init_server_connection();
2141 if (sk < 0)
2142 return -1;
2143
2144 set_sig_handlers();
2145
2146 ret = accept_loop(sk);
2147
2148 close(sk);
2149
2150 if (fio_server_arg) {
2151 free(fio_server_arg);
2152 fio_server_arg = NULL;
2153 }
2154 if (bind_sock)
2155 free(bind_sock);
2156
2157 return ret;
2158}
2159
2160void fio_server_got_signal(int signal)
2161{
2162 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2163
2164 assert(sk_out);
2165
2166 if (signal == SIGPIPE)
2167 sk_out->sk = -1;
2168 else {
2169 log_info("\nfio: terminating on signal %d\n", signal);
2170 exit_backend = 1;
2171 }
2172}
2173
2174static int check_existing_pidfile(const char *pidfile)
2175{
2176 struct stat sb;
2177 char buf[16];
2178 pid_t pid;
2179 FILE *f;
2180
2181 if (stat(pidfile, &sb))
2182 return 0;
2183
2184 f = fopen(pidfile, "r");
2185 if (!f)
2186 return 0;
2187
2188 if (fread(buf, sb.st_size, 1, f) <= 0) {
2189 fclose(f);
2190 return 1;
2191 }
2192 fclose(f);
2193
2194 pid = atoi(buf);
2195 if (kill(pid, SIGCONT) < 0)
2196 return errno != ESRCH;
2197
2198 return 1;
2199}
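/*
 * Liveness in check_existing_pidfile() above is probed by sending
 * SIGCONT to the recorded pid: ESRCH means the process is gone and the
 * pidfile is treated as stale, anything else is taken to mean a server
 * is still running.
 */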
2200
2201static int write_pid(pid_t pid, const char *pidfile)
2202{
2203 FILE *fpid;
2204
2205 fpid = fopen(pidfile, "w");
2206 if (!fpid) {
2207 log_err("fio: failed opening pid file %s\n", pidfile);
2208 return 1;
2209 }
2210
2211 fprintf(fpid, "%u\n", (unsigned int) pid);
2212 fclose(fpid);
2213 return 0;
2214}
2215
2216/*
2217 * If pidfile is specified, background us.
2218 */
2219int fio_start_server(char *pidfile)
2220{
2221 pid_t pid;
2222 int ret;
2223
2224#if defined(WIN32)
2225 WSADATA wsd;
2226 WSAStartup(MAKEWORD(2, 2), &wsd);
2227#endif
2228
2229 if (!pidfile)
2230 return fio_server();
2231
2232 if (check_existing_pidfile(pidfile)) {
2233 log_err("fio: pidfile %s exists and server appears alive\n",
2234 pidfile);
2235 free(pidfile);
2236 return -1;
2237 }
2238
2239 pid = fork();
2240 if (pid < 0) {
2241 log_err("fio: failed server fork: %s", strerror(errno));
2242 free(pidfile);
2243 return -1;
2244 } else if (pid) {
2245 ret = write_pid(pid, pidfile);
2246 free(pidfile);
2247 _exit(ret);
2248 }
2249
2250 setsid();
2251 openlog("fio", LOG_NDELAY|LOG_NOWAIT|LOG_PID, LOG_USER);
2252 log_syslog = 1;
2253 close(STDIN_FILENO);
2254 close(STDOUT_FILENO);
2255 close(STDERR_FILENO);
2256 f_out = NULL;
2257 f_err = NULL;
2258
2259 ret = fio_server();
2260
2261 closelog();
2262 unlink(pidfile);
2263 free(pidfile);
2264 return ret;
2265}
2266
2267void fio_server_set_arg(const char *arg)
2268{
2269 fio_server_arg = strdup(arg);
2270}