Bool conversions
[fio.git] / server.c
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <errno.h>
5#include <poll.h>
6#include <sys/types.h>
7#include <sys/wait.h>
8#include <sys/socket.h>
9#include <sys/stat.h>
10#include <sys/un.h>
11#include <sys/uio.h>
12#include <netinet/in.h>
13#include <arpa/inet.h>
14#include <netdb.h>
15#include <syslog.h>
16#include <signal.h>
17#ifdef CONFIG_ZLIB
18#include <zlib.h>
19#endif
20
21#include "fio.h"
22#include "options.h"
23#include "server.h"
24#include "crc/crc16.h"
25#include "lib/ieee754.h"
26#include "verify-state.h"
27#include "smalloc.h"
28
29int fio_net_port = FIO_NET_PORT;
30
31bool exit_backend = false;
32
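/*
 * Flags describing how a queued sk_entry is transmitted and who owns
 * its buffer: SK_F_FREE/SK_F_COPY decide whether the payload is freed
 * or sfreed after sending, SK_F_SIMPLE marks a command with no payload,
 * SK_F_VEC uses the vectored (external PDU) send path, and SK_F_INLINE
 * transmits immediately instead of queueing on sk_out->list.
 */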
33enum {
34 SK_F_FREE = 1,
35 SK_F_COPY = 2,
36 SK_F_SIMPLE = 4,
37 SK_F_VEC = 8,
38 SK_F_INLINE = 16,
39};
40
41struct sk_entry {
42 struct flist_head list; /* link on sk_out->list */
43 int flags; /* SK_F_* */
44 int opcode; /* Actual command fields */
45 void *buf;
46 off_t size;
47 uint64_t tag;
48 struct flist_head next; /* Other sk_entry's, if linked command */
49};
50
51static char *fio_server_arg;
52static char *bind_sock;
53static struct sockaddr_in saddr_in;
54static struct sockaddr_in6 saddr_in6;
55static int use_ipv6;
56#ifdef CONFIG_ZLIB
57static unsigned int has_zlib = 1;
58#else
59static unsigned int has_zlib = 0;
60#endif
61static unsigned int use_zlib;
62static char me[128];
63
64static pthread_key_t sk_out_key;
65
66struct fio_fork_item {
67 struct flist_head list;
68 int exitval;
69 int signal;
70 int exited;
71 pid_t pid;
72};
73
74struct cmd_reply {
75 struct fio_sem lock;
76 void *data;
77 size_t size;
78 int error;
79};
80
81static const char *fio_server_ops[FIO_NET_CMD_NR] = {
82 "",
83 "QUIT",
84 "EXIT",
85 "JOB",
86 "JOBLINE",
87 "TEXT",
88 "TS",
89 "GS",
90 "SEND_ETA",
91 "ETA",
92 "PROBE",
93 "START",
94 "STOP",
95 "DISK_UTIL",
96 "SERVER_START",
97 "ADD_JOB",
98 "RUN",
99 "IOLOG",
100 "UPDATE_JOB",
101 "LOAD_FILE",
102 "VTRIGGER",
103 "SENDFILE",
104 "JOB_OPT",
105};
106
107static void sk_lock(struct sk_out *sk_out)
108{
109 fio_sem_down(&sk_out->lock);
110}
111
112static void sk_unlock(struct sk_out *sk_out)
113{
114 fio_sem_up(&sk_out->lock);
115}
116
117void sk_out_assign(struct sk_out *sk_out)
118{
119 if (!sk_out)
120 return;
121
122 sk_lock(sk_out);
123 sk_out->refs++;
124 sk_unlock(sk_out);
125 pthread_setspecific(sk_out_key, sk_out);
126}
127
128static void sk_out_free(struct sk_out *sk_out)
129{
130 __fio_sem_remove(&sk_out->lock);
131 __fio_sem_remove(&sk_out->wait);
132 __fio_sem_remove(&sk_out->xmit);
133 sfree(sk_out);
134}
135
136static int __sk_out_drop(struct sk_out *sk_out)
137{
138 if (sk_out) {
139 int refs;
140
141 sk_lock(sk_out);
142 assert(sk_out->refs != 0);
143 refs = --sk_out->refs;
144 sk_unlock(sk_out);
145
146 if (!refs) {
147 sk_out_free(sk_out);
148 pthread_setspecific(sk_out_key, NULL);
149 return 0;
150 }
151 }
152
153 return 1;
154}
155
156void sk_out_drop(void)
157{
158 struct sk_out *sk_out;
159
160 sk_out = pthread_getspecific(sk_out_key);
161 __sk_out_drop(sk_out);
162}
163
164static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
165 uint32_t pdu_len, uint64_t tag)
166{
167 memset(cmd, 0, sizeof(*cmd));
168
169 cmd->version = __cpu_to_le16(FIO_SERVER_VER);
170 cmd->opcode = cpu_to_le16(opcode);
171 cmd->tag = cpu_to_le64(tag);
172 cmd->pdu_len = cpu_to_le32(pdu_len);
173}
174
175
176static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
177 const void *pdu, uint32_t pdu_len, uint64_t tag)
178{
179 __fio_init_net_cmd(cmd, opcode, pdu_len, tag);
180
181 if (pdu)
182 memcpy(&cmd->payload, pdu, pdu_len);
183}
184
185const char *fio_server_op(unsigned int op)
186{
187 static char buf[32];
188
189 if (op < FIO_NET_CMD_NR)
190 return fio_server_ops[op];
191
192 sprintf(buf, "UNKNOWN/%d", op);
193 return buf;
194}
195
196static ssize_t iov_total_len(const struct iovec *iov, int count)
197{
198 ssize_t ret = 0;
199
200 while (count--) {
201 ret += iov->iov_len;
202 iov++;
203 }
204
205 return ret;
206}
207
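/*
 * Write a full iovec to the socket, advancing the iovec across partial
 * writes and retrying on EAGAIN/EINTR. Returns 0 if everything was
 * sent, 1 otherwise.
 */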
208static int fio_sendv_data(int sk, struct iovec *iov, int count)
209{
210 ssize_t total_len = iov_total_len(iov, count);
211 ssize_t ret;
212
213 do {
214 ret = writev(sk, iov, count);
215 if (ret > 0) {
216 total_len -= ret;
217 if (!total_len)
218 break;
219
220 while (ret) {
221 if (ret >= iov->iov_len) {
222 ret -= iov->iov_len;
223 iov++;
224 continue;
225 }
226 iov->iov_base += ret;
227 iov->iov_len -= ret;
228 ret = 0;
229 }
230 } else if (!ret)
231 break;
232 else if (errno == EAGAIN || errno == EINTR)
233 continue;
234 else
235 break;
236 } while (!exit_backend);
237
238 if (!total_len)
239 return 0;
240
241 return 1;
242}
243
244static int fio_send_data(int sk, const void *p, unsigned int len)
245{
246 struct iovec iov = { .iov_base = (void *) p, .iov_len = len };
247
248 assert(len <= sizeof(struct fio_net_cmd) + FIO_SERVER_MAX_FRAGMENT_PDU);
249
250 return fio_sendv_data(sk, &iov, 1);
251}
252
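/*
 * Receive exactly 'len' bytes, blocking (MSG_WAITALL) or non-blocking
 * depending on 'wait'. Returns 0 if the full length was received,
 * -1 otherwise.
 */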
253static int fio_recv_data(int sk, void *buf, unsigned int len, bool wait)
254{
255 int flags;
256 char *p = buf;
257
258 if (wait)
259 flags = MSG_WAITALL;
260 else
261 flags = OS_MSG_DONTWAIT;
262
263 do {
264 int ret = recv(sk, p, len, flags);
265
266 if (ret > 0) {
267 len -= ret;
268 if (!len)
269 break;
270 p += ret;
271 continue;
272 } else if (!ret)
273 break;
274 else if (errno == EAGAIN || errno == EINTR) {
275 if (wait)
276 continue;
277 break;
278 } else
279 break;
280 } while (!exit_backend);
281
282 if (!len)
283 return 0;
284
285 return -1;
286}
287
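/*
 * Verify the command header CRC, convert the header fields from
 * little endian to host order, and sanity check the protocol version
 * and payload length.
 */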
288static int verify_convert_cmd(struct fio_net_cmd *cmd)
289{
290 uint16_t crc;
291
292 cmd->cmd_crc16 = le16_to_cpu(cmd->cmd_crc16);
293 cmd->pdu_crc16 = le16_to_cpu(cmd->pdu_crc16);
294
295 crc = fio_crc16(cmd, FIO_NET_CMD_CRC_SZ);
296 if (crc != cmd->cmd_crc16) {
297 log_err("fio: server bad crc on command (got %x, wanted %x)\n",
298 cmd->cmd_crc16, crc);
299 fprintf(f_err, "fio: server bad crc on command (got %x, wanted %x)\n",
300 cmd->cmd_crc16, crc);
301 return 1;
302 }
303
304 cmd->version = le16_to_cpu(cmd->version);
305 cmd->opcode = le16_to_cpu(cmd->opcode);
306 cmd->flags = le32_to_cpu(cmd->flags);
307 cmd->tag = le64_to_cpu(cmd->tag);
308 cmd->pdu_len = le32_to_cpu(cmd->pdu_len);
309
310 switch (cmd->version) {
311 case FIO_SERVER_VER:
312 break;
313 default:
314 log_err("fio: bad server cmd version %d\n", cmd->version);
315 fprintf(f_err, "fio: client/server version mismatch (%d != %d)\n",
316 cmd->version, FIO_SERVER_VER);
317 return 1;
318 }
319
320 if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
321 log_err("fio: command payload too large: %u\n", cmd->pdu_len);
322 return 1;
323 }
324
325 return 0;
326}
327
328/*
329 * Read (and defragment, if necessary) incoming commands
330 */
331struct fio_net_cmd *fio_net_recv_cmd(int sk, bool wait)
332{
333 struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
334 size_t cmd_size = 0, pdu_offset = 0;
335 uint16_t crc;
336 int ret, first = 1;
337 void *pdu = NULL;
338
339 do {
340 ret = fio_recv_data(sk, &cmd, sizeof(cmd), wait);
341 if (ret)
342 break;
343
344 /* We have a command, verify it and swap if need be */
345 ret = verify_convert_cmd(&cmd);
346 if (ret)
347 break;
348
349 if (first) {
350 /* if this is text, add room for \0 at the end */
351 cmd_size = sizeof(cmd) + cmd.pdu_len + 1;
352 assert(!cmdret);
353 } else
354 cmd_size += cmd.pdu_len;
355
356 if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) {
357 log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size);
358 ret = 1;
359 break;
360 }
361
362 tmp = realloc(cmdret, cmd_size);
363 if (!tmp) {
364 log_err("fio: server failed allocating cmd\n");
365 ret = 1;
366 break;
367 }
368 cmdret = tmp;
369
370 if (first)
371 memcpy(cmdret, &cmd, sizeof(cmd));
372 else if (cmdret->opcode != cmd.opcode) {
373 log_err("fio: fragment opcode mismatch (%d != %d)\n",
374 cmdret->opcode, cmd.opcode);
375 ret = 1;
376 break;
377 }
378
379 if (!cmd.pdu_len)
380 break;
381
382 /* There's payload, get it */
383 pdu = (char *) cmdret->payload + pdu_offset;
384 ret = fio_recv_data(sk, pdu, cmd.pdu_len, wait);
385 if (ret)
386 break;
387
388 /* Verify payload crc */
389 crc = fio_crc16(pdu, cmd.pdu_len);
390 if (crc != cmd.pdu_crc16) {
391 log_err("fio: server bad crc on payload ");
392 log_err("(got %x, wanted %x)\n", cmd.pdu_crc16, crc);
393 ret = 1;
394 break;
395 }
396
397 pdu_offset += cmd.pdu_len;
398 if (!first)
399 cmdret->pdu_len += cmd.pdu_len;
400 first = 0;
401 } while (cmd.flags & FIO_NET_CMD_F_MORE);
402
403 if (ret) {
404 free(cmdret);
405 cmdret = NULL;
406 } else if (cmdret) {
407 /* zero-terminate text input */
408 if (cmdret->pdu_len) {
409 if (cmdret->opcode == FIO_NET_CMD_TEXT) {
410 struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload;
411 char *buf = (char *) __pdu->buf;
412
413 buf[__pdu->buf_len] = '\0';
414 } else if (cmdret->opcode == FIO_NET_CMD_JOB) {
415 struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload;
416 char *buf = (char *) __pdu->buf;
417 int len = le32_to_cpu(__pdu->buf_len);
418
419 buf[len] = '\0';
420 }
421 }
422
423 /* frag flag is internal */
424 cmdret->flags &= ~FIO_NET_CMD_F_MORE;
425 }
426
427 return cmdret;
428}
429
430static void add_reply(uint64_t tag, struct flist_head *list)
431{
432 struct fio_net_cmd_reply *reply;
433
434 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
435 flist_add_tail(&reply->list, list);
436}
437
438static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
439{
440 struct fio_net_cmd_reply *reply;
441
442 reply = calloc(1, sizeof(*reply));
443 INIT_FLIST_HEAD(&reply->list);
444 fio_gettime(&reply->ts, NULL);
445 reply->saved_tag = tag;
446 reply->opcode = opcode;
447
448 return (uintptr_t) reply;
449}
450
451static void free_reply(uint64_t tag)
452{
453 struct fio_net_cmd_reply *reply;
454
455 reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
456 free(reply);
457}
458
459static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
460{
461 uint32_t pdu_len;
462
463 cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));
464
465 pdu_len = le32_to_cpu(cmd->pdu_len);
466 cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
467}
468
469static void fio_net_cmd_crc(struct fio_net_cmd *cmd)
470{
471 fio_net_cmd_crc_pdu(cmd, cmd->payload);
472}
473
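/*
 * Send a command and payload, fragmenting the payload into chunks of
 * at most FIO_SERVER_MAX_FRAGMENT_PDU bytes. All but the last fragment
 * carry FIO_NET_CMD_F_MORE. If 'list' is given, a reply is allocated
 * and stored for later matching against the returned tag.
 */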
474int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
475 uint64_t *tagptr, struct flist_head *list)
476{
477 struct fio_net_cmd *cmd = NULL;
478 size_t this_len, cur_len = 0;
479 uint64_t tag;
480 int ret;
481
482 if (list) {
483 assert(tagptr);
484 tag = *tagptr = alloc_reply(*tagptr, opcode);
485 } else
486 tag = tagptr ? *tagptr : 0;
487
488 do {
489 this_len = size;
490 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
491 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
492
493 if (!cmd || cur_len < sizeof(*cmd) + this_len) {
494 if (cmd)
495 free(cmd);
496
497 cur_len = sizeof(*cmd) + this_len;
498 cmd = malloc(cur_len);
499 }
500
501 fio_init_net_cmd(cmd, opcode, buf, this_len, tag);
502
503 if (this_len < size)
504 cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);
505
506 fio_net_cmd_crc(cmd);
507
508 ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
509 size -= this_len;
510 buf += this_len;
511 } while (!ret && size);
512
513 if (list) {
514 if (ret)
515 free_reply(tag);
516 else
517 add_reply(tag, list);
518 }
519
520 if (cmd)
521 free(cmd);
522
523 return ret;
524}
525
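/*
 * Allocate an sk_entry describing a command to be queued for later
 * transmission. The payload is duplicated if SK_F_COPY is set,
 * otherwise the caller's buffer is referenced directly.
 */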
526static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf,
527 size_t size, uint64_t *tagptr,
528 int flags)
529{
530 struct sk_entry *entry;
531
532 entry = smalloc(sizeof(*entry));
533 if (!entry)
534 return NULL;
535
536 INIT_FLIST_HEAD(&entry->next);
537 entry->opcode = opcode;
538 if (flags & SK_F_COPY) {
539 entry->buf = smalloc(size);
540 memcpy(entry->buf, buf, size);
541 } else
542 entry->buf = buf;
543
544 entry->size = size;
545 if (tagptr)
546 entry->tag = *tagptr;
547 else
548 entry->tag = 0;
549 entry->flags = flags;
550 return entry;
551}
552
553static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry);
554
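/*
 * Hand an entry to the connection's transmit side. SK_F_INLINE entries
 * are sent immediately, others are appended to sk_out->list and the
 * waiter is woken up.
 */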
555static void fio_net_queue_entry(struct sk_entry *entry)
556{
557 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
558
559 if (entry->flags & SK_F_INLINE)
560 handle_sk_entry(sk_out, entry);
561 else {
562 sk_lock(sk_out);
563 flist_add_tail(&entry->list, &sk_out->list);
564 sk_unlock(sk_out);
565
566 fio_sem_up(&sk_out->wait);
567 }
568}
569
570static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
571 uint64_t *tagptr, int flags)
572{
573 struct sk_entry *entry;
574
575 entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
576 if (entry) {
577 fio_net_queue_entry(entry);
578 return 0;
579 }
580
581 return 1;
582}
583
584static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
585{
586 struct fio_net_cmd cmd;
587
588 fio_init_net_cmd(&cmd, opcode, NULL, 0, tag);
589 fio_net_cmd_crc(&cmd);
590
591 return fio_send_data(sk, &cmd, sizeof(cmd));
592}
593
594/*
595 * If 'list' is non-NULL, then allocate and store the sent command for
596 * later verification.
597 */
598int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag,
599 struct flist_head *list)
600{
601 int ret;
602
603 if (list)
604 tag = alloc_reply(tag, opcode);
605
606 ret = fio_net_send_simple_stack_cmd(sk, opcode, tag);
607 if (ret) {
608 if (list)
609 free_reply(tag);
610
611 return ret;
612 }
613
614 if (list)
615 add_reply(tag, list);
616
617 return 0;
618}
619
620static int fio_net_queue_quit(void)
621{
622 dprint(FD_NET, "server: sending quit\n");
623
624 return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE);
625}
626
627int fio_net_send_quit(int sk)
628{
629 dprint(FD_NET, "server: sending quit\n");
630
631 return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL);
632}
633
634static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal)
635{
636 struct cmd_end_pdu epdu;
637 uint64_t tag = 0;
638
639 if (cmd)
640 tag = cmd->tag;
641
642 epdu.error = __cpu_to_le32(error);
643 epdu.signal = __cpu_to_le32(signal);
644 return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY);
645}
646
647static int fio_net_queue_stop(int error, int signal)
648{
649 dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal);
650 return fio_net_send_ack(NULL, error, signal);
651}
652
653static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
654{
655 struct fio_fork_item *ffi;
656
657 ffi = malloc(sizeof(*ffi));
658 ffi->exitval = 0;
659 ffi->signal = 0;
660 ffi->exited = 0;
661 ffi->pid = pid;
662 flist_add_tail(&ffi->list, list);
663}
664
665static void fio_server_add_conn_pid(struct flist_head *conn_list, pid_t pid)
666{
667 dprint(FD_NET, "server: forked off connection job (pid=%u)\n", (int) pid);
668 fio_server_add_fork_item(pid, conn_list);
669}
670
671static void fio_server_add_job_pid(struct flist_head *job_list, pid_t pid)
672{
673 dprint(FD_NET, "server: forked off job (pid=%u)\n", (int) pid);
674 fio_server_add_fork_item(pid, job_list);
675}
676
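/*
 * Non-blocking check whether a forked child has exited, recording its
 * exit value or terminating signal in the fork item.
 */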
677static void fio_server_check_fork_item(struct fio_fork_item *ffi)
678{
679 int ret, status;
680
681 ret = waitpid(ffi->pid, &status, WNOHANG);
682 if (ret < 0) {
683 if (errno == ECHILD) {
684 log_err("fio: connection pid %u disappeared\n", (int) ffi->pid);
685 ffi->exited = 1;
686 } else
687 log_err("fio: waitpid: %s\n", strerror(errno));
688 } else if (ret == ffi->pid) {
689 if (WIFSIGNALED(status)) {
690 ffi->signal = WTERMSIG(status);
691 ffi->exited = 1;
692 }
693 if (WIFEXITED(status)) {
694 if (WEXITSTATUS(status))
695 ffi->exitval = WEXITSTATUS(status);
696 ffi->exited = 1;
697 }
698 }
699}
700
701static void fio_server_fork_item_done(struct fio_fork_item *ffi, bool stop)
702{
703 dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval);
704
705 /*
706 * Fold STOP and QUIT...
707 */
708 if (stop) {
709 fio_net_queue_stop(ffi->exitval, ffi->signal);
710 fio_net_queue_quit();
711 }
712
713 flist_del(&ffi->list);
714 free(ffi);
715}
716
717static void fio_server_check_fork_items(struct flist_head *list, bool stop)
718{
719 struct flist_head *entry, *tmp;
720 struct fio_fork_item *ffi;
721
722 flist_for_each_safe(entry, tmp, list) {
723 ffi = flist_entry(entry, struct fio_fork_item, list);
724
725 fio_server_check_fork_item(ffi);
726
727 if (ffi->exited)
728 fio_server_fork_item_done(ffi, stop);
729 }
730}
731
732static void fio_server_check_jobs(struct flist_head *job_list)
733{
734 fio_server_check_fork_items(job_list, true);
735}
736
737static void fio_server_check_conns(struct flist_head *conn_list)
738{
739 fio_server_check_fork_items(conn_list, false);
740}
741
742static int handle_load_file_cmd(struct fio_net_cmd *cmd)
743{
744 struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload;
745 void *file_name = pdu->file;
746 struct cmd_start_pdu spdu;
747
748 dprint(FD_NET, "server: loading local file %s\n", (char *) file_name);
749
750 pdu->name_len = le16_to_cpu(pdu->name_len);
751 pdu->client_type = le16_to_cpu(pdu->client_type);
752
753 if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) {
754 fio_net_queue_quit();
755 return -1;
756 }
757
758 spdu.jobs = cpu_to_le32(thread_number);
759 spdu.stat_outputs = cpu_to_le32(stat_number);
760 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
761 return 0;
762}
763
764static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list,
765 struct fio_net_cmd *cmd)
766{
767 pid_t pid;
768 int ret;
769
770 sk_out_assign(sk_out);
771
772 fio_time_init();
773 set_genesis_time();
774
775 pid = fork();
776 if (pid) {
777 fio_server_add_job_pid(job_list, pid);
778 return 0;
779 }
780
781 ret = fio_backend(sk_out);
782 free_threads_shm();
783 sk_out_drop();
784 _exit(ret);
785}
786
787static int handle_job_cmd(struct fio_net_cmd *cmd)
788{
789 struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmd->payload;
790 void *buf = pdu->buf;
791 struct cmd_start_pdu spdu;
792
793 pdu->buf_len = le32_to_cpu(pdu->buf_len);
794 pdu->client_type = le32_to_cpu(pdu->client_type);
795
796 if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) {
797 fio_net_queue_quit();
798 return -1;
799 }
800
801 spdu.jobs = cpu_to_le32(thread_number);
802 spdu.stat_outputs = cpu_to_le32(stat_number);
803
804 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
805 return 0;
806}
807
808static int handle_jobline_cmd(struct fio_net_cmd *cmd)
809{
810 void *pdu = cmd->payload;
811 struct cmd_single_line_pdu *cslp;
812 struct cmd_line_pdu *clp;
813 unsigned long offset;
814 struct cmd_start_pdu spdu;
815 char **argv;
816 int i;
817
818 clp = pdu;
819 clp->lines = le16_to_cpu(clp->lines);
820 clp->client_type = le16_to_cpu(clp->client_type);
821 argv = malloc(clp->lines * sizeof(char *));
822 offset = sizeof(*clp);
823
824 dprint(FD_NET, "server: %d command line args\n", clp->lines);
825
826 for (i = 0; i < clp->lines; i++) {
827 cslp = pdu + offset;
828 argv[i] = (char *) cslp->text;
829
830 offset += sizeof(*cslp) + le16_to_cpu(cslp->len);
831 dprint(FD_NET, "server: %d: %s\n", i, argv[i]);
832 }
833
834 if (parse_cmd_line(clp->lines, argv, clp->client_type)) {
835 fio_net_queue_quit();
836 free(argv);
837 return -1;
838 }
839
840 free(argv);
841
842 spdu.jobs = cpu_to_le32(thread_number);
843 spdu.stat_outputs = cpu_to_le32(stat_number);
844
845 fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY);
846 return 0;
847}
848
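/*
 * Answer a client probe with our hostname, OS, architecture,
 * endianness, pointer size, CPU count and fio version, and negotiate
 * zlib compression if both sides support it.
 */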
849static int handle_probe_cmd(struct fio_net_cmd *cmd)
850{
851 struct cmd_client_probe_pdu *pdu = (struct cmd_client_probe_pdu *) cmd->payload;
852 uint64_t tag = cmd->tag;
853 struct cmd_probe_reply_pdu probe = {
854#ifdef CONFIG_BIG_ENDIAN
855 .bigendian = 1,
856#endif
857 .os = FIO_OS,
858 .arch = FIO_ARCH,
859 .bpp = sizeof(void *),
860 .cpus = __cpu_to_le32(cpus_online()),
861 };
862
863 dprint(FD_NET, "server: sending probe reply\n");
864
865 strcpy(me, (char *) pdu->server);
866
867 gethostname((char *) probe.hostname, sizeof(probe.hostname));
868 strncpy((char *) probe.fio_version, fio_version_string, sizeof(probe.fio_version) - 1);
869
870 /*
871 * If the client supports compression and we do too, then enable it
872 */
873 if (has_zlib && le64_to_cpu(pdu->flags) & FIO_PROBE_FLAG_ZLIB) {
874 probe.flags = __cpu_to_le64(FIO_PROBE_FLAG_ZLIB);
875 use_zlib = 1;
876 } else {
877 probe.flags = 0;
878 use_zlib = 0;
879 }
880
881 return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY);
882}
883
884static int handle_send_eta_cmd(struct fio_net_cmd *cmd)
885{
886 struct jobs_eta *je;
887 uint64_t tag = cmd->tag;
888 size_t size;
889 int i;
890
891 dprint(FD_NET, "server sending status\n");
892
893 /*
894 * Fake ETA return if we don't have a local one, otherwise the client
895 * will end up timing out waiting for a response to the ETA request
896 */
897 je = get_jobs_eta(true, &size);
898 if (!je) {
899 size = sizeof(*je);
900 je = calloc(1, size);
901 } else {
902 je->nr_running = cpu_to_le32(je->nr_running);
903 je->nr_ramp = cpu_to_le32(je->nr_ramp);
904 je->nr_pending = cpu_to_le32(je->nr_pending);
905 je->nr_setting_up = cpu_to_le32(je->nr_setting_up);
906 je->files_open = cpu_to_le32(je->files_open);
907
908 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
909 je->m_rate[i] = cpu_to_le64(je->m_rate[i]);
910 je->t_rate[i] = cpu_to_le64(je->t_rate[i]);
911 je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
912 je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
913 je->rate[i] = cpu_to_le64(je->rate[i]);
914 je->iops[i] = cpu_to_le32(je->iops[i]);
915 }
916
917 je->elapsed_sec = cpu_to_le64(je->elapsed_sec);
918 je->eta_sec = cpu_to_le64(je->eta_sec);
919 je->nr_threads = cpu_to_le32(je->nr_threads);
920 je->is_pow2 = cpu_to_le32(je->is_pow2);
921 je->unit_base = cpu_to_le32(je->unit_base);
922 }
923
924 fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE);
925 return 0;
926}
927
928static int send_update_job_reply(uint64_t __tag, int error)
929{
930 uint64_t tag = __tag;
931 uint32_t pdu_error;
932
933 pdu_error = __cpu_to_le32(error);
934 return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY);
935}
936
937static int handle_update_job_cmd(struct fio_net_cmd *cmd)
938{
939 struct cmd_add_job_pdu *pdu = (struct cmd_add_job_pdu *) cmd->payload;
940 struct thread_data *td;
941 uint32_t tnumber;
942
943 tnumber = le32_to_cpu(pdu->thread_number);
944
945 dprint(FD_NET, "server: updating options for job %u\n", tnumber);
946
947 if (!tnumber || tnumber > thread_number) {
948 send_update_job_reply(cmd->tag, ENODEV);
949 return 0;
950 }
951
952 td = &threads[tnumber - 1];
953 convert_thread_options_to_cpu(&td->o, &pdu->top);
954 send_update_job_reply(cmd->tag, 0);
955 return 0;
956}
957
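/*
 * Handle a verify-state trigger: send the current write state list
 * back to the client, stop all running jobs, and execute the trigger
 * command locally.
 */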
958static int handle_trigger_cmd(struct fio_net_cmd *cmd, struct flist_head *job_list)
959{
960 struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload;
961 char *buf = (char *) pdu->cmd;
962 struct all_io_list *rep;
963 size_t sz;
964
965 pdu->len = le16_to_cpu(pdu->len);
966 buf[pdu->len] = '\0';
967
968 rep = get_all_io_list(IO_LIST_ALL, &sz);
969 if (!rep) {
970 struct all_io_list state;
971
972 state.threads = cpu_to_le64((uint64_t) 0);
973 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE);
974 } else
975 fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE);
976
977 fio_terminate_threads(TERMINATE_ALL);
978 fio_server_check_jobs(job_list);
979 exec_trigger(buf);
980 return 0;
981}
982
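/*
 * Dispatch a single received command to its handler, based on opcode.
 */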
983static int handle_command(struct sk_out *sk_out, struct flist_head *job_list,
984 struct fio_net_cmd *cmd)
985{
986 int ret;
987
988 dprint(FD_NET, "server: got op [%s], pdu=%u, tag=%llx\n",
989 fio_server_op(cmd->opcode), cmd->pdu_len,
990 (unsigned long long) cmd->tag);
991
992 switch (cmd->opcode) {
993 case FIO_NET_CMD_QUIT:
994 fio_terminate_threads(TERMINATE_ALL);
995 ret = 0;
996 break;
997 case FIO_NET_CMD_EXIT:
998 exit_backend = true;
999 return -1;
1000 case FIO_NET_CMD_LOAD_FILE:
1001 ret = handle_load_file_cmd(cmd);
1002 break;
1003 case FIO_NET_CMD_JOB:
1004 ret = handle_job_cmd(cmd);
1005 break;
1006 case FIO_NET_CMD_JOBLINE:
1007 ret = handle_jobline_cmd(cmd);
1008 break;
1009 case FIO_NET_CMD_PROBE:
1010 ret = handle_probe_cmd(cmd);
1011 break;
1012 case FIO_NET_CMD_SEND_ETA:
1013 ret = handle_send_eta_cmd(cmd);
1014 break;
1015 case FIO_NET_CMD_RUN:
1016 ret = handle_run_cmd(sk_out, job_list, cmd);
1017 break;
1018 case FIO_NET_CMD_UPDATE_JOB:
1019 ret = handle_update_job_cmd(cmd);
1020 break;
1021 case FIO_NET_CMD_VTRIGGER:
1022 ret = handle_trigger_cmd(cmd, job_list);
1023 break;
1024 case FIO_NET_CMD_SENDFILE: {
1025 struct cmd_sendfile_reply *in;
1026 struct cmd_reply *rep;
1027
1028 rep = (struct cmd_reply *) (uintptr_t) cmd->tag;
1029
1030 in = (struct cmd_sendfile_reply *) cmd->payload;
1031 in->size = le32_to_cpu(in->size);
1032 in->error = le32_to_cpu(in->error);
1033 if (in->error) {
1034 ret = 1;
1035 rep->error = in->error;
1036 } else {
1037 ret = 0;
1038 rep->data = smalloc(in->size);
1039 if (!rep->data) {
1040 ret = 1;
1041 rep->error = ENOMEM;
1042 } else {
1043 rep->size = in->size;
1044 memcpy(rep->data, in->data, in->size);
1045 }
1046 }
1047 fio_sem_up(&rep->lock);
1048 break;
1049 }
1050 default:
1051 log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode));
1052 ret = 1;
1053 }
1054
1055 return ret;
1056}
1057
1058/*
1059 * Send a command with a separate PDU, not inlined in the command
1060 */
1061static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf,
1062 off_t size, uint64_t tag, uint32_t flags)
1063{
1064 struct fio_net_cmd cmd;
1065 struct iovec iov[2];
1066 size_t this_len;
1067 int ret;
1068
1069 iov[0].iov_base = (void *) &cmd;
1070 iov[0].iov_len = sizeof(cmd);
1071
1072 do {
1073 uint32_t this_flags = flags;
1074
1075 this_len = size;
1076 if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
1077 this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
1078
1079 if (this_len < size)
1080 this_flags |= FIO_NET_CMD_F_MORE;
1081
1082 __fio_init_net_cmd(&cmd, opcode, this_len, tag);
1083 cmd.flags = __cpu_to_le32(this_flags);
1084 fio_net_cmd_crc_pdu(&cmd, buf);
1085
1086 iov[1].iov_base = (void *) buf;
1087 iov[1].iov_len = this_len;
1088
1089 ret = fio_sendv_data(sk, iov, 2);
1090 size -= this_len;
1091 buf += this_len;
1092 } while (!ret && size);
1093
1094 return ret;
1095}
1096
1097static void finish_entry(struct sk_entry *entry)
1098{
1099 if (entry->flags & SK_F_FREE)
1100 free(entry->buf);
1101 else if (entry->flags & SK_F_COPY)
1102 sfree(entry->buf);
1103
1104 sfree(entry);
1105}
1106
1107static void entry_set_flags(struct sk_entry *entry, struct flist_head *list,
1108 unsigned int *flags)
1109{
1110 if (!flist_empty(list))
1111 *flags = FIO_NET_CMD_F_MORE;
1112 else
1113 *flags = 0;
1114}
1115
1116static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
1117{
1118 unsigned int flags;
1119 int ret;
1120
1121 entry_set_flags(first, &first->next, &flags);
1122
1123 ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf,
1124 first->size, first->tag, flags);
1125
1126 while (!flist_empty(&first->next)) {
1127 struct sk_entry *next;
1128
1129 next = flist_first_entry(&first->next, struct sk_entry, list);
1130 flist_del_init(&next->list);
1131
1132 entry_set_flags(next, &first->next, &flags);
1133
1134 ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf,
1135 next->size, next->tag, flags);
1136 finish_entry(next);
1137 }
1138
1139 return ret;
1140}
1141
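/*
 * Transmit one queued entry, serialized by the xmit semaphore.
 * Vectored, simple, and regular commands each take their own send path.
 */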
1142static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry)
1143{
1144 int ret;
1145
1146 fio_sem_down(&sk_out->xmit);
1147
1148 if (entry->flags & SK_F_VEC)
1149 ret = send_vec_entry(sk_out, entry);
1150 else if (entry->flags & SK_F_SIMPLE) {
1151 ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode,
1152 entry->tag, NULL);
1153 } else {
1154 ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf,
1155 entry->size, &entry->tag, NULL);
1156 }
1157
1158 fio_sem_up(&sk_out->xmit);
1159
1160 if (ret)
1161 log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
1162
1163 finish_entry(entry);
1164 return ret;
1165}
1166
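/*
 * Splice all pending entries off sk_out->list under the lock and send
 * them.
 */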
1167static int handle_xmits(struct sk_out *sk_out)
1168{
1169 struct sk_entry *entry;
1170 FLIST_HEAD(list);
1171 int ret = 0;
1172
1173 sk_lock(sk_out);
1174 if (flist_empty(&sk_out->list)) {
1175 sk_unlock(sk_out);
1176 return 0;
1177 }
1178
1179 flist_splice_init(&sk_out->list, &list);
1180 sk_unlock(sk_out);
1181
1182 while (!flist_empty(&list)) {
1183 entry = flist_entry(list.next, struct sk_entry, list);
1184 flist_del(&entry->list);
1185 ret += handle_sk_entry(sk_out, entry);
1186 }
1187
1188 return ret;
1189}
1190
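/*
 * Per-connection loop, run in a forked child: poll the socket, flush
 * queued transmits, reap forked job processes, and handle incoming
 * commands until the peer goes away or an exit is requested. The child
 * terminates via _exit().
 */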
1191static int handle_connection(struct sk_out *sk_out)
1192{
1193 struct fio_net_cmd *cmd = NULL;
1194 FLIST_HEAD(job_list);
1195 int ret = 0;
1196
1197 reset_fio_state();
1198
1199 /* read forever */
1200 while (!exit_backend) {
1201 struct pollfd pfd = {
1202 .fd = sk_out->sk,
1203 .events = POLLIN,
1204 };
1205
1206 do {
1207 int timeout = 1000;
1208
1209 if (!flist_empty(&job_list))
1210 timeout = 100;
1211
1212 handle_xmits(sk_out);
1213
1214 ret = poll(&pfd, 1, 0);
1215 if (ret < 0) {
1216 if (errno == EINTR)
1217 break;
1218 log_err("fio: poll: %s\n", strerror(errno));
1219 break;
1220 } else if (!ret) {
1221 fio_server_check_jobs(&job_list);
1222 fio_sem_down_timeout(&sk_out->wait, timeout);
1223 continue;
1224 }
1225
1226 if (pfd.revents & POLLIN)
1227 break;
1228 if (pfd.revents & (POLLERR|POLLHUP)) {
1229 ret = 1;
1230 break;
1231 }
1232 } while (!exit_backend);
1233
1234 fio_server_check_jobs(&job_list);
1235
1236 if (ret < 0)
1237 break;
1238
1239 cmd = fio_net_recv_cmd(sk_out->sk, true);
1240 if (!cmd) {
1241 ret = -1;
1242 break;
1243 }
1244
1245 ret = handle_command(sk_out, &job_list, cmd);
1246 if (ret)
1247 break;
1248
1249 free(cmd);
1250 cmd = NULL;
1251 }
1252
1253 if (cmd)
1254 free(cmd);
1255
1256 handle_xmits(sk_out);
1257
1258 close(sk_out->sk);
1259 sk_out->sk = -1;
1260 __sk_out_drop(sk_out);
1261 _exit(ret);
1262}
1263
1264/* get the address on this host bound by the input socket,
1265 * whether it is ipv6 or ipv4 */
1266
1267static int get_my_addr_str(int sk)
1268{
1269 struct sockaddr_in6 myaddr6 = { 0, };
1270 struct sockaddr_in myaddr4 = { 0, };
1271 struct sockaddr *sockaddr_p;
1272 char *net_addr;
1273 socklen_t len;
1274 int ret;
1275
1276 if (use_ipv6) {
1277 len = sizeof(myaddr6);
1278 sockaddr_p = (struct sockaddr * )&myaddr6;
1279 net_addr = (char * )&myaddr6.sin6_addr;
1280 } else {
1281 len = sizeof(myaddr4);
1282 sockaddr_p = (struct sockaddr * )&myaddr4;
1283 net_addr = (char * )&myaddr4.sin_addr;
1284 }
1285
1286 ret = getsockname(sk, sockaddr_p, &len);
1287 if (ret) {
1288 log_err("fio: getsockname: %s\n", strerror(errno));
1289 return -1;
1290 }
1291
1292 if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) {
1293 log_err("inet_ntop: failed to convert addr to string\n");
1294 return -1;
1295 }
1296
1297 dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str);
1298 return 0;
1299}
1300
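/*
 * Accept incoming connections on the listening socket, forking one
 * child per connection (the child runs handle_connection()) and
 * tracking the children on conn_list.
 */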
1301static int accept_loop(int listen_sk)
1302{
1303 struct sockaddr_in addr;
1304 struct sockaddr_in6 addr6;
1305 socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr);
1306 struct pollfd pfd;
1307 int ret = 0, sk, exitval = 0;
1308 FLIST_HEAD(conn_list);
1309
1310 dprint(FD_NET, "server enter accept loop\n");
1311
1312 fio_set_fd_nonblocking(listen_sk, "server");
1313
1314 while (!exit_backend) {
1315 struct sk_out *sk_out;
1316 const char *from;
1317 char buf[64];
1318 pid_t pid;
1319
1320 pfd.fd = listen_sk;
1321 pfd.events = POLLIN;
1322 do {
1323 int timeout = 1000;
1324
1325 if (!flist_empty(&conn_list))
1326 timeout = 100;
1327
1328 ret = poll(&pfd, 1, timeout);
1329 if (ret < 0) {
1330 if (errno == EINTR)
1331 break;
1332 log_err("fio: poll: %s\n", strerror(errno));
1333 break;
1334 } else if (!ret) {
1335 fio_server_check_conns(&conn_list);
1336 continue;
1337 }
1338
1339 if (pfd.revents & POLLIN)
1340 break;
1341 } while (!exit_backend);
1342
1343 fio_server_check_conns(&conn_list);
1344
1345 if (exit_backend || ret < 0)
1346 break;
1347
1348 if (use_ipv6)
1349 sk = accept(listen_sk, (struct sockaddr *) &addr6, &len);
1350 else
1351 sk = accept(listen_sk, (struct sockaddr *) &addr, &len);
1352
1353 if (sk < 0) {
1354 log_err("fio: accept: %s\n", strerror(errno));
1355 return -1;
1356 }
1357
1358 if (use_ipv6)
1359 from = inet_ntop(AF_INET6, (struct sockaddr *) &addr6.sin6_addr, buf, sizeof(buf));
1360 else
1361 from = inet_ntop(AF_INET, (struct sockaddr *) &addr.sin_addr, buf, sizeof(buf));
1362
1363 dprint(FD_NET, "server: connect from %s\n", from);
1364
1365 sk_out = scalloc(1, sizeof(*sk_out));
1366 if (!sk_out) {
1367 close(sk);
1368 return -1;
1369 }
1370
1371 sk_out->sk = sk;
1372 INIT_FLIST_HEAD(&sk_out->list);
1373 __fio_sem_init(&sk_out->lock, FIO_SEM_UNLOCKED);
1374 __fio_sem_init(&sk_out->wait, FIO_SEM_LOCKED);
1375 __fio_sem_init(&sk_out->xmit, FIO_SEM_UNLOCKED);
1376
1377 pid = fork();
1378 if (pid) {
1379 close(sk);
1380 fio_server_add_conn_pid(&conn_list, pid);
1381 continue;
1382 }
1383
1384 /* if error, it's already logged, non-fatal */
1385 get_my_addr_str(sk);
1386
1387 /*
1388 * Assign sk_out here, it'll be dropped in handle_connection()
1389 * since that function calls _exit() when done
1390 */
1391 sk_out_assign(sk_out);
1392 handle_connection(sk_out);
1393 }
1394
1395 return exitval;
1396}
1397
1398int fio_server_text_output(int level, const char *buf, size_t len)
1399{
1400 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
1401 struct cmd_text_pdu *pdu;
1402 unsigned int tlen;
1403 struct timeval tv;
1404
1405 if (!sk_out || sk_out->sk == -1)
1406 return -1;
1407
1408 tlen = sizeof(*pdu) + len;
1409 pdu = malloc(tlen);
1410
1411 pdu->level = __cpu_to_le32(level);
1412 pdu->buf_len = __cpu_to_le32(len);
1413
1414 gettimeofday(&tv, NULL);
1415 pdu->log_sec = __cpu_to_le64(tv.tv_sec);
1416 pdu->log_usec = __cpu_to_le64(tv.tv_usec);
1417
1418 memcpy(pdu->buf, buf, len);
1419
1420 fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY);
1421 free(pdu);
1422 return len;
1423}
1424
1425static void convert_io_stat(struct io_stat *dst, struct io_stat *src)
1426{
1427 dst->max_val = cpu_to_le64(src->max_val);
1428 dst->min_val = cpu_to_le64(src->min_val);
1429 dst->samples = cpu_to_le64(src->samples);
1430
1431 /*
1432 * Encode to IEEE 754 for network transfer
1433 */
1434 dst->mean.u.i = cpu_to_le64(fio_double_to_uint64(src->mean.u.f));
1435 dst->S.u.i = cpu_to_le64(fio_double_to_uint64(src->S.u.f));
1436}
1437
1438static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src)
1439{
1440 int i;
1441
1442 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1443 dst->max_run[i] = cpu_to_le64(src->max_run[i]);
1444 dst->min_run[i] = cpu_to_le64(src->min_run[i]);
1445 dst->max_bw[i] = cpu_to_le64(src->max_bw[i]);
1446 dst->min_bw[i] = cpu_to_le64(src->min_bw[i]);
1447 dst->iobytes[i] = cpu_to_le64(src->iobytes[i]);
1448 dst->agg[i] = cpu_to_le64(src->agg[i]);
1449 }
1450
1451 dst->kb_base = cpu_to_le32(src->kb_base);
1452 dst->unit_base = cpu_to_le32(src->unit_base);
1453 dst->groupid = cpu_to_le32(src->groupid);
1454 dst->unified_rw_rep = cpu_to_le32(src->unified_rw_rep);
1455 dst->sig_figs = cpu_to_le32(src->sig_figs);
1456}
1457
1458/*
1459 * Send a CMD_TS, which packs struct thread_stat and group_run_stats
1460 * into a single payload.
1461 */
1462void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs)
1463{
1464 struct cmd_ts_pdu p;
1465 int i, j;
1466 void *ss_buf;
1467 uint64_t *ss_iops, *ss_bw;
1468
1469 dprint(FD_NET, "server sending end stats\n");
1470
1471 memset(&p, 0, sizeof(p));
1472
1473 strncpy(p.ts.name, ts->name, FIO_JOBNAME_SIZE - 1);
1474 strncpy(p.ts.verror, ts->verror, FIO_VERROR_SIZE - 1);
1475 strncpy(p.ts.description, ts->description, FIO_JOBDESC_SIZE - 1);
1476
1477 p.ts.error = cpu_to_le32(ts->error);
1478 p.ts.thread_number = cpu_to_le32(ts->thread_number);
1479 p.ts.groupid = cpu_to_le32(ts->groupid);
1480 p.ts.pid = cpu_to_le32(ts->pid);
1481 p.ts.members = cpu_to_le32(ts->members);
1482 p.ts.unified_rw_rep = cpu_to_le32(ts->unified_rw_rep);
1483
1484 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1485 convert_io_stat(&p.ts.clat_stat[i], &ts->clat_stat[i]);
1486 convert_io_stat(&p.ts.slat_stat[i], &ts->slat_stat[i]);
1487 convert_io_stat(&p.ts.lat_stat[i], &ts->lat_stat[i]);
1488 convert_io_stat(&p.ts.bw_stat[i], &ts->bw_stat[i]);
1489 convert_io_stat(&p.ts.iops_stat[i], &ts->iops_stat[i]);
1490 }
1491
1492 p.ts.usr_time = cpu_to_le64(ts->usr_time);
1493 p.ts.sys_time = cpu_to_le64(ts->sys_time);
1494 p.ts.ctx = cpu_to_le64(ts->ctx);
1495 p.ts.minf = cpu_to_le64(ts->minf);
1496 p.ts.majf = cpu_to_le64(ts->majf);
1497 p.ts.clat_percentiles = cpu_to_le32(ts->clat_percentiles);
1498 p.ts.lat_percentiles = cpu_to_le32(ts->lat_percentiles);
1499 p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision);
1500
1501 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1502 fio_fp64_t *src = &ts->percentile_list[i];
1503 fio_fp64_t *dst = &p.ts.percentile_list[i];
1504
1505 dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f));
1506 }
1507
1508 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
1509 p.ts.io_u_map[i] = cpu_to_le64(ts->io_u_map[i]);
1510 p.ts.io_u_submit[i] = cpu_to_le64(ts->io_u_submit[i]);
1511 p.ts.io_u_complete[i] = cpu_to_le64(ts->io_u_complete[i]);
1512 }
1513
1514 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
1515 p.ts.io_u_lat_n[i] = cpu_to_le64(ts->io_u_lat_n[i]);
1516 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1517 p.ts.io_u_lat_u[i] = cpu_to_le64(ts->io_u_lat_u[i]);
1518 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1519 p.ts.io_u_lat_m[i] = cpu_to_le64(ts->io_u_lat_m[i]);
1520
1521 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1522 for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
1523 p.ts.io_u_plat[i][j] = cpu_to_le64(ts->io_u_plat[i][j]);
1524
1525 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1526 p.ts.total_io_u[i] = cpu_to_le64(ts->total_io_u[i]);
1527 p.ts.short_io_u[i] = cpu_to_le64(ts->short_io_u[i]);
1528 p.ts.drop_io_u[i] = cpu_to_le64(ts->drop_io_u[i]);
1529 }
1530
1531 p.ts.total_submit = cpu_to_le64(ts->total_submit);
1532 p.ts.total_complete = cpu_to_le64(ts->total_complete);
1533
1534 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1535 p.ts.io_bytes[i] = cpu_to_le64(ts->io_bytes[i]);
1536 p.ts.runtime[i] = cpu_to_le64(ts->runtime[i]);
1537 }
1538
1539 p.ts.total_run_time = cpu_to_le64(ts->total_run_time);
1540 p.ts.continue_on_error = cpu_to_le16(ts->continue_on_error);
1541 p.ts.total_err_count = cpu_to_le64(ts->total_err_count);
1542 p.ts.first_error = cpu_to_le32(ts->first_error);
1543 p.ts.kb_base = cpu_to_le32(ts->kb_base);
1544 p.ts.unit_base = cpu_to_le32(ts->unit_base);
1545
1546 p.ts.latency_depth = cpu_to_le32(ts->latency_depth);
1547 p.ts.latency_target = cpu_to_le64(ts->latency_target);
1548 p.ts.latency_window = cpu_to_le64(ts->latency_window);
1549 p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
1550
1551 p.ts.sig_figs = cpu_to_le32(ts->sig_figs);
1552
1553 p.ts.nr_block_infos = cpu_to_le64(ts->nr_block_infos);
1554 for (i = 0; i < p.ts.nr_block_infos; i++)
1555 p.ts.block_infos[i] = cpu_to_le32(ts->block_infos[i]);
1556
1557 p.ts.ss_dur = cpu_to_le64(ts->ss_dur);
1558 p.ts.ss_state = cpu_to_le32(ts->ss_state);
1559 p.ts.ss_head = cpu_to_le32(ts->ss_head);
1560 p.ts.ss_limit.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_limit.u.f));
1561 p.ts.ss_slope.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_slope.u.f));
1562 p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
1563 p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
1564
1565 convert_gs(&p.rs, rs);
1566
1567 dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
1568 if (ts->ss_state & FIO_SS_DATA) {
1569 dprint(FD_NET, "server sending steadystate ring buffers\n");
1570
1571 ss_buf = malloc(sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t));
1572
1573 memcpy(ss_buf, &p, sizeof(p));
1574
1575 ss_iops = (uint64_t *) ((struct cmd_ts_pdu *)ss_buf + 1);
1576 ss_bw = ss_iops + (int) ts->ss_dur;
1577 for (i = 0; i < ts->ss_dur; i++) {
1578 ss_iops[i] = cpu_to_le64(ts->ss_iops_data[i]);
1579 ss_bw[i] = cpu_to_le64(ts->ss_bw_data[i]);
1580 }
1581
1582 fio_net_queue_cmd(FIO_NET_CMD_TS, ss_buf, sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t), NULL, SK_F_COPY);
1583
1584 free(ss_buf);
1585 }
1586 else
1587 fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
1588}
1589
1590void fio_server_send_gs(struct group_run_stats *rs)
1591{
1592 struct group_run_stats gs;
1593
1594 dprint(FD_NET, "server sending group run stats\n");
1595
1596 convert_gs(&gs, rs);
1597 fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY);
1598}
1599
1600void fio_server_send_job_options(struct flist_head *opt_list,
1601 unsigned int gid)
1602{
1603 struct cmd_job_option pdu;
1604 struct flist_head *entry;
1605
1606 if (flist_empty(opt_list))
1607 return;
1608
1609 flist_for_each(entry, opt_list) {
1610 struct print_option *p;
1611 size_t len;
1612
1613 p = flist_entry(entry, struct print_option, list);
1614 memset(&pdu, 0, sizeof(pdu));
1615
1616 if (gid == -1U) {
1617 pdu.global = __cpu_to_le16(1);
1618 pdu.groupid = 0;
1619 } else {
1620 pdu.global = 0;
1621 pdu.groupid = cpu_to_le32(gid);
1622 }
1623 len = strlen(p->name);
1624 if (len >= sizeof(pdu.name)) {
1625 len = sizeof(pdu.name) - 1;
1626 pdu.truncated = __cpu_to_le16(1);
1627 }
1628 memcpy(pdu.name, p->name, len);
1629 if (p->value) {
1630 len = strlen(p->value);
1631 if (len >= sizeof(pdu.value)) {
1632 len = sizeof(pdu.value) - 1;
1633 pdu.truncated = __cpu_to_le16(1);
1634 }
1635 memcpy(pdu.value, p->value, len);
1636 }
1637 fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1638 }
1639}
1640
1641static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src)
1642{
1643 int i;
1644
1645 for (i = 0; i < 2; i++) {
1646 dst->ios[i] = cpu_to_le64(src->ios[i]);
1647 dst->merges[i] = cpu_to_le64(src->merges[i]);
1648 dst->sectors[i] = cpu_to_le64(src->sectors[i]);
1649 dst->ticks[i] = cpu_to_le64(src->ticks[i]);
1650 }
1651
1652 dst->io_ticks = cpu_to_le64(src->io_ticks);
1653 dst->time_in_queue = cpu_to_le64(src->time_in_queue);
1654 dst->slavecount = cpu_to_le32(src->slavecount);
1655 dst->max_util.u.i = cpu_to_le64(fio_double_to_uint64(src->max_util.u.f));
1656}
1657
1658static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src)
1659{
1660 int i;
1661
1662 dst->name[FIO_DU_NAME_SZ - 1] = '\0';
1663 strncpy((char *) dst->name, (char *) src->name, FIO_DU_NAME_SZ - 1);
1664
1665 for (i = 0; i < 2; i++) {
1666 dst->s.ios[i] = cpu_to_le64(src->s.ios[i]);
1667 dst->s.merges[i] = cpu_to_le64(src->s.merges[i]);
1668 dst->s.sectors[i] = cpu_to_le64(src->s.sectors[i]);
1669 dst->s.ticks[i] = cpu_to_le64(src->s.ticks[i]);
1670 }
1671
1672 dst->s.io_ticks = cpu_to_le64(src->s.io_ticks);
1673 dst->s.time_in_queue = cpu_to_le64(src->s.time_in_queue);
1674 dst->s.msec = cpu_to_le64(src->s.msec);
1675}
1676
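/*
 * Send one disk utilization command for each entry on the disk_list.
 */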
1677void fio_server_send_du(void)
1678{
1679 struct disk_util *du;
1680 struct flist_head *entry;
1681 struct cmd_du_pdu pdu;
1682
1683 dprint(FD_NET, "server: sending disk_util %d\n", !flist_empty(&disk_list));
1684
1685 memset(&pdu, 0, sizeof(pdu));
1686
1687 flist_for_each(entry, &disk_list) {
1688 du = flist_entry(entry, struct disk_util, list);
1689
1690 convert_dus(&pdu.dus, &du->dus);
1691 convert_agg(&pdu.agg, &du->agg);
1692
1693 fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY);
1694 }
1695}
1696
1697#ifdef CONFIG_ZLIB
1698
1699static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu,
1700 struct sk_entry **last_entry,
1701 struct sk_entry *first)
1702{
1703 unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
1704
1705 *last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1706 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1707 if (*last_entry)
1708 flist_add_tail(&(*last_entry)->list, &first->next);
1709}
1710
1711/*
1712 * Deflates the next input given, creating as many new packets in the
1713 * linked list as necessary.
1714 */
1715static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu,
1716 struct sk_entry **last_entry, z_stream *stream,
1717 struct sk_entry *first)
1718{
1719 int ret;
1720
1721 stream->next_in = next_in;
1722 stream->avail_in = next_sz;
1723 do {
1724 if (!stream->avail_out) {
1725 __fio_net_prep_tail(stream, *out_pdu, last_entry, first);
1726 if (*last_entry == NULL)
1727 return 1;
1728
1729 *out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1730
1731 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1732 stream->next_out = *out_pdu;
1733 }
1734
1735 ret = deflate(stream, Z_BLOCK);
1736
1737 if (ret < 0) {
1738 free(*out_pdu);
1739 return 1;
1740 }
1741 } while (stream->avail_in);
1742
1743 return 0;
1744}
1745
1746static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log,
1747 struct io_logs *cur_log, z_stream *stream)
1748{
1749 struct sk_entry *entry;
1750 void *out_pdu;
1751 int ret, i, j;
1752 int sample_sz = log_entry_sz(log);
1753
1754 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1755 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1756 stream->next_out = out_pdu;
1757
1758 for (i = 0; i < cur_log->nr_samples; i++) {
1759 struct io_sample *s;
1760 struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry;
1761 uint64_t *cur_plat, *prev_plat;
1762
1763 s = get_sample(log, cur_log, i);
1764 ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first);
1765 if (ret)
1766 return ret;
1767
1768 /* Do the subtraction on server side so that client doesn't have to
1769 * reconstruct our linked list from packets.
1770 */
1771 cur_plat_entry = s->data.plat_entry;
1772 prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list);
1773 cur_plat = cur_plat_entry->io_u_plat;
1774 prev_plat = prev_plat_entry->io_u_plat;
1775
1776 for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
1777 cur_plat[j] -= prev_plat[j];
1778 }
1779
1780 flist_del(&prev_plat_entry->list);
1781 free(prev_plat_entry);
1782
1783 ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry),
1784 &out_pdu, &entry, stream, first);
1785
1786 if (ret)
1787 return ret;
1788 }
1789
1790 __fio_net_prep_tail(stream, out_pdu, &entry, first);
1791 return entry == NULL;
1792}
1793
1794static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log,
1795 struct io_logs *cur_log, z_stream *stream)
1796{
1797 unsigned int this_len;
1798 void *out_pdu;
1799 int ret;
1800
1801 if (log->log_type == IO_LOG_TYPE_HIST)
1802 return __fio_append_iolog_gz_hist(first, log, cur_log, stream);
1803
1804 stream->next_in = (void *) cur_log->log;
1805 stream->avail_in = cur_log->nr_samples * log_entry_sz(log);
1806
1807 do {
1808 struct sk_entry *entry;
1809
1810 /*
1811 * Dirty - since the log is potentially huge, compress it into
1812 * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
1813 * side defragment it.
1814 */
1815 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1816
1817 stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1818 stream->next_out = out_pdu;
1819 ret = deflate(stream, Z_BLOCK);
1820 /* may be Z_OK, or Z_STREAM_END */
1821 if (ret < 0) {
1822 free(out_pdu);
1823 return 1;
1824 }
1825
1826 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
1827
1828 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1829 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1830 if (!entry) {
1831 free(out_pdu);
1832 return 1;
1833 }
1834 flist_add_tail(&entry->list, &first->next);
1835 } while (stream->avail_in);
1836
1837 return 0;
1838}
1839
1840static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
1841{
1842 z_stream stream = {
1843 .zalloc = Z_NULL,
1844 .zfree = Z_NULL,
1845 .opaque = Z_NULL,
1846 };
1847 int ret = 0;
1848
1849 if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
1850 return 1;
1851
1852 while (!flist_empty(&log->io_logs)) {
1853 struct io_logs *cur_log;
1854
1855 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1856 flist_del_init(&cur_log->list);
1857
1858 ret = __fio_append_iolog_gz(first, log, cur_log, &stream);
1859 if (ret)
1860 break;
1861 }
1862
1863 ret = deflate(&stream, Z_FINISH);
1864
1865 while (ret != Z_STREAM_END) {
1866 struct sk_entry *entry;
1867 unsigned int this_len;
1868 void *out_pdu;
1869
1870 out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
1871 stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
1872 stream.next_out = out_pdu;
1873
1874 ret = deflate(&stream, Z_FINISH);
1875 /* may be Z_OK, or Z_STREAM_END */
1876 if (ret < 0) {
1877 free(out_pdu);
1878 break;
1879 }
1880
1881 this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
1882
1883 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
1884 NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
1885 if (!entry) {
1886 free(out_pdu);
1887 break;
1888 }
1889 flist_add_tail(&entry->list, &first->next);
1890 }
1891
1892 ret = deflateEnd(&stream);
1893 if (ret == Z_OK)
1894 return 0;
1895
1896 return 1;
1897}
1898#else
1899static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
1900{
1901 return 1;
1902}
1903#endif
1904
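/*
 * Queue the already-compressed log chunks for transmission, referencing
 * the chunk buffers directly instead of copying them.
 */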
1905static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log)
1906{
1907 struct sk_entry *entry;
1908 struct flist_head *node;
1909 int ret = 0;
1910
1911 pthread_mutex_lock(&log->chunk_lock);
1912 flist_for_each(node, &log->chunk_list) {
1913 struct iolog_compress *c;
1914
1915 c = flist_entry(node, struct iolog_compress, list);
1916 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len,
1917 NULL, SK_F_VEC | SK_F_INLINE);
1918 if (!entry) {
1919 ret = 1;
1920 break;
1921 }
1922 flist_add_tail(&entry->list, &first->next);
1923 }
1924 pthread_mutex_unlock(&log->chunk_lock);
1925 return ret;
1926}
1927
1928static int fio_append_text_log(struct sk_entry *first, struct io_log *log)
1929{
1930 struct sk_entry *entry;
1931 int ret = 0;
1932
1933 while (!flist_empty(&log->io_logs)) {
1934 struct io_logs *cur_log;
1935 size_t size;
1936
1937 cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
1938 flist_del_init(&cur_log->list);
1939
1940 size = cur_log->nr_samples * log_entry_sz(log);
1941
1942 entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size,
1943 NULL, SK_F_VEC | SK_F_INLINE);
1944 if (!entry) {
1945 ret = 1;
1946 break;
1947 }
1948 flist_add_tail(&entry->list, &first->next);
1949 }
1950
1951 return ret;
1952}
1953
1954int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
1955{
1956 struct cmd_iolog_pdu pdu = {
1957 .nr_samples = cpu_to_le64(iolog_nr_samples(log)),
1958 .thread_number = cpu_to_le32(td->thread_number),
1959 .log_type = cpu_to_le32(log->log_type),
1960 .log_hist_coarseness = cpu_to_le32(log->hist_coarseness),
1961 };
1962 struct sk_entry *first;
1963 struct flist_head *entry;
1964 int ret = 0;
1965
1966 if (!flist_empty(&log->chunk_list))
1967 pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
1968 else if (use_zlib)
1969 pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED);
1970 else
1971 pdu.compressed = 0;
1972
1973 strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX);
1974 pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
1975
1976 /*
1977 * We can't do this for a pre-compressed log, but for that case,
1978 * log->nr_samples is zero anyway.
1979 */
1980 flist_for_each(entry, &log->io_logs) {
1981 struct io_logs *cur_log;
1982 int i;
1983
1984 cur_log = flist_entry(entry, struct io_logs, list);
1985
1986 for (i = 0; i < cur_log->nr_samples; i++) {
1987 struct io_sample *s = get_sample(log, cur_log, i);
1988
1989 s->time = cpu_to_le64(s->time);
1990 s->data.val = cpu_to_le64(s->data.val);
1991 s->__ddir = cpu_to_le32(s->__ddir);
1992 s->bs = cpu_to_le64(s->bs);
1993
1994 if (log->log_offset) {
1995 struct io_sample_offset *so = (void *) s;
1996
1997 so->offset = cpu_to_le64(so->offset);
1998 }
1999 }
2000 }
2001
2002 /*
2003 * Assemble header entry first
2004 */
2005 first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY);
2006 if (!first)
2007 return 1;
2008
2009 /*
2010 * Now append actual log entries. If log compression was enabled on
2011 * the job, just send out the compressed chunks directly. If we
2012 * have a plain log, compress if we can, then send. Otherwise, send
2013 * the plain text output.
2014 */
2015 if (!flist_empty(&log->chunk_list))
2016 ret = fio_append_gz_chunks(first, log);
2017 else if (use_zlib)
2018 ret = fio_append_iolog_gz(first, log);
2019 else
2020 ret = fio_append_text_log(first, log);
2021
2022 fio_net_queue_entry(first);
2023 return ret;
2024}
2025
2026void fio_server_send_add_job(struct thread_data *td)
2027{
2028 struct cmd_add_job_pdu pdu = {
2029 .thread_number = cpu_to_le32(td->thread_number),
2030 .groupid = cpu_to_le32(td->groupid),
2031 };
2032
2033 convert_thread_options_to_net(&pdu.top, &td->o);
2034
2035 fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL,
2036 SK_F_COPY);
2037}
2038
2039void fio_server_send_start(struct thread_data *td)
2040{
2041 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2042
2043 assert(sk_out->sk != -1);
2044
2045 fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
2046}
2047
2048int fio_server_get_verify_state(const char *name, int threadnumber,
2049 void **datap)
2050{
2051 struct thread_io_list *s;
2052 struct cmd_sendfile out;
2053 struct cmd_reply *rep;
2054 uint64_t tag;
2055 void *data;
2056 int ret;
2057
2058 dprint(FD_NET, "server: request verify state\n");
2059
2060 rep = smalloc(sizeof(*rep));
2061 if (!rep)
2062 return ENOMEM;
2063
2064 __fio_sem_init(&rep->lock, FIO_SEM_LOCKED);
2065 rep->data = NULL;
2066 rep->error = 0;
2067
2068 verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
2069 threadnumber);
2070 tag = (uint64_t) (uintptr_t) rep;
2071 fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag,
2072 SK_F_COPY);
2073
2074 /*
2075 * Wait for the backend to receive the reply
2076 */
2077 if (fio_sem_down_timeout(&rep->lock, 10000)) {
2078 log_err("fio: timed out waiting for reply\n");
2079 ret = ETIMEDOUT;
2080 goto fail;
2081 }
2082
2083 if (rep->error) {
2084 log_err("fio: failure on receiving state file %s: %s\n",
2085 out.path, strerror(rep->error));
2086 ret = rep->error;
2087fail:
2088 *datap = NULL;
2089 sfree(rep);
2090 fio_net_queue_quit();
2091 return ret;
2092 }
2093
2094 /*
2095 * The format is verify_state_hdr, then thread_io_list. Verify
2096 * the header, and the thread_io_list checksum
2097 */
2098 s = rep->data + sizeof(struct verify_state_hdr);
2099 if (verify_state_hdr(rep->data, s)) {
2100 ret = EILSEQ;
2101 goto fail;
2102 }
2103
2104 /*
2105 * Don't need the header from now, copy just the thread_io_list
2106 */
2107 ret = 0;
2108 rep->size -= sizeof(struct verify_state_hdr);
2109 data = malloc(rep->size);
2110 memcpy(data, s, rep->size);
2111 *datap = data;
2112
2113 sfree(rep->data);
2114 __fio_sem_remove(&rep->lock);
2115 sfree(rep);
2116 return ret;
2117}
2118
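/*
 * Create the TCP listening socket (IPv4 or IPv6), enable address reuse,
 * and bind it to the configured address and port.
 */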
2119static int fio_init_server_ip(void)
2120{
2121 struct sockaddr *addr;
2122 socklen_t socklen;
2123 char buf[80];
2124 const char *str;
2125 int sk, opt;
2126
2127 if (use_ipv6)
2128 sk = socket(AF_INET6, SOCK_STREAM, 0);
2129 else
2130 sk = socket(AF_INET, SOCK_STREAM, 0);
2131
2132 if (sk < 0) {
2133 log_err("fio: socket: %s\n", strerror(errno));
2134 return -1;
2135 }
2136
2137 opt = 1;
2138 if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) {
2139 log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno));
2140 close(sk);
2141 return -1;
2142 }
2143#ifdef SO_REUSEPORT
2144 /*
2145 * Not fatal if fails, so just ignore it if that happens
2146 */
2147 setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
2148#endif
2149
2150 if (use_ipv6) {
2151 void *src = &saddr_in6.sin6_addr;
2152
2153 addr = (struct sockaddr *) &saddr_in6;
2154 socklen = sizeof(saddr_in6);
2155 saddr_in6.sin6_family = AF_INET6;
2156 str = inet_ntop(AF_INET6, src, buf, sizeof(buf));
2157 } else {
2158 void *src = &saddr_in.sin_addr;
2159
2160 addr = (struct sockaddr *) &saddr_in;
2161 socklen = sizeof(saddr_in);
2162 saddr_in.sin_family = AF_INET;
2163 str = inet_ntop(AF_INET, src, buf, sizeof(buf));
2164 }
2165
2166 if (bind(sk, addr, socklen) < 0) {
2167 log_err("fio: bind: %s\n", strerror(errno));
2168 log_info("fio: failed with IPv%c %s\n", use_ipv6 ? '6' : '4', str);
2169 close(sk);
2170 return -1;
2171 }
2172
2173 return sk;
2174}
2175
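/*
 * Create and bind a unix domain socket at the 'bind_sock' path. The umask
 * is temporarily cleared so the socket file is created world-accessible.
 * Returns the bound socket fd, or -1 on error.
 */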
2176static int fio_init_server_sock(void)
2177{
2178 struct sockaddr_un addr;
2179 socklen_t len;
2180 mode_t mode;
2181 int sk;
2182
2183 sk = socket(AF_UNIX, SOCK_STREAM, 0);
2184 if (sk < 0) {
2185 log_err("fio: socket: %s\n", strerror(errno));
2186 return -1;
2187 }
2188
2189 mode = umask(000);
2190
2191 memset(&addr, 0, sizeof(addr));
2192 addr.sun_family = AF_UNIX;
2193 strncpy(addr.sun_path, bind_sock, sizeof(addr.sun_path) - 1);
2194
2195 len = sizeof(addr.sun_family) + strlen(bind_sock) + 1;
2196
2197 if (bind(sk, (struct sockaddr *) &addr, len) < 0) {
2198 log_err("fio: bind: %s\n", strerror(errno));
2199 close(sk);
2200 return -1;
2201 }
2202
2203 umask(mode);
2204 return sk;
2205}
2206
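/*
 * Set up the server's listening socket, either TCP or a unix domain
 * socket depending on the arguments, log what we are listening on, and
 * start listening. Returns the listening fd, or a negative value on error.
 */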
2207static int fio_init_server_connection(void)
2208{
2209 char bind_str[128];
2210 int sk;
2211
2212 dprint(FD_NET, "starting server\n");
2213
2214 if (!bind_sock)
2215 sk = fio_init_server_ip();
2216 else
2217 sk = fio_init_server_sock();
2218
2219 if (sk < 0)
2220 return sk;
2221
2222 memset(bind_str, 0, sizeof(bind_str));
2223
2224 if (!bind_sock) {
2225 char *p, port[16];
2226 void *src;
2227 int af;
2228
2229 if (use_ipv6) {
2230 af = AF_INET6;
2231 src = &saddr_in6.sin6_addr;
2232 } else {
2233 af = AF_INET;
2234 src = &saddr_in.sin_addr;
2235 }
2236
2237 p = (char *) inet_ntop(af, src, bind_str, sizeof(bind_str));
2238
2239 sprintf(port, ",%u", fio_net_port);
2240 if (p)
2241 strcat(p, port);
2242 else
2243 strncpy(bind_str, port, sizeof(bind_str) - 1);
2244 } else
2245 strncpy(bind_str, bind_sock, sizeof(bind_str) - 1);
2246
2247 log_info("fio: server listening on %s\n", bind_str);
2248
2249 if (listen(sk, 4) < 0) {
2250 log_err("fio: listen: %s\n", strerror(errno));
2251 close(sk);
2252 return -1;
2253 }
2254
2255 return sk;
2256}
2257
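/*
 * Convert 'host' to a binary address. Try it as a literal IPv4/IPv6
 * address first, then fall back to resolving it with getaddrinfo().
 * Returns 0 on success, non-zero on failure.
 */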
2258int fio_server_parse_host(const char *host, int ipv6, struct in_addr *inp,
2259 struct in6_addr *inp6)
2260
2261{
2262 int ret = 0;
2263
2264 if (ipv6)
2265 ret = inet_pton(AF_INET6, host, inp6);
2266 else
2267 ret = inet_pton(AF_INET, host, inp);
2268
2269 if (ret != 1) {
2270 struct addrinfo *res, hints = {
2271 .ai_family = ipv6 ? AF_INET6 : AF_INET,
2272 .ai_socktype = SOCK_STREAM,
2273 };
2274
2275 ret = getaddrinfo(host, NULL, &hints, &res);
2276 if (ret) {
2277 log_err("fio: failed to resolve <%s> (%s)\n", host,
2278 gai_strerror(ret));
2279 return 1;
2280 }
2281
2282 if (ipv6)
2283 memcpy(inp6, &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr, sizeof(*inp6));
2284 else
2285 memcpy(inp, &((struct sockaddr_in *) res->ai_addr)->sin_addr, sizeof(*inp));
2286
2287 ret = 1;
2288 freeaddrinfo(res);
2289 }
2290
2291 return !(ret == 1);
2292}
2293
2294/*
2295 * Parse a host/ip/port string. Reads from 'str'.
2296 *
2297 * Outputs:
2298 *
2299 * For IPv4:
2300 * *ptr is the host, *port is the port, inp is the destination.
2301 * For IPv6:
2302 *	*ptr is the host, *port is the port, inp6 is the destination, and *ipv6 is 1.
2303 * For local domain sockets:
2304 *	*ptr is the filename, *is_sock is true.
2305 */
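/*
 * Illustrative examples of accepted strings (paths and addresses below are
 * just examples):
 *
 *	"sock:/tmp/fio.sock"	-> *is_sock = true, *ptr = "/tmp/fio.sock"
 *	":8765"			-> port only, *port = 8765
 *	"ip:10.0.0.1,8765"	-> IPv4 address in inp, *port = 8765
 *	"ip6:::1,8765"		-> IPv6 address in inp6, *ipv6 = 1, *port = 8765
 *	"somehost"		-> hostname resolved via fio_server_parse_host()
 */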
2306int fio_server_parse_string(const char *str, char **ptr, bool *is_sock,
2307 int *port, struct in_addr *inp,
2308 struct in6_addr *inp6, int *ipv6)
2309{
2310 const char *host = str;
2311 char *portp;
2312 int lport = 0;
2313
2314 *ptr = NULL;
2315 *is_sock = false;
2316 *port = fio_net_port;
2317 *ipv6 = 0;
2318
2319 if (!strncmp(str, "sock:", 5)) {
2320 *ptr = strdup(str + 5);
2321 *is_sock = true;
2322
2323 return 0;
2324 }
2325
2326 /*
2327	 * Is it ip:<ip or host>[,port]?
2328 */
2329 if (!strncmp(host, "ip:", 3))
2330 host += 3;
2331 else if (!strncmp(host, "ip4:", 4))
2332 host += 4;
2333 else if (!strncmp(host, "ip6:", 4)) {
2334 host += 4;
2335 *ipv6 = 1;
2336 } else if (host[0] == ':') {
2337 /* String is :port */
2338 host++;
2339 lport = atoi(host);
2340 if (!lport || lport > 65535) {
2341 log_err("fio: bad server port %u\n", lport);
2342 return 1;
2343 }
2344 /* no hostname given, we are done */
2345 *port = lport;
2346 return 0;
2347 }
2348
2349 /*
2350	 * If no port was seen yet, check for a trailing ',port' specifier
2351 */
2352 if (!lport) {
2353 portp = strchr(host, ',');
2354 if (portp) {
2355 *portp = '\0';
2356 portp++;
2357 lport = atoi(portp);
2358 if (!lport || lport > 65535) {
2359 log_err("fio: bad server port %u\n", lport);
2360 return 1;
2361 }
2362 }
2363 }
2364
2365 if (lport)
2366 *port = lport;
2367
2368 if (!strlen(host))
2369 return 0;
2370
2371 *ptr = strdup(host);
2372
2373 if (fio_server_parse_host(*ptr, *ipv6, inp, inp6)) {
2374 free(*ptr);
2375 *ptr = NULL;
2376 return 1;
2377 }
2378
2379 if (*port == 0)
2380 *port = fio_net_port;
2381
2382 return 0;
2383}
2384
2385/*
2386 * Server arg should be one of:
2387 *
2388 * sock:/path/to/socket
2389 * ip:1.2.3.4
2390 * 1.2.3.4
2391 *
2392 * Here, sock uses a unix domain socket, and ip binds the server to a
2393 * specific address (a ',port' suffix selects the port). If no argument
2394 * is given, the server uses TCP and binds to 0.0.0.0 on the default port.
2395 *
2396 */
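/*
 * For example (illustrative invocations):
 *
 *	fio --server			listen on 0.0.0.0, default port
 *	fio --server=ip:10.0.0.1,8765	bind to 10.0.0.1, port 8765
 *	fio --server=ip6:::1		bind to the IPv6 loopback address
 *	fio --server=sock:/tmp/fio.sock	listen on a unix domain socket
 */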
2397static int fio_handle_server_arg(void)
2398{
2399 int port = fio_net_port;
2400 bool is_sock;
2401 int ret = 0;
2402
2403 saddr_in.sin_addr.s_addr = htonl(INADDR_ANY);
2404
2405 if (!fio_server_arg)
2406 goto out;
2407
2408 ret = fio_server_parse_string(fio_server_arg, &bind_sock, &is_sock,
2409 &port, &saddr_in.sin_addr,
2410 &saddr_in6.sin6_addr, &use_ipv6);
2411
2412 if (!is_sock && bind_sock) {
2413 free(bind_sock);
2414 bind_sock = NULL;
2415 }
2416
2417out:
2418 fio_net_port = port;
2419 saddr_in.sin_port = htons(port);
2420 saddr_in6.sin6_port = htons(port);
2421 return ret;
2422}
2423
2424static void sig_int(int sig)
2425{
2426 if (bind_sock)
2427 unlink(bind_sock);
2428}
2429
2430static void set_sig_handlers(void)
2431{
2432 struct sigaction act = {
2433 .sa_handler = sig_int,
2434 .sa_flags = SA_RESTART,
2435 };
2436
2437 sigaction(SIGINT, &act, NULL);
2438}
2439
2440void fio_server_destroy_sk_key(void)
2441{
2442 pthread_key_delete(sk_out_key);
2443}
2444
2445int fio_server_create_sk_key(void)
2446{
2447 if (pthread_key_create(&sk_out_key, NULL)) {
2448 log_err("fio: can't create sk_out backend key\n");
2449 return 1;
2450 }
2451
2452 pthread_setspecific(sk_out_key, NULL);
2453 return 0;
2454}
2455
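/*
 * Main entry point for the server proper: parse the server argument, set
 * up the listening socket, install signal handlers, and run the accept
 * loop until we are told to exit.
 */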
2456static int fio_server(void)
2457{
2458 int sk, ret;
2459
2460 dprint(FD_NET, "starting server\n");
2461
2462 if (fio_handle_server_arg())
2463 return -1;
2464
2465 sk = fio_init_server_connection();
2466 if (sk < 0)
2467 return -1;
2468
2469 set_sig_handlers();
2470
2471 ret = accept_loop(sk);
2472
2473 close(sk);
2474
2475 if (fio_server_arg) {
2476 free(fio_server_arg);
2477 fio_server_arg = NULL;
2478 }
2479 if (bind_sock)
2480 free(bind_sock);
2481
2482 return ret;
2483}
2484
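/*
 * Called from the signal handling path. SIGPIPE just marks this
 * connection's socket as dead; any other signal requests a full
 * backend exit.
 */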
2485void fio_server_got_signal(int signal)
2486{
2487 struct sk_out *sk_out = pthread_getspecific(sk_out_key);
2488
2489 assert(sk_out);
2490
2491 if (signal == SIGPIPE)
2492 sk_out->sk = -1;
2493 else {
2494 log_info("\nfio: terminating on signal %d\n", signal);
2495 exit_backend = true;
2496 }
2497}
2498
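/*
 * Check for an existing pidfile. Returns non-zero if the pidfile exists
 * and we cannot rule out that the server recorded in it is still running
 * (the pid is probed with kill(pid, SIGCONT)).
 */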
2499static int check_existing_pidfile(const char *pidfile)
2500{
2501 struct stat sb;
2502 char buf[16];
2503 pid_t pid;
2504 FILE *f;
2505
2506 if (stat(pidfile, &sb))
2507 return 0;
2508
2509 f = fopen(pidfile, "r");
2510 if (!f)
2511 return 0;
2512
2513	if (!fgets(buf, sizeof(buf), f)) {
2514 fclose(f);
2515 return 1;
2516 }
2517 fclose(f);
2518
2519 pid = atoi(buf);
2520 if (kill(pid, SIGCONT) < 0)
2521 return errno != ESRCH;
2522
2523 return 1;
2524}
2525
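/*
 * Write the forked server's pid to the pidfile. Returns non-zero if the
 * pidfile could not be opened for writing.
 */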
2526static int write_pid(pid_t pid, const char *pidfile)
2527{
2528 FILE *fpid;
2529
2530 fpid = fopen(pidfile, "w");
2531 if (!fpid) {
2532 log_err("fio: failed opening pid file %s\n", pidfile);
2533 return 1;
2534 }
2535
2536 fprintf(fpid, "%u\n", (unsigned int) pid);
2537 fclose(fpid);
2538 return 0;
2539}
2540
2541/*
2542 * If a pidfile is specified, background us.
2543 */
2544int fio_start_server(char *pidfile)
2545{
2546 pid_t pid;
2547 int ret;
2548
2549#if defined(WIN32)
2550 WSADATA wsd;
2551 WSAStartup(MAKEWORD(2, 2), &wsd);
2552#endif
2553
2554 if (!pidfile)
2555 return fio_server();
2556
2557 if (check_existing_pidfile(pidfile)) {
2558 log_err("fio: pidfile %s exists and server appears alive\n",
2559 pidfile);
2560 free(pidfile);
2561 return -1;
2562 }
2563
2564 pid = fork();
2565 if (pid < 0) {
2566 log_err("fio: failed server fork: %s\n", strerror(errno));
2567 free(pidfile);
2568 return -1;
2569 } else if (pid) {
2570 ret = write_pid(pid, pidfile);
2571 free(pidfile);
2572 _exit(ret);
2573 }
2574
2575 setsid();
2576 openlog("fio", LOG_NDELAY|LOG_NOWAIT|LOG_PID, LOG_USER);
2577 log_syslog = true;
2578 close(STDIN_FILENO);
2579 close(STDOUT_FILENO);
2580 close(STDERR_FILENO);
2581 f_out = NULL;
2582 f_err = NULL;
2583
2584 ret = fio_server();
2585
2586 closelog();
2587 unlink(pidfile);
2588 free(pidfile);
2589 return ret;
2590}
2591
2592void fio_server_set_arg(const char *arg)
2593{
2594 fio_server_arg = strdup(arg);
2595}