X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=server.c;h=6d5d4ea36ebaf9a774753d6d62c5c50d519c55f7;hp=ab3bece1f61f47e3f7e8dd704a5acf64ac50223d;hb=c58390119ccf711ab66e53aff6aa31507c509900;hpb=8d7b618a261e7642769b8f0bf2cf6649bb383330

diff --git a/server.c b/server.c
index ab3bece1..6d5d4ea3 100644
--- a/server.c
+++ b/server.c
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -22,15 +21,46 @@
 #endif
 
 #include "fio.h"
+#include "options.h"
 #include "server.h"
 #include "crc/crc16.h"
 #include "lib/ieee754.h"
+#include "verify.h"
+#include "smalloc.h"
 
 int fio_net_port = FIO_NET_PORT;
 
 int exit_backend = 0;
 
-static int server_fd = -1;
+enum {
+	SK_F_FREE	= 1,
+	SK_F_COPY	= 2,
+	SK_F_SIMPLE	= 4,
+	SK_F_VEC	= 8,
+	SK_F_INLINE	= 16,
+};
+
+struct sk_entry {
+	struct flist_head list;	/* link on sk_out->list */
+	int flags;		/* SK_F_* */
+	int opcode;		/* Actual command fields */
+	void *buf;
+	off_t size;
+	uint64_t tag;
+	struct flist_head next;	/* Other sk_entry's, if linked command */
+};
+
+struct sk_out {
+	unsigned int refs;	/* frees sk_out when it drops to zero.
+				 * protected by below ->lock */
+
+	int sk;			/* socket fd to talk to client */
+	struct fio_mutex lock;	/* protects ref and below list */
+	struct flist_head list;	/* list of pending transmit work */
+	struct fio_mutex wait;	/* wake backend when items added to list */
+	struct fio_mutex xmit;	/* held while sending data */
+};
+
 static char *fio_server_arg;
 static char *bind_sock;
 static struct sockaddr_in saddr_in;
@@ -42,6 +72,9 @@ static unsigned int has_zlib = 1;
 static unsigned int has_zlib = 0;
 #endif
 static unsigned int use_zlib;
+static char me[128];
+
+static pthread_key_t sk_out_key;
 
 struct fio_fork_item {
 	struct flist_head list;
@@ -51,6 +84,13 @@ struct fio_fork_item {
 	pid_t pid;
 };
 
+struct cmd_reply {
+	struct fio_mutex lock;
+	void *data;
+	size_t size;
+	int error;
+};
+
 static const char *fio_server_ops[FIO_NET_CMD_NR] = {
 	"",
 	"QUIT",
@@ -68,10 +108,93 @@ static const char *fio_server_ops[FIO_NET_CMD_NR] = {
 	"DISK_UTIL",
 	"SERVER_START",
 	"ADD_JOB",
-	"CMD_RUN",
-	"CMD_IOLOG",
+	"RUN",
+	"IOLOG",
+	"UPDATE_JOB",
+	"LOAD_FILE",
+	"VTRIGGER",
+	"SENDFILE",
+	"JOB_OPT",
 };
 
+static void sk_lock(struct sk_out *sk_out)
+{
+	fio_mutex_down(&sk_out->lock);
+}
+
+static void sk_unlock(struct sk_out *sk_out)
+{
+	fio_mutex_up(&sk_out->lock);
+}
+
+void sk_out_assign(struct sk_out *sk_out)
+{
+	if (!sk_out)
+		return;
+
+	sk_lock(sk_out);
+	sk_out->refs++;
+	sk_unlock(sk_out);
+	pthread_setspecific(sk_out_key, sk_out);
+}
+
+static void sk_out_free(struct sk_out *sk_out)
+{
+	__fio_mutex_remove(&sk_out->lock);
+	__fio_mutex_remove(&sk_out->wait);
+	__fio_mutex_remove(&sk_out->xmit);
+	sfree(sk_out);
+}
+
+static int __sk_out_drop(struct sk_out *sk_out)
+{
+	if (sk_out) {
+		int refs;
+
+		sk_lock(sk_out);
+		assert(sk_out->refs != 0);
+		refs = --sk_out->refs;
+		sk_unlock(sk_out);
+
+		if (!refs) {
+			sk_out_free(sk_out);
+			pthread_setspecific(sk_out_key, NULL);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+void sk_out_drop(void)
+{
+	struct sk_out *sk_out;
+
+	sk_out = pthread_getspecific(sk_out_key);
+	__sk_out_drop(sk_out);
+}
+
+static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
+			       uint32_t pdu_len, uint64_t tag)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->version = __cpu_to_le16(FIO_SERVER_VER);
+	cmd->opcode = cpu_to_le16(opcode);
+	cmd->tag = cpu_to_le64(tag);
+	cmd->pdu_len = cpu_to_le32(pdu_len);
+}
+
+
+static void fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t
opcode, + const void *pdu, uint32_t pdu_len, uint64_t tag) +{ + __fio_init_net_cmd(cmd, opcode, pdu_len, tag); + + if (pdu) + memcpy(&cmd->payload, pdu, pdu_len); +} + const char *fio_server_op(unsigned int op) { static char buf[32]; @@ -128,13 +251,10 @@ static int fio_sendv_data(int sk, struct iovec *iov, int count) if (!total_len) return 0; - if (errno) - return -errno; - return 1; } -int fio_send_data(int sk, const void *p, unsigned int len) +static int fio_send_data(int sk, const void *p, unsigned int len) { struct iovec iov = { .iov_base = (void *) p, .iov_len = len }; @@ -143,10 +263,17 @@ int fio_send_data(int sk, const void *p, unsigned int len) return fio_sendv_data(sk, &iov, 1); } -int fio_recv_data(int sk, void *p, unsigned int len) +static int fio_recv_data(int sk, void *p, unsigned int len, bool wait) { + int flags; + + if (wait) + flags = MSG_WAITALL; + else + flags = OS_MSG_DONTWAIT; + do { - int ret = recv(sk, p, len, MSG_WAITALL); + int ret = recv(sk, p, len, flags); if (ret > 0) { len -= ret; @@ -156,9 +283,11 @@ int fio_recv_data(int sk, void *p, unsigned int len) continue; } else if (!ret) break; - else if (errno == EAGAIN || errno == EINTR) - continue; - else + else if (errno == EAGAIN || errno == EINTR) { + if (wait) + continue; + break; + } else break; } while (!exit_backend); @@ -207,16 +336,16 @@ static int verify_convert_cmd(struct fio_net_cmd *cmd) /* * Read (and defragment, if necessary) incoming commands */ -struct fio_net_cmd *fio_net_recv_cmd(int sk) +struct fio_net_cmd *fio_net_recv_cmd(int sk, bool wait) { - struct fio_net_cmd cmd, *cmdret = NULL; + struct fio_net_cmd cmd, *tmp, *cmdret = NULL; size_t cmd_size = 0, pdu_offset = 0; uint16_t crc; int ret, first = 1; void *pdu = NULL; do { - ret = fio_recv_data(sk, &cmd, sizeof(cmd)); + ret = fio_recv_data(sk, &cmd, sizeof(cmd), wait); if (ret) break; @@ -232,7 +361,19 @@ struct fio_net_cmd *fio_net_recv_cmd(int sk) } else cmd_size += cmd.pdu_len; - cmdret = realloc(cmdret, cmd_size); + if (cmd_size / 1024 > FIO_SERVER_MAX_CMD_MB * 1024) { + log_err("fio: cmd+pdu too large (%llu)\n", (unsigned long long) cmd_size); + ret = 1; + break; + } + + tmp = realloc(cmdret, cmd_size); + if (!tmp) { + log_err("fio: server failed allocating cmd\n"); + ret = 1; + break; + } + cmdret = tmp; if (first) memcpy(cmdret, &cmd, sizeof(cmd)); @@ -248,7 +389,7 @@ struct fio_net_cmd *fio_net_recv_cmd(int sk) /* There's payload, get it */ pdu = (void *) cmdret->payload + pdu_offset; - ret = fio_recv_data(sk, pdu, cmd.pdu_len); + ret = fio_recv_data(sk, pdu, cmd.pdu_len, wait); if (ret) break; @@ -274,14 +415,14 @@ struct fio_net_cmd *fio_net_recv_cmd(int sk) /* zero-terminate text input */ if (cmdret->pdu_len) { if (cmdret->opcode == FIO_NET_CMD_TEXT) { - struct cmd_text_pdu *pdu = (struct cmd_text_pdu *) cmdret->payload; - char *buf = (char *) pdu->buf; + struct cmd_text_pdu *__pdu = (struct cmd_text_pdu *) cmdret->payload; + char *buf = (char *) __pdu->buf; - buf[pdu->buf_len] = '\0'; + buf[__pdu->buf_len] = '\0'; } else if (cmdret->opcode == FIO_NET_CMD_JOB) { - struct cmd_job_pdu *pdu = (struct cmd_job_pdu *) cmdret->payload; - char *buf = (char *) pdu->buf; - int len = le32_to_cpu(pdu->buf_len); + struct cmd_job_pdu *__pdu = (struct cmd_job_pdu *) cmdret->payload; + char *buf = (char *) __pdu->buf; + int len = le32_to_cpu(__pdu->buf_len); buf[len] = '\0'; } @@ -308,7 +449,7 @@ static uint64_t alloc_reply(uint64_t tag, uint16_t opcode) reply = calloc(1, sizeof(*reply)); INIT_FLIST_HEAD(&reply->list); - gettimeofday(&reply->tv, 
NULL); + fio_gettime(&reply->tv, NULL); reply->saved_tag = tag; reply->opcode = opcode; @@ -323,7 +464,7 @@ static void free_reply(uint64_t tag) free(reply); } -void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu) +static void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu) { uint32_t pdu_len; @@ -333,7 +474,7 @@ void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu) cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len)); } -void fio_net_cmd_crc(struct fio_net_cmd *cmd) +static void fio_net_cmd_crc(struct fio_net_cmd *cmd) { fio_net_cmd_crc_pdu(cmd, cmd->payload); } @@ -390,6 +531,61 @@ int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size, return ret; } +static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf, + size_t size, uint64_t *tagptr, + int flags) +{ + struct sk_entry *entry; + + entry = smalloc(sizeof(*entry)); + INIT_FLIST_HEAD(&entry->next); + entry->opcode = opcode; + if (flags & SK_F_COPY) { + entry->buf = smalloc(size); + memcpy(entry->buf, buf, size); + } else + entry->buf = buf; + + entry->size = size; + if (tagptr) + entry->tag = *tagptr; + else + entry->tag = 0; + entry->flags = flags; + return entry; +} + +static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry); + +static void fio_net_queue_entry(struct sk_entry *entry) +{ + struct sk_out *sk_out = pthread_getspecific(sk_out_key); + + if (entry->flags & SK_F_INLINE) + handle_sk_entry(sk_out, entry); + else { + sk_lock(sk_out); + flist_add_tail(&entry->list, &sk_out->list); + sk_unlock(sk_out); + + fio_mutex_up(&sk_out->wait); + } +} + +static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size, + uint64_t *tagptr, int flags) +{ + struct sk_entry *entry; + + entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags); + if (entry) { + fio_net_queue_entry(entry); + return 0; + } + + return 1; +} + static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag) { struct fio_net_cmd cmd; @@ -426,6 +622,13 @@ int fio_net_send_simple_cmd(int sk, uint16_t opcode, uint64_t tag, return 0; } +static int fio_net_queue_quit(void) +{ + dprint(FD_NET, "server: sending quit\n"); + + return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE); +} + int fio_net_send_quit(int sk) { dprint(FD_NET, "server: sending quit\n"); @@ -433,8 +636,7 @@ int fio_net_send_quit(int sk) return fio_net_send_simple_cmd(sk, FIO_NET_CMD_QUIT, 0, NULL); } -static int fio_net_send_ack(int sk, struct fio_net_cmd *cmd, int error, - int signal) +static int fio_net_send_ack(struct fio_net_cmd *cmd, int error, int signal) { struct cmd_end_pdu epdu; uint64_t tag = 0; @@ -444,13 +646,13 @@ static int fio_net_send_ack(int sk, struct fio_net_cmd *cmd, int error, epdu.error = __cpu_to_le32(error); epdu.signal = __cpu_to_le32(signal); - return fio_net_send_cmd(sk, FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, NULL); + return fio_net_queue_cmd(FIO_NET_CMD_STOP, &epdu, sizeof(epdu), &tag, SK_F_COPY); } -int fio_net_send_stop(int sk, int error, int signal) +static int fio_net_queue_stop(int error, int signal) { dprint(FD_NET, "server: sending stop (%d, %d)\n", error, signal); - return fio_net_send_ack(sk, NULL, error, signal); + return fio_net_send_ack(NULL, error, signal); } static void fio_server_add_fork_item(pid_t pid, struct flist_head *list) @@ -501,20 +703,23 @@ static void fio_server_check_fork_item(struct fio_fork_item *ffi) } } -static void fio_server_fork_item_done(struct fio_fork_item *ffi) +static void fio_server_fork_item_done(struct 
fio_fork_item *ffi, bool stop) { dprint(FD_NET, "pid %u exited, sig=%u, exitval=%d\n", (int) ffi->pid, ffi->signal, ffi->exitval); /* * Fold STOP and QUIT... */ - fio_net_send_stop(server_fd, ffi->exitval, ffi->signal); - fio_net_send_quit(server_fd); + if (stop) { + fio_net_queue_stop(ffi->exitval, ffi->signal); + fio_net_queue_quit(); + } + flist_del(&ffi->list); free(ffi); } -static void fio_server_check_fork_items(struct flist_head *list) +static void fio_server_check_fork_items(struct flist_head *list, bool stop) { struct flist_head *entry, *tmp; struct fio_fork_item *ffi; @@ -525,25 +730,51 @@ static void fio_server_check_fork_items(struct flist_head *list) fio_server_check_fork_item(ffi); if (ffi->exited) - fio_server_fork_item_done(ffi); + fio_server_fork_item_done(ffi, stop); } } static void fio_server_check_jobs(struct flist_head *job_list) { - fio_server_check_fork_items(job_list); + fio_server_check_fork_items(job_list, true); } static void fio_server_check_conns(struct flist_head *conn_list) { - fio_server_check_fork_items(conn_list); + fio_server_check_fork_items(conn_list, false); } -static int handle_run_cmd(struct flist_head *job_list, struct fio_net_cmd *cmd) +static int handle_load_file_cmd(struct fio_net_cmd *cmd) +{ + struct cmd_load_file_pdu *pdu = (struct cmd_load_file_pdu *) cmd->payload; + void *file_name = pdu->file; + struct cmd_start_pdu spdu; + + dprint(FD_NET, "server: loading local file %s\n", (char *) file_name); + + pdu->name_len = le16_to_cpu(pdu->name_len); + pdu->client_type = le16_to_cpu(pdu->client_type); + + if (parse_jobs_ini(file_name, 0, 0, pdu->client_type)) { + fio_net_queue_quit(); + return -1; + } + + spdu.jobs = cpu_to_le32(thread_number); + spdu.stat_outputs = cpu_to_le32(stat_number); + fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY); + return 0; +} + +static int handle_run_cmd(struct sk_out *sk_out, struct flist_head *job_list, + struct fio_net_cmd *cmd) { pid_t pid; int ret; + sk_out_assign(sk_out); + + fio_time_init(); set_genesis_time(); pid = fork(); @@ -552,8 +783,9 @@ static int handle_run_cmd(struct flist_head *job_list, struct fio_net_cmd *cmd) return 0; } - ret = fio_backend(); + ret = fio_backend(sk_out); free_threads_shm(); + sk_out_drop(); _exit(ret); } @@ -567,13 +799,14 @@ static int handle_job_cmd(struct fio_net_cmd *cmd) pdu->client_type = le32_to_cpu(pdu->client_type); if (parse_jobs_ini(buf, 1, 0, pdu->client_type)) { - fio_net_send_quit(server_fd); + fio_net_queue_quit(); return -1; } spdu.jobs = cpu_to_le32(thread_number); spdu.stat_outputs = cpu_to_le32(stat_number); - fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, NULL); + + fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY); return 0; } @@ -604,7 +837,7 @@ static int handle_jobline_cmd(struct fio_net_cmd *cmd) } if (parse_cmd_line(clp->lines, argv, clp->client_type)) { - fio_net_send_quit(server_fd); + fio_net_queue_quit(); free(argv); return -1; } @@ -613,7 +846,8 @@ static int handle_jobline_cmd(struct fio_net_cmd *cmd) spdu.jobs = cpu_to_le32(thread_number); spdu.stat_outputs = cpu_to_le32(stat_number); - fio_net_send_cmd(server_fd, FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, NULL); + + fio_net_queue_cmd(FIO_NET_CMD_START, &spdu, sizeof(spdu), NULL, SK_F_COPY); return 0; } @@ -625,6 +859,8 @@ static int handle_probe_cmd(struct fio_net_cmd *cmd) dprint(FD_NET, "server: sending probe reply\n"); + strcpy(me, (char *) pdu->server); + memset(&probe, 0, sizeof(probe)); gethostname((char *) 
probe.hostname, sizeof(probe.hostname)); #ifdef CONFIG_BIG_ENDIAN @@ -648,63 +884,60 @@ static int handle_probe_cmd(struct fio_net_cmd *cmd) use_zlib = 0; } - return fio_net_send_cmd(server_fd, FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, NULL); + return fio_net_queue_cmd(FIO_NET_CMD_PROBE, &probe, sizeof(probe), &tag, SK_F_COPY); } static int handle_send_eta_cmd(struct fio_net_cmd *cmd) { struct jobs_eta *je; - size_t size; uint64_t tag = cmd->tag; + size_t size; int i; - if (!thread_number) - return 0; + dprint(FD_NET, "server sending status\n"); - size = sizeof(*je) + thread_number * sizeof(char) + 1; - je = malloc(size); - memset(je, 0, size); + /* + * Fake ETA return if we don't have a local one, otherwise the client + * will end up timing out waiting for a response to the ETA request + */ + je = get_jobs_eta(true, &size); + if (!je) { + size = sizeof(*je); + je = calloc(1, size); + } else { + je->nr_running = cpu_to_le32(je->nr_running); + je->nr_ramp = cpu_to_le32(je->nr_ramp); + je->nr_pending = cpu_to_le32(je->nr_pending); + je->nr_setting_up = cpu_to_le32(je->nr_setting_up); + je->files_open = cpu_to_le32(je->files_open); + + for (i = 0; i < DDIR_RWDIR_CNT; i++) { + je->m_rate[i] = cpu_to_le64(je->m_rate[i]); + je->t_rate[i] = cpu_to_le64(je->t_rate[i]); + je->m_iops[i] = cpu_to_le32(je->m_iops[i]); + je->t_iops[i] = cpu_to_le32(je->t_iops[i]); + je->rate[i] = cpu_to_le64(je->rate[i]); + je->iops[i] = cpu_to_le32(je->iops[i]); + } - if (!calc_thread_status(je, 1)) { - free(je); - return 0; + je->elapsed_sec = cpu_to_le64(je->elapsed_sec); + je->eta_sec = cpu_to_le64(je->eta_sec); + je->nr_threads = cpu_to_le32(je->nr_threads); + je->is_pow2 = cpu_to_le32(je->is_pow2); + je->unit_base = cpu_to_le32(je->unit_base); } - dprint(FD_NET, "server sending status\n"); - - je->nr_running = cpu_to_le32(je->nr_running); - je->nr_ramp = cpu_to_le32(je->nr_ramp); - je->nr_pending = cpu_to_le32(je->nr_pending); - je->nr_setting_up = cpu_to_le32(je->nr_setting_up); - je->files_open = cpu_to_le32(je->files_open); - - for (i = 0; i < DDIR_RWDIR_CNT; i++) { - je->m_rate[i] = cpu_to_le32(je->m_rate[i]); - je->t_rate[i] = cpu_to_le32(je->t_rate[i]); - je->m_iops[i] = cpu_to_le32(je->m_iops[i]); - je->t_iops[i] = cpu_to_le32(je->t_iops[i]); - je->rate[i] = cpu_to_le32(je->rate[i]); - je->iops[i] = cpu_to_le32(je->iops[i]); - } - - je->elapsed_sec = cpu_to_le64(je->elapsed_sec); - je->eta_sec = cpu_to_le64(je->eta_sec); - je->nr_threads = cpu_to_le32(je->nr_threads); - je->is_pow2 = cpu_to_le32(je->is_pow2); - je->unit_base = cpu_to_le32(je->unit_base); - - fio_net_send_cmd(server_fd, FIO_NET_CMD_ETA, je, size, &tag, NULL); - free(je); + fio_net_queue_cmd(FIO_NET_CMD_ETA, je, size, &tag, SK_F_FREE); return 0; } -static int send_update_job_reply(int fd, uint64_t __tag, int error) +static int send_update_job_reply(uint64_t __tag, int error) { uint64_t tag = __tag; uint32_t pdu_error; pdu_error = __cpu_to_le32(error); - return fio_net_send_cmd(fd, FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, NULL); + return fio_net_queue_cmd(FIO_NET_CMD_UPDATE_JOB, &pdu_error, sizeof(pdu_error), &tag, SK_F_COPY); } static int handle_update_job_cmd(struct fio_net_cmd *cmd) @@ -718,17 +951,41 @@ static int handle_update_job_cmd(struct fio_net_cmd *cmd) dprint(FD_NET, "server: updating options for job %u\n", tnumber); if (!tnumber || tnumber > thread_number) { - send_update_job_reply(server_fd, cmd->tag, ENODEV); + send_update_job_reply(cmd->tag, ENODEV); return 0; } td = &threads[tnumber - 1]; 
convert_thread_options_to_cpu(&td->o, &pdu->top); - send_update_job_reply(server_fd, cmd->tag, 0); + send_update_job_reply(cmd->tag, 0); return 0; } -static int handle_command(struct flist_head *job_list, struct fio_net_cmd *cmd) +static int handle_trigger_cmd(struct fio_net_cmd *cmd) +{ + struct cmd_vtrigger_pdu *pdu = (struct cmd_vtrigger_pdu *) cmd->payload; + char *buf = (char *) pdu->cmd; + struct all_io_list *rep; + size_t sz; + + pdu->len = le16_to_cpu(pdu->len); + buf[pdu->len] = '\0'; + + rep = get_all_io_list(IO_LIST_ALL, &sz); + if (!rep) { + struct all_io_list state; + + state.threads = cpu_to_le64((uint64_t) 0); + fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE); + } else + fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE); + + exec_trigger(buf); + return 0; +} + +static int handle_command(struct sk_out *sk_out, struct flist_head *job_list, + struct fio_net_cmd *cmd) { int ret; @@ -739,10 +996,14 @@ static int handle_command(struct flist_head *job_list, struct fio_net_cmd *cmd) switch (cmd->opcode) { case FIO_NET_CMD_QUIT: fio_terminate_threads(TERMINATE_ALL); - return -1; + ret = 0; + break; case FIO_NET_CMD_EXIT: exit_backend = 1; return -1; + case FIO_NET_CMD_LOAD_FILE: + ret = handle_load_file_cmd(cmd); + break; case FIO_NET_CMD_JOB: ret = handle_job_cmd(cmd); break; @@ -756,11 +1017,40 @@ static int handle_command(struct flist_head *job_list, struct fio_net_cmd *cmd) ret = handle_send_eta_cmd(cmd); break; case FIO_NET_CMD_RUN: - ret = handle_run_cmd(job_list, cmd); + ret = handle_run_cmd(sk_out, job_list, cmd); break; case FIO_NET_CMD_UPDATE_JOB: ret = handle_update_job_cmd(cmd); break; + case FIO_NET_CMD_VTRIGGER: + ret = handle_trigger_cmd(cmd); + break; + case FIO_NET_CMD_SENDFILE: { + struct cmd_sendfile_reply *in; + struct cmd_reply *rep; + + rep = (struct cmd_reply *) (uintptr_t) cmd->tag; + + in = (struct cmd_sendfile_reply *) cmd->payload; + in->size = le32_to_cpu(in->size); + in->error = le32_to_cpu(in->error); + if (in->error) { + ret = 1; + rep->error = in->error; + } else { + ret = 0; + rep->data = smalloc(in->size); + if (!rep->data) { + ret = 1; + rep->error = ENOMEM; + } else { + rep->size = in->size; + memcpy(rep->data, in->data, in->size); + } + } + fio_mutex_up(&rep->lock); + break; + } default: log_err("fio: unknown opcode: %s\n", fio_server_op(cmd->opcode)); ret = 1; @@ -769,19 +1059,151 @@ static int handle_command(struct flist_head *job_list, struct fio_net_cmd *cmd) return ret; } -static int handle_connection(int sk) +/* + * Send a command with a separate PDU, not inlined in the command + */ +static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf, + off_t size, uint64_t tag, uint32_t flags) +{ + struct fio_net_cmd cmd; + struct iovec iov[2]; + size_t this_len; + int ret; + + iov[0].iov_base = (void *) &cmd; + iov[0].iov_len = sizeof(cmd); + + do { + uint32_t this_flags = flags; + + this_len = size; + if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU) + this_len = FIO_SERVER_MAX_FRAGMENT_PDU; + + if (this_len < size) + this_flags |= FIO_NET_CMD_F_MORE; + + __fio_init_net_cmd(&cmd, opcode, this_len, tag); + cmd.flags = __cpu_to_le32(this_flags); + fio_net_cmd_crc_pdu(&cmd, buf); + + iov[1].iov_base = (void *) buf; + iov[1].iov_len = this_len; + + ret = fio_sendv_data(sk, iov, 2); + size -= this_len; + buf += this_len; + } while (!ret && size); + + return ret; +} + +static void finish_entry(struct sk_entry *entry) +{ + if (entry->flags & SK_F_FREE) + free(entry->buf); 
+ else if (entry->flags & SK_F_COPY) + sfree(entry->buf); + + sfree(entry); +} + +static void entry_set_flags(struct sk_entry *entry, struct flist_head *list, + unsigned int *flags) +{ + if (!flist_empty(list)) + *flags = FIO_NET_CMD_F_MORE; + else + *flags = 0; +} + +static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first) +{ + unsigned int flags; + int ret; + + entry_set_flags(first, &first->next, &flags); + + ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf, + first->size, first->tag, flags); + + while (!flist_empty(&first->next)) { + struct sk_entry *next; + + next = flist_first_entry(&first->next, struct sk_entry, list); + flist_del_init(&next->list); + + entry_set_flags(next, &first->next, &flags); + + ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf, + next->size, next->tag, flags); + finish_entry(next); + } + + return ret; +} + +static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry) +{ + int ret; + + fio_mutex_down(&sk_out->xmit); + + if (entry->flags & SK_F_VEC) + ret = send_vec_entry(sk_out, entry); + else if (entry->flags & SK_F_SIMPLE) { + ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode, + entry->tag, NULL); + } else { + ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf, + entry->size, &entry->tag, NULL); + } + + fio_mutex_up(&sk_out->xmit); + + if (ret) + log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode)); + + finish_entry(entry); + return ret; +} + +static int handle_xmits(struct sk_out *sk_out) +{ + struct sk_entry *entry; + FLIST_HEAD(list); + int ret = 0; + + sk_lock(sk_out); + if (flist_empty(&sk_out->list)) { + sk_unlock(sk_out); + return 0; + } + + flist_splice_init(&sk_out->list, &list); + sk_unlock(sk_out); + + while (!flist_empty(&list)) { + entry = flist_entry(list.next, struct sk_entry, list); + flist_del(&entry->list); + ret += handle_sk_entry(sk_out, entry); + } + + return ret; +} + +static int handle_connection(struct sk_out *sk_out) { struct fio_net_cmd *cmd = NULL; FLIST_HEAD(job_list); int ret = 0; reset_fio_state(); - server_fd = sk; /* read forever */ while (!exit_backend) { struct pollfd pfd = { - .fd = sk, + .fd = sk_out->sk, .events = POLLIN, }; @@ -792,7 +1214,9 @@ static int handle_connection(int sk) if (!flist_empty(&job_list)) timeout = 100; - ret = poll(&pfd, 1, timeout); + handle_xmits(sk_out); + + ret = poll(&pfd, 1, 0); if (ret < 0) { if (errno == EINTR) break; @@ -800,6 +1224,7 @@ static int handle_connection(int sk) break; } else if (!ret) { fio_server_check_jobs(&job_list); + fio_mutex_down_timeout(&sk_out->wait, timeout); continue; } @@ -816,13 +1241,13 @@ static int handle_connection(int sk) if (ret < 0) break; - cmd = fio_net_recv_cmd(sk); + cmd = fio_net_recv_cmd(sk_out->sk, true); if (!cmd) { ret = -1; break; } - ret = handle_command(&job_list, cmd); + ret = handle_command(sk_out, &job_list, cmd); if (ret) break; @@ -833,26 +1258,66 @@ static int handle_connection(int sk) if (cmd) free(cmd); - close(sk); + handle_xmits(sk_out); + + close(sk_out->sk); + sk_out->sk = -1; + __sk_out_drop(sk_out); _exit(ret); } +/* get the address on this host bound by the input socket, + * whether it is ipv6 or ipv4 */ + +static int get_my_addr_str(int sk) +{ + struct sockaddr_in6 myaddr6 = { 0, }; + struct sockaddr_in myaddr4 = { 0, }; + struct sockaddr *sockaddr_p; + char *net_addr; + socklen_t len; + int ret; + + if (use_ipv6) { + len = sizeof(myaddr6); + sockaddr_p = (struct sockaddr * )&myaddr6; + net_addr = (char * )&myaddr6.sin6_addr; + } else { + 
len = sizeof(myaddr4); + sockaddr_p = (struct sockaddr * )&myaddr4; + net_addr = (char * )&myaddr4.sin_addr; + } + + ret = getsockname(sk, sockaddr_p, &len); + if (ret) { + log_err("fio: getsockaddr: %s\n", strerror(errno)); + return -1; + } + + if (!inet_ntop(use_ipv6?AF_INET6:AF_INET, net_addr, client_sockaddr_str, INET6_ADDRSTRLEN - 1)) { + log_err("inet_ntop: failed to convert addr to string\n"); + return -1; + } + + dprint(FD_NET, "fio server bound to addr %s\n", client_sockaddr_str); + return 0; +} + static int accept_loop(int listen_sk) { struct sockaddr_in addr; struct sockaddr_in6 addr6; socklen_t len = use_ipv6 ? sizeof(addr6) : sizeof(addr); struct pollfd pfd; - int ret = 0, sk, flags, exitval = 0; + int ret = 0, sk, exitval = 0; FLIST_HEAD(conn_list); dprint(FD_NET, "server enter accept loop\n"); - flags = fcntl(listen_sk, F_GETFL); - flags |= O_NONBLOCK; - fcntl(listen_sk, F_SETFL, flags); + fio_set_fd_nonblocking(listen_sk, "server"); while (!exit_backend) { + struct sk_out *sk_out; const char *from; char buf[64]; pid_t pid; @@ -902,6 +1367,13 @@ static int accept_loop(int listen_sk) dprint(FD_NET, "server: connect from %s\n", from); + sk_out = smalloc(sizeof(*sk_out)); + sk_out->sk = sk; + INIT_FLIST_HEAD(&sk_out->list); + __fio_mutex_init(&sk_out->lock, FIO_MUTEX_UNLOCKED); + __fio_mutex_init(&sk_out->wait, FIO_MUTEX_LOCKED); + __fio_mutex_init(&sk_out->xmit, FIO_MUTEX_UNLOCKED); + pid = fork(); if (pid) { close(sk); @@ -909,8 +1381,15 @@ static int accept_loop(int listen_sk) continue; } - /* exits */ - handle_connection(sk); + /* if error, it's already logged, non-fatal */ + get_my_addr_str(sk); + + /* + * Assign sk_out here, it'll be dropped in handle_connection() + * since that function calls _exit() when done + */ + sk_out_assign(sk_out); + handle_connection(sk_out); } return exitval; @@ -918,12 +1397,13 @@ static int accept_loop(int listen_sk) int fio_server_text_output(int level, const char *buf, size_t len) { + struct sk_out *sk_out = pthread_getspecific(sk_out_key); struct cmd_text_pdu *pdu; unsigned int tlen; struct timeval tv; - if (server_fd == -1) - return log_local_buf(buf, len); + if (!sk_out || sk_out->sk == -1) + return -1; tlen = sizeof(*pdu) + len; pdu = malloc(tlen); @@ -937,7 +1417,7 @@ int fio_server_text_output(int level, const char *buf, size_t len) memcpy(pdu->buf, buf, len); - fio_net_send_cmd(server_fd, FIO_NET_CMD_TEXT, pdu, tlen, NULL, NULL); + fio_net_queue_cmd(FIO_NET_CMD_TEXT, pdu, tlen, NULL, SK_F_COPY); free(pdu); return len; } @@ -951,8 +1431,8 @@ static void convert_io_stat(struct io_stat *dst, struct io_stat *src) /* * Encode to IEEE 754 for network transfer */ - dst->mean.u.i = __cpu_to_le64(fio_double_to_uint64(src->mean.u.f)); - dst->S.u.i = __cpu_to_le64(fio_double_to_uint64(src->S.u.f)); + dst->mean.u.i = cpu_to_le64(fio_double_to_uint64(src->mean.u.f)); + dst->S.u.i = cpu_to_le64(fio_double_to_uint64(src->S.u.f)); } static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src) @@ -964,7 +1444,7 @@ static void convert_gs(struct group_run_stats *dst, struct group_run_stats *src) dst->min_run[i] = cpu_to_le64(src->min_run[i]); dst->max_bw[i] = cpu_to_le64(src->max_bw[i]); dst->min_bw[i] = cpu_to_le64(src->min_bw[i]); - dst->io_kb[i] = cpu_to_le64(src->io_kb[i]); + dst->iobytes[i] = cpu_to_le64(src->iobytes[i]); dst->agg[i] = cpu_to_le64(src->agg[i]); } @@ -982,6 +1462,8 @@ void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs) { struct cmd_ts_pdu p; int i, j; + void *ss_buf; + uint64_t *ss_iops, 
*ss_bw; dprint(FD_NET, "server sending end stats\n"); @@ -1011,12 +1493,13 @@ void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs) p.ts.minf = cpu_to_le64(ts->minf); p.ts.majf = cpu_to_le64(ts->majf); p.ts.clat_percentiles = cpu_to_le64(ts->clat_percentiles); + p.ts.percentile_precision = cpu_to_le64(ts->percentile_precision); for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) { fio_fp64_t *src = &ts->percentile_list[i]; fio_fp64_t *dst = &p.ts.percentile_list[i]; - dst->u.i = __cpu_to_le64(fio_double_to_uint64(src->u.f)); + dst->u.i = cpu_to_le64(fio_double_to_uint64(src->u.f)); } for (i = 0; i < FIO_IO_U_MAP_NR; i++) { @@ -1037,6 +1520,7 @@ void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs) for (i = 0; i < DDIR_RWDIR_CNT; i++) { p.ts.total_io_u[i] = cpu_to_le64(ts->total_io_u[i]); p.ts.short_io_u[i] = cpu_to_le64(ts->short_io_u[i]); + p.ts.drop_io_u[i] = cpu_to_le64(ts->drop_io_u[i]); } p.ts.total_submit = cpu_to_le64(ts->total_submit); @@ -1057,11 +1541,43 @@ void fio_server_send_ts(struct thread_stat *ts, struct group_run_stats *rs) p.ts.latency_depth = cpu_to_le32(ts->latency_depth); p.ts.latency_target = cpu_to_le64(ts->latency_target); p.ts.latency_window = cpu_to_le64(ts->latency_window); - p.ts.latency_percentile.u.i = __cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f)); + p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f)); + + p.ts.nr_block_infos = cpu_to_le64(ts->nr_block_infos); + for (i = 0; i < p.ts.nr_block_infos; i++) + p.ts.block_infos[i] = cpu_to_le32(ts->block_infos[i]); + + p.ts.ss_dur = cpu_to_le64(ts->ss_dur); + p.ts.ss_state = cpu_to_le32(ts->ss_state); + p.ts.ss_head = cpu_to_le32(ts->ss_head); + p.ts.ss_limit.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_limit.u.f)); + p.ts.ss_slope.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_slope.u.f)); + p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f)); + p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f)); convert_gs(&p.rs, rs); - fio_net_send_cmd(server_fd, FIO_NET_CMD_TS, &p, sizeof(p), NULL, NULL); + dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state); + if (ts->ss_state & __FIO_SS_DATA) { + dprint(FD_NET, "server sending steadystate ring buffers\n"); + + ss_buf = malloc(sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t)); + + memcpy(ss_buf, &p, sizeof(p)); + + ss_iops = (uint64_t *) ((struct cmd_ts_pdu *)ss_buf + 1); + ss_bw = ss_iops + (int) ts->ss_dur; + for (i = 0; i < ts->ss_dur; i++) { + ss_iops[i] = cpu_to_le64(ts->ss_iops_data[i]); + ss_bw[i] = cpu_to_le64(ts->ss_bw_data[i]); + } + + fio_net_queue_cmd(FIO_NET_CMD_TS, ss_buf, sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t), NULL, SK_F_COPY); + + free(ss_buf); + } + else + fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY); } void fio_server_send_gs(struct group_run_stats *rs) @@ -1071,7 +1587,48 @@ void fio_server_send_gs(struct group_run_stats *rs) dprint(FD_NET, "server sending group run stats\n"); convert_gs(&gs, rs); - fio_net_send_cmd(server_fd, FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, NULL); + fio_net_queue_cmd(FIO_NET_CMD_GS, &gs, sizeof(gs), NULL, SK_F_COPY); +} + +void fio_server_send_job_options(struct flist_head *opt_list, + unsigned int groupid) +{ + struct cmd_job_option pdu; + struct flist_head *entry; + + if (flist_empty(opt_list)) + return; + + flist_for_each(entry, opt_list) { + struct print_option *p; + size_t len; + + p = flist_entry(entry, struct print_option, list); + memset(&pdu, 0, sizeof(pdu)); 
+ + if (groupid == -1U) { + pdu.global = __cpu_to_le16(1); + pdu.groupid = 0; + } else { + pdu.global = 0; + pdu.groupid = cpu_to_le32(groupid); + } + len = strlen(p->name); + if (len >= sizeof(pdu.name)) { + len = sizeof(pdu.name) - 1; + pdu.truncated = __cpu_to_le16(1); + } + memcpy(pdu.name, p->name, len); + if (p->value) { + len = strlen(p->value); + if (len >= sizeof(pdu.value)) { + len = sizeof(pdu.value) - 1; + pdu.truncated = __cpu_to_le16(1); + } + memcpy(pdu.value, p->value, len); + } + fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY); + } } static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src) @@ -1079,16 +1636,16 @@ static void convert_agg(struct disk_util_agg *dst, struct disk_util_agg *src) int i; for (i = 0; i < 2; i++) { - dst->ios[i] = cpu_to_le32(src->ios[i]); - dst->merges[i] = cpu_to_le32(src->merges[i]); + dst->ios[i] = cpu_to_le64(src->ios[i]); + dst->merges[i] = cpu_to_le64(src->merges[i]); dst->sectors[i] = cpu_to_le64(src->sectors[i]); - dst->ticks[i] = cpu_to_le32(src->ticks[i]); + dst->ticks[i] = cpu_to_le64(src->ticks[i]); } - dst->io_ticks = cpu_to_le32(src->io_ticks); - dst->time_in_queue = cpu_to_le32(src->time_in_queue); + dst->io_ticks = cpu_to_le64(src->io_ticks); + dst->time_in_queue = cpu_to_le64(src->time_in_queue); dst->slavecount = cpu_to_le32(src->slavecount); - dst->max_util.u.i = __cpu_to_le64(fio_double_to_uint64(src->max_util.u.f)); + dst->max_util.u.i = cpu_to_le64(fio_double_to_uint64(src->max_util.u.f)); } static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src) @@ -1099,14 +1656,14 @@ static void convert_dus(struct disk_util_stat *dst, struct disk_util_stat *src) strncpy((char *) dst->name, (char *) src->name, FIO_DU_NAME_SZ - 1); for (i = 0; i < 2; i++) { - dst->s.ios[i] = cpu_to_le32(src->s.ios[i]); - dst->s.merges[i] = cpu_to_le32(src->s.merges[i]); + dst->s.ios[i] = cpu_to_le64(src->s.ios[i]); + dst->s.merges[i] = cpu_to_le64(src->s.merges[i]); dst->s.sectors[i] = cpu_to_le64(src->s.sectors[i]); - dst->s.ticks[i] = cpu_to_le32(src->s.ticks[i]); + dst->s.ticks[i] = cpu_to_le64(src->s.ticks[i]); } - dst->s.io_ticks = cpu_to_le32(src->s.io_ticks); - dst->s.time_in_queue = cpu_to_le32(src->s.time_in_queue); + dst->s.io_ticks = cpu_to_le64(src->s.io_ticks); + dst->s.time_in_queue = cpu_to_le64(src->s.time_in_queue); dst->s.msec = cpu_to_le64(src->s.msec); } @@ -1126,123 +1683,319 @@ void fio_server_send_du(void) convert_dus(&pdu.dus, &du->dus); convert_agg(&pdu.agg, &du->agg); - fio_net_send_cmd(server_fd, FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, NULL); + fio_net_queue_cmd(FIO_NET_CMD_DU, &pdu, sizeof(pdu), NULL, SK_F_COPY); } } +#ifdef CONFIG_ZLIB + +static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu, + struct sk_entry **last_entry, + struct sk_entry *first) +{ + unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out; + + *last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len, + NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE); + flist_add_tail(&(*last_entry)->list, &first->next); + +} + /* - * Send a command with a separate PDU, not inlined in the command + * Deflates the next input given, creating as many new packets in the + * linked list as necessary. 
*/ -static int fio_send_cmd_ext_pdu(int sk, uint16_t opcode, const void *buf, - off_t size, uint64_t tag, uint32_t flags) +static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu, + struct sk_entry **last_entry, z_stream *stream, + struct sk_entry *first) { - struct fio_net_cmd cmd; - struct iovec iov[2]; + int ret; - iov[0].iov_base = (void *) &cmd; - iov[0].iov_len = sizeof(cmd); - iov[1].iov_base = (void *) buf; - iov[1].iov_len = size; + stream->next_in = next_in; + stream->avail_in = next_sz; + do { + if (! stream->avail_out) { + + __fio_net_prep_tail(stream, *out_pdu, last_entry, first); + + *out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU); + + stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU; + stream->next_out = *out_pdu; + } - __fio_init_net_cmd(&cmd, opcode, size, tag); - cmd.flags = __cpu_to_le32(flags); - fio_net_cmd_crc_pdu(&cmd, buf); + ret = deflate(stream, Z_BLOCK); - return fio_sendv_data(sk, iov, 2); + if (ret < 0) { + free(*out_pdu); + return 1; + } + } while (stream->avail_in); + + return 0; } -static int fio_send_iolog_gz(struct cmd_iolog_pdu *pdu, struct io_log *log) +static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log, + struct io_logs *cur_log, z_stream *stream) { - int ret = 0; -#ifdef CONFIG_ZLIB - z_stream stream; + struct sk_entry *entry; void *out_pdu; + int ret, i, j; + int sample_sz = log_entry_sz(log); - /* - * Dirty - since the log is potentially huge, compress it into - * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving - * side defragment it. - */ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU); + stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU; + stream->next_out = out_pdu; + + for (i = 0; i < cur_log->nr_samples; i++) { + struct io_sample *s; + struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry; + unsigned int *cur_plat, *prev_plat; + + s = get_sample(log, cur_log, i); + ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first); + if (ret) + return ret; + + /* Do the subtraction on server side so that client doesn't have to + * reconstruct our linked list from packets. + */ + cur_plat_entry = s->data.plat_entry; + prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list); + cur_plat = cur_plat_entry->io_u_plat; + prev_plat = prev_plat_entry->io_u_plat; + + for (j = 0; j < FIO_IO_U_PLAT_NR; j++) { + cur_plat[j] -= prev_plat[j]; + } + + flist_del(&prev_plat_entry->list); + free(prev_plat_entry); + + ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry), + &out_pdu, &entry, stream, first); + + if (ret) + return ret; + } + + __fio_net_prep_tail(stream, out_pdu, &entry, first); + + return 0; +} + +static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log, + struct io_logs *cur_log, z_stream *stream) +{ + unsigned int this_len; + void *out_pdu; + int ret; + + if (log->log_type == IO_LOG_TYPE_HIST) + return __fio_append_iolog_gz_hist(first, log, cur_log, stream); + + stream->next_in = (void *) cur_log->log; + stream->avail_in = cur_log->nr_samples * log_entry_sz(log); + + do { + struct sk_entry *entry; + + /* + * Dirty - since the log is potentially huge, compress it into + * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving + * side defragment it. 
+ */ + out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU); + + stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU; + stream->next_out = out_pdu; + ret = deflate(stream, Z_BLOCK); + /* may be Z_OK, or Z_STREAM_END */ + if (ret < 0) { + free(out_pdu); + return 1; + } + this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out; + + entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len, + NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE); + flist_add_tail(&entry->list, &first->next); + } while (stream->avail_in); + + return 0; +} + +static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log) +{ + int ret = 0; + z_stream stream; + + memset(&stream, 0, sizeof(stream)); stream.zalloc = Z_NULL; stream.zfree = Z_NULL; stream.opaque = Z_NULL; - if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) { - ret = 1; - goto err; + if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) + return 1; + + while (!flist_empty(&log->io_logs)) { + struct io_logs *cur_log; + + cur_log = flist_first_entry(&log->io_logs, struct io_logs, list); + flist_del_init(&cur_log->list); + + ret = __fio_append_iolog_gz(first, log, cur_log, &stream); + if (ret) + break; } - stream.next_in = (void *) log->log; - stream.avail_in = log->nr_samples * sizeof(struct io_sample); + ret = deflate(&stream, Z_FINISH); - do { - unsigned int this_len, flags = 0; - int ret; + while (ret != Z_STREAM_END) { + struct sk_entry *entry; + unsigned int this_len; + void *out_pdu; + out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU); stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU; stream.next_out = out_pdu; + ret = deflate(&stream, Z_FINISH); /* may be Z_OK, or Z_STREAM_END */ - if (ret < 0) - goto err_zlib; + if (ret < 0) { + free(out_pdu); + break; + } this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out; - if (stream.avail_in) - flags = FIO_NET_CMD_F_MORE; + entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len, + NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE); + flist_add_tail(&entry->list, &first->next); + } while (ret != Z_STREAM_END); - ret = fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG, - out_pdu, this_len, 0, flags); - if (ret) - goto err_zlib; - } while (stream.avail_in); + ret = deflateEnd(&stream); + if (ret == Z_OK) + return 0; -err_zlib: - deflateEnd(&stream); -err: - free(out_pdu); + return 1; +} +#else +static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log) +{ + return 1; +} #endif - return ret; + +static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log) +{ + struct sk_entry *entry; + struct flist_head *node; + + pthread_mutex_lock(&log->chunk_lock); + flist_for_each(node, &log->chunk_list) { + struct iolog_compress *c; + + c = flist_entry(node, struct iolog_compress, list); + entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len, + NULL, SK_F_VEC | SK_F_INLINE); + flist_add_tail(&entry->list, &first->next); + } + pthread_mutex_unlock(&log->chunk_lock); + + return 0; +} + +static int fio_append_text_log(struct sk_entry *first, struct io_log *log) +{ + struct sk_entry *entry; + + while (!flist_empty(&log->io_logs)) { + struct io_logs *cur_log; + size_t size; + + cur_log = flist_first_entry(&log->io_logs, struct io_logs, list); + flist_del_init(&cur_log->list); + + size = cur_log->nr_samples * log_entry_sz(log); + + entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size, + NULL, SK_F_VEC | SK_F_INLINE); + flist_add_tail(&entry->list, &first->next); + } + + return 0; } int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name) { struct 
cmd_iolog_pdu pdu; - int i, ret = 0; + struct sk_entry *first; + struct flist_head *entry; + int ret = 0; + pdu.nr_samples = cpu_to_le64(iolog_nr_samples(log)); pdu.thread_number = cpu_to_le32(td->thread_number); - pdu.nr_samples = __cpu_to_le32(log->nr_samples); pdu.log_type = cpu_to_le32(log->log_type); - pdu.compressed = cpu_to_le32(use_zlib); - strcpy((char *) pdu.name, name); + pdu.log_hist_coarseness = cpu_to_le32(log->hist_coarseness); + + if (!flist_empty(&log->chunk_list)) + pdu.compressed = __cpu_to_le32(STORE_COMPRESSED); + else if (use_zlib) + pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED); + else + pdu.compressed = 0; - for (i = 0; i < log->nr_samples; i++) { - struct io_sample *s = &log->log[i]; + strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX); + pdu.name[FIO_NET_NAME_MAX - 1] = '\0'; + + /* + * We can't do this for a pre-compressed log, but for that case, + * log->nr_samples is zero anyway. + */ + flist_for_each(entry, &log->io_logs) { + struct io_logs *cur_log; + int i; - s->time = cpu_to_le64(s->time); - s->val = cpu_to_le64(s->val); - s->ddir = cpu_to_le32(s->ddir); - s->bs = cpu_to_le32(s->bs); + cur_log = flist_entry(entry, struct io_logs, list); + + for (i = 0; i < cur_log->nr_samples; i++) { + struct io_sample *s = get_sample(log, cur_log, i); + + s->time = cpu_to_le64(s->time); + s->data.val = cpu_to_le64(s->data.val); + s->__ddir = cpu_to_le32(s->__ddir); + s->bs = cpu_to_le32(s->bs); + + if (log->log_offset) { + struct io_sample_offset *so = (void *) s; + + so->offset = cpu_to_le64(so->offset); + } + } } /* - * Send header first, it's not compressed. + * Assemble header entry first */ - ret = fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG, &pdu, - sizeof(pdu), 0, FIO_NET_CMD_F_MORE); - if (ret) - return ret; + first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY); /* - * Now send actual log, compress if we can, otherwise just plain + * Now append actual log entries. If log compression was enabled on + * the job, just send out the compressed chunks directly. If we + * have a plain log, compress if we can, then send. Otherwise, send + * the plain text output. 
*/ - if (use_zlib) - return fio_send_iolog_gz(&pdu, log); + if (!flist_empty(&log->chunk_list)) + ret = fio_append_gz_chunks(first, log); + else if (use_zlib) + ret = fio_append_iolog_gz(first, log); + else + ret = fio_append_text_log(first, log); - return fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG, log->log, - log->nr_samples * sizeof(struct io_sample), 0, 0); + fio_net_queue_entry(first); + return ret; } void fio_server_send_add_job(struct thread_data *td) @@ -1254,14 +2007,88 @@ void fio_server_send_add_job(struct thread_data *td) pdu.groupid = cpu_to_le32(td->groupid); convert_thread_options_to_net(&pdu.top, &td->o); - fio_net_send_cmd(server_fd, FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, NULL); + fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, + SK_F_COPY); } void fio_server_send_start(struct thread_data *td) { - assert(server_fd != -1); + struct sk_out *sk_out = pthread_getspecific(sk_out_key); + + assert(sk_out->sk != -1); - fio_net_send_simple_cmd(server_fd, FIO_NET_CMD_SERVER_START, 0, NULL); + fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE); +} + +int fio_server_get_verify_state(const char *name, int threadnumber, + void **datap) +{ + struct thread_io_list *s; + struct cmd_sendfile out; + struct cmd_reply *rep; + uint64_t tag; + void *data; + int ret; + + dprint(FD_NET, "server: request verify state\n"); + + rep = smalloc(sizeof(*rep)); + if (!rep) + return ENOMEM; + + __fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED); + rep->data = NULL; + rep->error = 0; + + verify_state_gen_name((char *) out.path, sizeof(out.path), name, me, + threadnumber); + tag = (uint64_t) (uintptr_t) rep; + fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag, + SK_F_COPY); + + /* + * Wait for the backend to receive the reply + */ + if (fio_mutex_down_timeout(&rep->lock, 10000)) { + log_err("fio: timed out waiting for reply\n"); + ret = ETIMEDOUT; + goto fail; + } + + if (rep->error) { + log_err("fio: failure on receiving state file %s: %s\n", + out.path, strerror(rep->error)); + ret = rep->error; +fail: + *datap = NULL; + sfree(rep); + fio_net_queue_quit(); + return ret; + } + + /* + * The format is verify_state_hdr, then thread_io_list. 
Verify + * the header, and the thread_io_list checksum + */ + s = rep->data + sizeof(struct verify_state_hdr); + if (verify_state_hdr(rep->data, s)) { + ret = EILSEQ; + goto fail; + } + + /* + * Don't need the header from now, copy just the thread_io_list + */ + ret = 0; + rep->size -= sizeof(struct verify_state_hdr); + data = malloc(rep->size); + memcpy(data, s, rep->size); + *datap = data; + + sfree(rep->data); + __fio_mutex_remove(&rep->lock); + sfree(rep); + return ret; } static int fio_init_server_ip(void) @@ -1284,16 +2111,15 @@ static int fio_init_server_ip(void) opt = 1; if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, (void *)&opt, sizeof(opt)) < 0) { - log_err("fio: setsockopt: %s\n", strerror(errno)); + log_err("fio: setsockopt(REUSEADDR): %s\n", strerror(errno)); close(sk); return -1; } #ifdef SO_REUSEPORT - if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) { - log_err("fio: setsockopt: %s\n", strerror(errno)); - close(sk); - return -1; - } + /* + * Not fatal if fails, so just ignore it if that happens + */ + setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)); #endif if (use_ipv6) { @@ -1395,7 +2221,7 @@ static int fio_init_server_connection(void) log_info("fio: server listening on %s\n", bind_str); - if (listen(sk, 0) < 0) { + if (listen(sk, 4) < 0) { log_err("fio: listen: %s\n", strerror(errno)); close(sk); return -1; @@ -1586,6 +2412,22 @@ static void set_sig_handlers(void) sigaction(SIGINT, &act, NULL); } +void fio_server_destroy_sk_key(void) +{ + pthread_key_delete(sk_out_key); +} + +int fio_server_create_sk_key(void) +{ + if (pthread_key_create(&sk_out_key, NULL)) { + log_err("fio: can't create sk_out backend key\n"); + return 1; + } + + pthread_setspecific(sk_out_key, NULL); + return 0; +} + static int fio_server(void) { int sk, ret; @@ -1617,8 +2459,12 @@ static int fio_server(void) void fio_server_got_signal(int signal) { + struct sk_out *sk_out = pthread_getspecific(sk_out_key); + + assert(sk_out); + if (signal == SIGPIPE) - server_fd = -1; + sk_out->sk = -1; else { log_info("\nfio: terminating on signal %d\n", signal); exit_backend = 1; @@ -1692,14 +2538,13 @@ int fio_start_server(char *pidfile) pid = fork(); if (pid < 0) { - log_err("fio: failed server fork: %s", strerror(errno)); + log_err("fio: failed server fork: %s\n", strerror(errno)); free(pidfile); return -1; } else if (pid) { - int ret = write_pid(pid, pidfile); - + ret = write_pid(pid, pidfile); free(pidfile); - exit(ret); + _exit(ret); } setsid();
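
The patch above replaces direct writes to the old global server_fd with a per-connection sk_out context: producers build an sk_entry, append it to sk_out->list under sk_out->lock in fio_net_queue_cmd(), and post sk_out->wait; the connection loop later splices the list out in handle_xmits() and transmits each entry while holding sk_out->xmit. The standalone sketch below only mirrors that producer/flusher shape with plain pthreads; the names xmit_queue, queue_push() and queue_flush() are illustrative stand-ins, not fio interfaces.

/* Illustrative analogue of the sk_out/sk_entry transmit queue; not fio code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xmit_entry {
	struct xmit_entry *next;
	char payload[64];
};

struct xmit_queue {
	pthread_mutex_t lock;	/* plays the role of sk_out->lock */
	pthread_cond_t wait;	/* plays the role of sk_out->wait */
	struct xmit_entry *head, *tail;
};

/* Producer side: roughly what fio_net_queue_cmd() does for a non-inline entry */
static void queue_push(struct xmit_queue *q, const char *msg)
{
	struct xmit_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	snprintf(e->payload, sizeof(e->payload), "%s", msg);

	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
	pthread_mutex_unlock(&q->lock);

	pthread_cond_signal(&q->wait);	/* wake the flusher, like fio_mutex_up(&sk_out->wait) */
}

/* Flusher side: splice the pending list out under the lock, then send in order,
 * the same shape as handle_xmits() draining sk_out->list */
static void queue_flush(struct xmit_queue *q)
{
	struct xmit_entry *e;

	pthread_mutex_lock(&q->lock);
	e = q->head;
	q->head = q->tail = NULL;
	pthread_mutex_unlock(&q->lock);

	while (e) {
		struct xmit_entry *next = e->next;

		printf("send: %s\n", e->payload);	/* stand-in for fio_net_send_cmd() */
		free(e);
		e = next;
	}
}

int main(void)
{
	struct xmit_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	queue_push(&q, "FIO_NET_CMD_TEXT");
	queue_push(&q, "FIO_NET_CMD_STOP");
	queue_flush(&q);
	return 0;
}

Queueing rather than writing inline is what lets work queued from different contexts be serialized on one socket; entries flagged SK_F_INLINE (the trigger and iolog paths) skip the list and are handed straight to handle_sk_entry(), still under the same xmit mutex.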
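
fio_send_iolog() above streams a potentially huge log through zlib and queues one FIO_NET_CMD_IOLOG entry per FIO_SERVER_MAX_FRAGMENT_PDU-sized output buffer, letting the client defragment on receive. A minimal sketch of that deflate-into-fixed-chunks loop follows; CHUNK_SZ and the emit() callback are made up for the example, and it uses the plain Z_NO_FLUSH/Z_FINISH idiom rather than the Z_BLOCK flushes and per-io_logs iteration in __fio_append_iolog_gz().

/* Sketch only: compress a buffer into fixed-size fragments, one emit() per chunk. */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK_SZ 1024	/* stands in for FIO_SERVER_MAX_FRAGMENT_PDU */

static int deflate_chunks(const void *src, size_t len,
			  void (*emit)(const void *buf, size_t len))
{
	unsigned char out[CHUNK_SZ];
	z_stream stream;
	size_t this_len;
	int ret, flush;

	memset(&stream, 0, sizeof(stream));
	if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	stream.next_in = (void *) src;
	stream.avail_in = len;

	do {
		/* fresh output buffer for every fragment */
		stream.next_out = out;
		stream.avail_out = sizeof(out);

		/* switch to Z_FINISH once zlib has consumed all input */
		flush = stream.avail_in ? Z_NO_FLUSH : Z_FINISH;
		ret = deflate(&stream, flush);
		if (ret == Z_STREAM_ERROR) {
			deflateEnd(&stream);
			return 1;
		}

		/* each filled (or final partial) buffer becomes one network fragment */
		this_len = sizeof(out) - stream.avail_out;
		if (this_len)
			emit(out, this_len);
	} while (ret != Z_STREAM_END);

	return deflateEnd(&stream) != Z_OK;
}

static void emit_stub(const void *buf, size_t len)
{
	(void) buf;
	printf("fragment: %zu bytes\n", len);
}

int main(void)
{
	char payload[8192];

	memset(payload, 'A', sizeof(payload));
	return deflate_chunks(payload, sizeof(payload), emit_stub);
}

In the real protocol the sender marks all but the last fragment with FIO_NET_CMD_F_MORE so the receiver knows to keep defragmenting; the sketch leaves that framing to the caller.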
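
fio_server_get_verify_state() above shows the request/reply pattern used for FIO_NET_CMD_SENDFILE: the requester embeds a pointer to a cmd_reply in the command tag, then waits on rep->lock with a timeout until handle_command() fills in the data or an error and ups the mutex. The sketch below reproduces only that "reply slot addressed by the tag" handshake, with a condition variable standing in for fio_mutex_down_timeout(); struct reply_slot, the payload and the 10-second timeout are illustrative.

/* Sketch of a tag-addressed reply slot with a timed wait; not fio code. */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct reply_slot {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int error;
	char data[64];
};

/* Requester: send a command carrying (uint64_t)(uintptr_t)slot as its tag,
 * then wait up to 10 seconds for the responder to complete the slot. */
static int wait_for_reply(struct reply_slot *slot)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 10;

	pthread_mutex_lock(&slot->lock);
	while (!slot->done && !err)
		err = pthread_cond_timedwait(&slot->cond, &slot->lock, &ts);
	pthread_mutex_unlock(&slot->lock);

	if (err == ETIMEDOUT)
		return ETIMEDOUT;
	return slot->error;
}

/* Responder: what the command handler does when the reply arrives, analogous
 * to the FIO_NET_CMD_SENDFILE case in handle_command(). */
static void complete_reply(uint64_t tag, const char *payload, int error)
{
	struct reply_slot *slot = (struct reply_slot *)(uintptr_t) tag;

	pthread_mutex_lock(&slot->lock);
	slot->error = error;
	if (!error)
		snprintf(slot->data, sizeof(slot->data), "%s", payload);
	slot->done = 1;
	pthread_cond_signal(&slot->cond);
	pthread_mutex_unlock(&slot->lock);
}

int main(void)
{
	struct reply_slot slot = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	/* normally another thread completes the slot after the network I/O */
	complete_reply((uint64_t)(uintptr_t) &slot, "verify state bytes", 0);
	printf("reply: error=%d data=%s\n", wait_for_reply(&slot), slot.data);
	return 0;
}

Carrying the slot pointer in the existing tag field avoids any lookup table on the reply path; the requester stays the owner of the slot and releases it once the wait returns, as fio_server_get_verify_state() does with sfree().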