SK_F_COPY = 2,
SK_F_SIMPLE = 4,
SK_F_VEC = 8,
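+ /* handle the entry inline in the caller instead of queueing it */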
+ SK_F_INLINE = 16,
};
struct sk_entry {
int opcode; /* Actual command fields */
void *buf;
off_t size;
- uint64_t *tagptr;
+ uint64_t tag;
struct flist_head next; /* Other sk_entry's, if linked command */
};
-struct sk_out {
- unsigned int refs; /* frees sk_out when it drops to zero.
- * protected by below ->lock */
-
- int sk; /* socket fd to talk to client */
- struct fio_mutex *lock; /* protects ref and below list */
- struct flist_head list; /* list of pending transmit work */
- struct fio_mutex *wait; /* wake backend when items added to list */
-};
-
static char *fio_server_arg;
static char *bind_sock;
static struct sockaddr_in saddr_in;
"LOAD_FILE",
"VTRIGGER",
"SENDFILE",
+ "JOB_OPT",
};
static void sk_lock(struct sk_out *sk_out)
{
- fio_mutex_down(sk_out->lock);
+ fio_mutex_down(&sk_out->lock);
}
static void sk_unlock(struct sk_out *sk_out)
{
- fio_mutex_up(sk_out->lock);
+ fio_mutex_up(&sk_out->lock);
}
void sk_out_assign(struct sk_out *sk_out)
static void sk_out_free(struct sk_out *sk_out)
{
- fio_mutex_remove(sk_out->lock);
- fio_mutex_remove(sk_out->wait);
+ __fio_mutex_remove(&sk_out->lock);
+ __fio_mutex_remove(&sk_out->wait);
+ __fio_mutex_remove(&sk_out->xmit);
sfree(sk_out);
}
int refs;
sk_lock(sk_out);
+ assert(sk_out->refs != 0);
refs = --sk_out->refs;
sk_unlock(sk_out);
if (!refs) {
sk_out_free(sk_out);
+ pthread_setspecific(sk_out_key, NULL);
return 0;
}
}
struct sk_out *sk_out;
sk_out = pthread_getspecific(sk_out_key);
- if (!__sk_out_drop(sk_out))
- pthread_setspecific(sk_out_key, NULL);
+ __sk_out_drop(sk_out);
}
static void __fio_init_net_cmd(struct fio_net_cmd *cmd, uint16_t opcode,
return fio_sendv_data(sk, &iov, 1);
}
-static int fio_recv_data(int sk, void *p, unsigned int len)
+static int fio_recv_data(int sk, void *p, unsigned int len, bool wait)
{
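+ /*
+ * If 'wait' is set, block until all 'len' bytes have been received,
+ * retrying on EAGAIN/EINTR. Otherwise receive non-blocking and bail
+ * on the first EAGAIN instead of retrying.
+ */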
+ int flags;
+
+ if (wait)
+ flags = MSG_WAITALL;
+ else
+ flags = OS_MSG_DONTWAIT;
+
do {
- int ret = recv(sk, p, len, MSG_WAITALL);
+ int ret = recv(sk, p, len, flags);
if (ret > 0) {
len -= ret;
if (!len)
break;
p += ret;
continue;
} else if (!ret)
break;
- else if (errno == EAGAIN || errno == EINTR)
- continue;
- else
+ else if (errno == EAGAIN || errno == EINTR) {
+ if (wait)
+ continue;
+ break;
+ } else
break;
} while (!exit_backend);
/*
* Read (and defragment, if necessary) incoming commands
*/
-struct fio_net_cmd *fio_net_recv_cmd(int sk)
+struct fio_net_cmd *fio_net_recv_cmd(int sk, bool wait)
{
struct fio_net_cmd cmd, *tmp, *cmdret = NULL;
size_t cmd_size = 0, pdu_offset = 0;
void *pdu = NULL;
do {
- ret = fio_recv_data(sk, &cmd, sizeof(cmd));
+ ret = fio_recv_data(sk, &cmd, sizeof(cmd), wait);
if (ret)
break;
/* There's payload, get it */
pdu = (void *) cmdret->payload + pdu_offset;
- ret = fio_recv_data(sk, pdu, cmd.pdu_len);
+ ret = fio_recv_data(sk, pdu, cmd.pdu_len, wait);
if (ret)
break;
reply = calloc(1, sizeof(*reply));
INIT_FLIST_HEAD(&reply->list);
- fio_gettime(&reply->tv, NULL);
+ fio_gettime(&reply->ts, NULL);
reply->saved_tag = tag;
reply->opcode = opcode;
return ret;
}
-static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf, off_t size,
- uint64_t *tagptr, int flags)
+static struct sk_entry *fio_net_prep_cmd(uint16_t opcode, void *buf,
+ size_t size, uint64_t *tagptr,
+ int flags)
{
struct sk_entry *entry;
memcpy(entry->buf, buf, size);
} else
entry->buf = buf;
+
entry->size = size;
- entry->tagptr = tagptr;
+ if (tagptr)
+ entry->tag = *tagptr;
+ else
+ entry->tag = 0;
entry->flags = flags;
-
return entry;
}
+static int handle_sk_entry(struct sk_out *sk_out, struct sk_entry *entry);
+
static void fio_net_queue_entry(struct sk_entry *entry)
{
struct sk_out *sk_out = pthread_getspecific(sk_out_key);
- sk_lock(sk_out);
- flist_add_tail(&entry->list, &sk_out->list);
- sk_unlock(sk_out);
+ if (entry->flags & SK_F_INLINE)
+ handle_sk_entry(sk_out, entry);
+ else {
+ sk_lock(sk_out);
+ flist_add_tail(&entry->list, &sk_out->list);
+ sk_unlock(sk_out);
- fio_mutex_up(sk_out->wait);
+ fio_mutex_up(&sk_out->wait);
+ }
}
static int fio_net_queue_cmd(uint16_t opcode, void *buf, off_t size,
struct sk_entry *entry;
entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
- fio_net_queue_entry(entry);
- return 0;
+ if (entry) {
+ fio_net_queue_entry(entry);
+ return 0;
+ }
+
+ return 1;
}
static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
{
dprint(FD_NET, "server: sending quit\n");
- return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, 0, SK_F_SIMPLE);
+ return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE);
}
int fio_net_send_quit(int sk)
pid_t pid;
int ret;
+ sk_out_assign(sk_out);
+
fio_time_init();
set_genesis_time();
ret = fio_backend(sk_out);
free_threads_shm();
+ sk_out_drop();
_exit(ret);
}
je->files_open = cpu_to_le32(je->files_open);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- je->m_rate[i] = cpu_to_le32(je->m_rate[i]);
- je->t_rate[i] = cpu_to_le32(je->t_rate[i]);
+ je->m_rate[i] = cpu_to_le64(je->m_rate[i]);
+ je->t_rate[i] = cpu_to_le64(je->t_rate[i]);
je->m_iops[i] = cpu_to_le32(je->m_iops[i]);
je->t_iops[i] = cpu_to_le32(je->t_iops[i]);
- je->rate[i] = cpu_to_le32(je->rate[i]);
+ je->rate[i] = cpu_to_le64(je->rate[i]);
je->iops[i] = cpu_to_le32(je->iops[i]);
}
struct all_io_list state;
state.threads = cpu_to_le64((uint64_t) 0);
- fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY);
+ fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE);
} else
- fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE);
+ fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE);
exec_trigger(buf);
return 0;
{
struct fio_net_cmd cmd;
struct iovec iov[2];
+ size_t this_len;
+ int ret;
iov[0].iov_base = (void *) &cmd;
iov[0].iov_len = sizeof(cmd);
- iov[1].iov_base = (void *) buf;
- iov[1].iov_len = size;
- __fio_init_net_cmd(&cmd, opcode, size, tag);
- cmd.flags = __cpu_to_le32(flags);
- fio_net_cmd_crc_pdu(&cmd, buf);
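+ /*
+ * Split the payload into FIO_SERVER_MAX_FRAGMENT_PDU sized chunks,
+ * setting FIO_NET_CMD_F_MORE on all but the last fragment so the
+ * receiver knows to keep defragmenting.
+ */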
+ do {
+ uint32_t this_flags = flags;
+
+ this_len = size;
+ if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
+
+ if (this_len < size)
+ this_flags |= FIO_NET_CMD_F_MORE;
+
+ __fio_init_net_cmd(&cmd, opcode, this_len, tag);
+ cmd.flags = __cpu_to_le32(this_flags);
+ fio_net_cmd_crc_pdu(&cmd, buf);
+
+ iov[1].iov_base = (void *) buf;
+ iov[1].iov_len = this_len;
+
+ ret = fio_sendv_data(sk, iov, 2);
+ size -= this_len;
+ buf += this_len;
+ } while (!ret && size);
- return fio_sendv_data(sk, iov, 2);
+ return ret;
}
static void finish_entry(struct sk_entry *entry)
sfree(entry);
}
-static void entry_set_flags_tag(struct sk_entry *entry, struct flist_head *list,
- unsigned int *flags, uint64_t *tag)
+static void entry_set_flags(struct sk_entry *entry, struct flist_head *list,
+ unsigned int *flags)
{
if (!flist_empty(list))
*flags = FIO_NET_CMD_F_MORE;
else
*flags = 0;
-
- if (entry->tagptr)
- *tag = *entry->tagptr;
- else
- *tag = 0;
}
static int send_vec_entry(struct sk_out *sk_out, struct sk_entry *first)
{
unsigned int flags;
- uint64_t tag;
int ret;
- entry_set_flags_tag(first, &first->next, &flags, &tag);
+ entry_set_flags(first, &first->next, &flags);
- ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf, first->size, tag, flags);
+ ret = fio_send_cmd_ext_pdu(sk_out->sk, first->opcode, first->buf,
+ first->size, first->tag, flags);
while (!flist_empty(&first->next)) {
struct sk_entry *next;
next = flist_first_entry(&first->next, struct sk_entry, list);
flist_del_init(&next->list);
- entry_set_flags_tag(next, &first->next, &flags, &tag);
+ entry_set_flags(next, &first->next, &flags);
- ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf, next->size, tag, flags);
+ ret += fio_send_cmd_ext_pdu(sk_out->sk, next->opcode, next->buf,
+ next->size, next->tag, flags);
finish_entry(next);
}
{
int ret;
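+ /*
+ * Serialize transmissions; with SK_F_INLINE, entries may be sent
+ * both from the backend loop and inline from the submitting thread.
+ */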
+ fio_mutex_down(&sk_out->xmit);
+
if (entry->flags & SK_F_VEC)
ret = send_vec_entry(sk_out, entry);
- if (entry->flags & SK_F_SIMPLE) {
- uint64_t tag = 0;
-
- if (entry->tagptr)
- tag = *entry->tagptr;
+ else if (entry->flags & SK_F_SIMPLE) {
+ ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode,
+ entry->tag, NULL);
+ } else {
+ ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf,
+ entry->size, &entry->tag, NULL);
+ }
- ret = fio_net_send_simple_cmd(sk_out->sk, entry->opcode, tag, NULL);
- } else
- ret = fio_net_send_cmd(sk_out->sk, entry->opcode, entry->buf, entry->size, entry->tagptr, NULL);
+ fio_mutex_up(&sk_out->xmit);
if (ret)
log_err("fio: failed handling cmd %s\n", fio_server_op(entry->opcode));
break;
} else if (!ret) {
fio_server_check_jobs(&job_list);
- fio_mutex_down_timeout(sk_out->wait, timeout);
+ fio_mutex_down_timeout(&sk_out->wait, timeout);
continue;
}
if (ret < 0)
break;
- cmd = fio_net_recv_cmd(sk_out->sk);
+ cmd = fio_net_recv_cmd(sk_out->sk, true);
if (!cmd) {
ret = -1;
break;
ret = getsockname(sk, sockaddr_p, &len);
if (ret) {
- log_err("fio: getsockaddr: %s\n", strerror(errno));
+ log_err("fio: getsockname: %s\n", strerror(errno));
return -1;
}
sk_out = smalloc(sizeof(*sk_out));
sk_out->sk = sk;
INIT_FLIST_HEAD(&sk_out->list);
- sk_out->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
- sk_out->wait = fio_mutex_init(FIO_MUTEX_LOCKED);
+ __fio_mutex_init(&sk_out->lock, FIO_MUTEX_UNLOCKED);
+ __fio_mutex_init(&sk_out->wait, FIO_MUTEX_LOCKED);
+ __fio_mutex_init(&sk_out->xmit, FIO_MUTEX_UNLOCKED);
pid = fork();
if (pid) {
dst->min_run[i] = cpu_to_le64(src->min_run[i]);
dst->max_bw[i] = cpu_to_le64(src->max_bw[i]);
dst->min_bw[i] = cpu_to_le64(src->min_bw[i]);
- dst->io_kb[i] = cpu_to_le64(src->io_kb[i]);
+ dst->iobytes[i] = cpu_to_le64(src->iobytes[i]);
dst->agg[i] = cpu_to_le64(src->agg[i]);
}
{
struct cmd_ts_pdu p;
int i, j;
+ void *ss_buf;
+ uint64_t *ss_iops, *ss_bw;
dprint(FD_NET, "server sending end stats\n");
p.ts.io_u_complete[i] = cpu_to_le32(ts->io_u_complete[i]);
}
- for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
+ for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
p.ts.io_u_lat_u[i] = cpu_to_le32(ts->io_u_lat_u[i]);
+ for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
p.ts.io_u_lat_m[i] = cpu_to_le32(ts->io_u_lat_m[i]);
- }
for (i = 0; i < DDIR_RWDIR_CNT; i++)
for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
p.ts.latency_window = cpu_to_le64(ts->latency_window);
p.ts.latency_percentile.u.i = cpu_to_le64(fio_double_to_uint64(ts->latency_percentile.u.f));
- p.ts.nr_block_infos = le64_to_cpu(ts->nr_block_infos);
+ p.ts.nr_block_infos = cpu_to_le64(ts->nr_block_infos);
for (i = 0; i < p.ts.nr_block_infos; i++)
- p.ts.block_infos[i] = le32_to_cpu(ts->block_infos[i]);
+ p.ts.block_infos[i] = cpu_to_le32(ts->block_infos[i]);
+
+ p.ts.ss_dur = cpu_to_le64(ts->ss_dur);
+ p.ts.ss_state = cpu_to_le32(ts->ss_state);
+ p.ts.ss_head = cpu_to_le32(ts->ss_head);
+ p.ts.ss_limit.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_limit.u.f));
+ p.ts.ss_slope.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_slope.u.f));
+ p.ts.ss_deviation.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_deviation.u.f));
+ p.ts.ss_criterion.u.i = cpu_to_le64(fio_double_to_uint64(ts->ss_criterion.u.f));
convert_gs(&p.rs, rs);
- fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
+ dprint(FD_NET, "ts->ss_state = %d\n", ts->ss_state);
+ if (ts->ss_state & __FIO_SS_DATA) {
+ dprint(FD_NET, "server sending steadystate ring buffers\n");
+
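+ /*
+ * ss_buf layout: the cmd_ts_pdu header, followed by ss_dur iops
+ * samples and then ss_dur bandwidth samples.
+ */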
+ ss_buf = malloc(sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t));
+
+ memcpy(ss_buf, &p, sizeof(p));
+
+ ss_iops = (uint64_t *) ((struct cmd_ts_pdu *)ss_buf + 1);
+ ss_bw = ss_iops + (int) ts->ss_dur;
+ for (i = 0; i < ts->ss_dur; i++) {
+ ss_iops[i] = cpu_to_le64(ts->ss_iops_data[i]);
+ ss_bw[i] = cpu_to_le64(ts->ss_bw_data[i]);
+ }
+
+ fio_net_queue_cmd(FIO_NET_CMD_TS, ss_buf, sizeof(p) + 2*ts->ss_dur*sizeof(uint64_t), NULL, SK_F_COPY);
+
+ free(ss_buf);
+ } else
+ fio_net_queue_cmd(FIO_NET_CMD_TS, &p, sizeof(p), NULL, SK_F_COPY);
}
void fio_server_send_gs(struct group_run_stats *rs)
flist_for_each(entry, opt_list) {
struct print_option *p;
+ size_t len;
p = flist_entry(entry, struct print_option, list);
memset(&pdu, 0, sizeof(pdu));
+
if (groupid == -1U) {
pdu.global = __cpu_to_le16(1);
pdu.groupid = 0;
} else {
pdu.global = 0;
- pdu.groupid = __cpu_to_le16(groupid);
+ pdu.groupid = cpu_to_le32(groupid);
+ }
+ len = strlen(p->name);
+ if (len >= sizeof(pdu.name)) {
+ len = sizeof(pdu.name) - 1;
+ pdu.truncated = __cpu_to_le16(1);
+ }
+ memcpy(pdu.name, p->name, len);
+ if (p->value) {
+ len = strlen(p->value);
+ if (len >= sizeof(pdu.value)) {
+ len = sizeof(pdu.value) - 1;
+ pdu.truncated = __cpu_to_le16(1);
+ }
+ memcpy(pdu.value, p->value, len);
}
- memcpy(pdu.name, p->name, strlen(p->name));
- if (p->value)
- memcpy(pdu.value, p->value, strlen(p->value));
fio_net_queue_cmd(FIO_NET_CMD_JOB_OPT, &pdu, sizeof(pdu), NULL, SK_F_COPY);
}
}
}
}
-static int fio_send_iolog_gz(struct sk_entry *first, struct io_log *log)
-{
- int ret = 0;
#ifdef CONFIG_ZLIB
+
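+/*
+ * Flush whatever remains in the deflate output buffer as a final
+ * IOLOG fragment, appended to 'first's linked list.
+ */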
+static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu,
+ struct sk_entry **last_entry,
+ struct sk_entry *first)
+{
+ unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
+
+ *last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
+ flist_add_tail(&(*last_entry)->list, &first->next);
+}
+
+/*
+ * Deflates the next input given, creating as many new packets in the
+ * linked list as necessary.
+ */
+static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu,
+ struct sk_entry **last_entry, z_stream *stream,
+ struct sk_entry *first)
+{
+ int ret;
+
+ stream->next_in = next_in;
+ stream->avail_in = next_sz;
+ do {
+ if (!stream->avail_out) {
+ __fio_net_prep_tail(stream, *out_pdu, last_entry, first);
+ *out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = *out_pdu;
+ }
+
+ ret = deflate(stream, Z_BLOCK);
+
+ if (ret < 0) {
+ free(*out_pdu);
+ return 1;
+ }
+ } while (stream->avail_in);
+
+ return 0;
+}
+
+static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log,
+ struct io_logs *cur_log, z_stream *stream)
+{
struct sk_entry *entry;
- z_stream stream;
void *out_pdu;
+ int ret, i, j;
+ int sample_sz = log_entry_sz(log);
- /*
- * Dirty - since the log is potentially huge, compress it into
- * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
- * side defragment it.
- */
out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = out_pdu;
+
+ for (i = 0; i < cur_log->nr_samples; i++) {
+ struct io_sample *s;
+ struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry;
+ unsigned int *cur_plat, *prev_plat;
+
+ s = get_sample(log, cur_log, i);
+ ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first);
+ if (ret)
+ return ret;
+
+ /*
+ * Do the subtraction on the server side so that the client doesn't
+ * have to reconstruct our linked list from packets.
+ */
+ cur_plat_entry = s->data.plat_entry;
+ prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list);
+ cur_plat = cur_plat_entry->io_u_plat;
+ prev_plat = prev_plat_entry->io_u_plat;
+
+ for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
+ cur_plat[j] -= prev_plat[j];
+ }
+
+ flist_del(&prev_plat_entry->list);
+ free(prev_plat_entry);
+
+ ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry),
+ &out_pdu, &entry, stream, first);
+
+ if (ret)
+ return ret;
+ }
+
+ __fio_net_prep_tail(stream, out_pdu, &entry, first);
+
+ return 0;
+}
+
+static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log,
+ struct io_logs *cur_log, z_stream *stream)
+{
+ unsigned int this_len;
+ void *out_pdu;
+ int ret;
+
+ if (log->log_type == IO_LOG_TYPE_HIST)
+ return __fio_append_iolog_gz_hist(first, log, cur_log, stream);
+
+ stream->next_in = (void *) cur_log->log;
+ stream->avail_in = cur_log->nr_samples * log_entry_sz(log);
+
+ do {
+ struct sk_entry *entry;
+
+ /*
+ * Dirty - since the log is potentially huge, compress it into
+ * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
+ * side defragment it.
+ */
+ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = out_pdu;
+ ret = deflate(stream, Z_BLOCK);
+ /* may be Z_OK, or Z_STREAM_END */
+ if (ret < 0) {
+ free(out_pdu);
+ return 1;
+ }
+
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
+
+ entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
+ flist_add_tail(&entry->list, &first->next);
+ } while (stream->avail_in);
+
+ return 0;
+}
+
+static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
+{
+ int ret = 0;
+ z_stream stream;
+ memset(&stream, 0, sizeof(stream));
stream.zalloc = Z_NULL;
stream.zfree = Z_NULL;
stream.opaque = Z_NULL;
- if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
- ret = 1;
- goto err;
+ if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
+ return 1;
+
+ while (!flist_empty(&log->io_logs)) {
+ struct io_logs *cur_log;
+
+ cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+ flist_del_init(&cur_log->list);
+
+ ret = __fio_append_iolog_gz(first, log, cur_log, &stream);
+ if (ret)
+ break;
}
- stream.next_in = (void *) log->log;
- stream.avail_in = log->nr_samples * log_entry_sz(log);
+ ret = deflate(&stream, Z_FINISH);
- do {
+ while (ret != Z_STREAM_END) {
+ struct sk_entry *entry;
unsigned int this_len;
+ void *out_pdu;
+ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
stream.next_out = out_pdu;
+
ret = deflate(&stream, Z_FINISH);
/* may be Z_OK, or Z_STREAM_END */
- if (ret < 0)
- goto err_zlib;
+ if (ret < 0) {
+ free(out_pdu);
+ break;
+ }
this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
- NULL, SK_F_FREE | SK_F_VEC);
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
flist_add_tail(&entry->list, &first->next);
- } while (stream.avail_in);
+ }
+
+ ret = deflateEnd(&stream);
+ if (ret == Z_OK)
+ return 0;
-err_zlib:
- deflateEnd(&stream);
-err:
- free(out_pdu);
+ return 1;
+}
+#else
+static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
+{
+ return 1;
+}
#endif
- return ret;
+
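+/*
+ * Log compression was enabled on the job, so the log already holds
+ * compressed chunks; queue them for transmission directly.
+ */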
+static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log)
+{
+ struct sk_entry *entry;
+ struct flist_head *node;
+
+ pthread_mutex_lock(&log->chunk_lock);
+ flist_for_each(node, &log->chunk_list) {
+ struct iolog_compress *c;
+
+ c = flist_entry(node, struct iolog_compress, list);
+ entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, c->buf, c->len,
+ NULL, SK_F_VEC | SK_F_INLINE);
+ flist_add_tail(&entry->list, &first->next);
+ }
+ pthread_mutex_unlock(&log->chunk_lock);
+
+ return 0;
+}
+
+static int fio_append_text_log(struct sk_entry *first, struct io_log *log)
+{
+ struct sk_entry *entry;
+
+ while (!flist_empty(&log->io_logs)) {
+ struct io_logs *cur_log;
+ size_t size;
+
+ cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+ flist_del_init(&cur_log->list);
+
+ size = cur_log->nr_samples * log_entry_sz(log);
+
+ entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size,
+ NULL, SK_F_VEC | SK_F_INLINE);
+ flist_add_tail(&entry->list, &first->next);
+ }
+
+ return 0;
}
int fio_send_iolog(struct thread_data *td, struct io_log *log, const char *name)
{
struct cmd_iolog_pdu pdu;
struct sk_entry *first;
- int i, ret = 0;
+ struct flist_head *entry;
+ int ret = 0;
- pdu.nr_samples = cpu_to_le64(log->nr_samples);
+ pdu.nr_samples = cpu_to_le64(iolog_nr_samples(log));
pdu.thread_number = cpu_to_le32(td->thread_number);
pdu.log_type = cpu_to_le32(log->log_type);
- pdu.compressed = cpu_to_le32(use_zlib);
+ pdu.log_hist_coarseness = cpu_to_le32(log->hist_coarseness);
+
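+ /*
+ * STORE_COMPRESSED: the log already holds gzip chunks from per-job
+ * log compression, sent as-is. XMIT_COMPRESSED: plain samples that
+ * we compress only for the transfer.
+ */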
+ if (!flist_empty(&log->chunk_list))
+ pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
+ else if (use_zlib)
+ pdu.compressed = __cpu_to_le32(XMIT_COMPRESSED);
+ else
+ pdu.compressed = 0;
strncpy((char *) pdu.name, name, FIO_NET_NAME_MAX);
pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
- for (i = 0; i < log->nr_samples; i++) {
- struct io_sample *s = get_sample(log, i);
+ /*
+ * We can't do this for a pre-compressed log, but for that case,
+ * log->nr_samples is zero anyway.
+ */
+ flist_for_each(entry, &log->io_logs) {
+ struct io_logs *cur_log;
+ int i;
+
+ cur_log = flist_entry(entry, struct io_logs, list);
+
+ for (i = 0; i < cur_log->nr_samples; i++) {
+ struct io_sample *s = get_sample(log, cur_log, i);
- s->time = cpu_to_le64(s->time);
- s->val = cpu_to_le64(s->val);
- s->__ddir = cpu_to_le32(s->__ddir);
- s->bs = cpu_to_le32(s->bs);
+ s->time = cpu_to_le64(s->time);
+ s->data.val = cpu_to_le64(s->data.val);
+ s->__ddir = cpu_to_le32(s->__ddir);
+ s->bs = cpu_to_le32(s->bs);
- if (log->log_offset) {
- struct io_sample_offset *so = (void *) s;
+ if (log->log_offset) {
+ struct io_sample_offset *so = (void *) s;
- so->offset = cpu_to_le64(so->offset);
+ so->offset = cpu_to_le64(so->offset);
+ }
}
}
/*
* Assemble header entry first
*/
- first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_COPY | SK_F_VEC);
+ first = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, &pdu, sizeof(pdu), NULL, SK_F_VEC | SK_F_INLINE | SK_F_COPY);
/*
- * Now append actual log entries. Compress if we can, otherwise just
- * plain text output.
+ * Now append actual log entries. If log compression was enabled on
+ * the job, just send out the compressed chunks directly. If we
+ * have a plain log, compress if we can, then send. Otherwise, send
+ * the plain text output.
*/
- if (use_zlib)
- ret = fio_send_iolog_gz(first, log);
- else {
- struct sk_entry *entry;
-
- entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, log->log,
- log->nr_samples * log_entry_sz(log),
- NULL, SK_F_FREE | SK_F_VEC);
- flist_add_tail(&entry->list, &first->next);
- }
+ if (!flist_empty(&log->chunk_list))
+ ret = fio_append_gz_chunks(first, log);
+ else if (use_zlib)
+ ret = fio_append_iolog_gz(first, log);
+ else
+ ret = fio_append_text_log(first, log);
+ fio_net_queue_entry(first);
return ret;
}
pdu.groupid = cpu_to_le32(td->groupid);
convert_thread_options_to_net(&pdu.top, &td->o);
- fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL, SK_F_COPY);
+ fio_net_queue_cmd(FIO_NET_CMD_ADD_JOB, &pdu, sizeof(pdu), NULL,
+ SK_F_COPY);
}
void fio_server_send_start(struct thread_data *td)
assert(sk_out->sk != -1);
- fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, 0, SK_F_SIMPLE);
+ fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
}
int fio_server_get_verify_state(const char *name, int threadnumber,
- void **datap, int *version)
+ void **datap)
{
struct thread_io_list *s;
struct cmd_sendfile out;
struct cmd_reply *rep;
uint64_t tag;
void *data;
+ int ret;
dprint(FD_NET, "server: request verify state\n");
rep = smalloc(sizeof(*rep));
- if (!rep) {
- log_err("fio: smalloc pool too small\n");
- return 1;
- }
+ if (!rep)
+ return ENOMEM;
__fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED);
rep->data = NULL;
verify_state_gen_name((char *) out.path, sizeof(out.path), name, me,
threadnumber);
tag = (uint64_t) (uintptr_t) rep;
- fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag, SK_F_COPY);
+ fio_net_queue_cmd(FIO_NET_CMD_SENDFILE, &out, sizeof(out), &tag,
+ SK_F_COPY);
/*
* Wait for the backend to receive the reply
*/
if (fio_mutex_down_timeout(&rep->lock, 10000)) {
log_err("fio: timed out waiting for reply\n");
+ ret = ETIMEDOUT;
goto fail;
}
if (rep->error) {
- log_err("fio: failure on receiving state file: %s\n", strerror(rep->error));
+ log_err("fio: failure on receiving state file %s: %s\n",
+ out.path, strerror(rep->error));
+ ret = rep->error;
fail:
*datap = NULL;
sfree(rep);
fio_net_queue_quit();
- return 1;
+ return ret;
}
/*
* the header, and the thread_io_list checksum
*/
s = rep->data + sizeof(struct verify_state_hdr);
- if (verify_state_hdr(rep->data, s, version))
+ if (verify_state_hdr(rep->data, s)) {
+ ret = EILSEQ;
goto fail;
+ }
/*
* Don't need the header from now, copy just the thread_io_list
*/
+ ret = 0;
rep->size -= sizeof(struct verify_state_hdr);
data = malloc(rep->size);
memcpy(data, s, rep->size);
sfree(rep->data);
__fio_mutex_remove(&rep->lock);
sfree(rep);
- return 0;
+ return ret;
}
static int fio_init_server_ip(void)
return -1;
}
#ifdef SO_REUSEPORT
- if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
- log_err("fio: setsockopt(REUSEPORT): %s\n", strerror(errno));
- close(sk);
- return -1;
- }
+ /*
+ * Not fatal if it fails, so just ignore it if that happens
+ */
+ setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
#endif
if (use_ipv6) {
sigaction(SIGINT, &act, NULL);
}
-static int fio_server(void)
+void fio_server_destroy_sk_key(void)
{
- int sk, ret;
+ pthread_key_delete(sk_out_key);
+}
+int fio_server_create_sk_key(void)
+{
if (pthread_key_create(&sk_out_key, NULL)) {
log_err("fio: can't create sk_out backend key\n");
- return -1;
+ return 1;
}
pthread_setspecific(sk_out_key, NULL);
+ return 0;
+}
+
+static int fio_server(void)
+{
+ int sk, ret;
dprint(FD_NET, "starting server\n");
pid = fork();
if (pid < 0) {
- log_err("fio: failed server fork: %s", strerror(errno));
+ log_err("fio: failed server fork: %s\n", strerror(errno));
free(pidfile);
return -1;
} else if (pid) {