"LOAD_FILE",
"VTRIGGER",
"SENDFILE",
+ "JOB_OPT",
};
static void sk_lock(struct sk_out *sk_out)
return 1;
}
+ if (cmd->pdu_len > FIO_SERVER_MAX_FRAGMENT_PDU) {
+ log_err("fio: command payload too large: %u\n", cmd->pdu_len);
+ return 1;
+ }
+
return 0;
}
struct sk_entry *entry;
entry = fio_net_prep_cmd(opcode, buf, size, tagptr, flags);
- fio_net_queue_entry(entry);
- return 0;
+ if (entry) {
+ fio_net_queue_entry(entry);
+ return 0;
+ }
+
+ return 1;
}
static int fio_net_send_simple_stack_cmd(int sk, uint16_t opcode, uint64_t tag)
{
dprint(FD_NET, "server: sending quit\n");
- return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, 0, SK_F_SIMPLE);
+ return fio_net_queue_cmd(FIO_NET_CMD_QUIT, NULL, 0, NULL, SK_F_SIMPLE);
}
int fio_net_send_quit(int sk)
struct all_io_list state;
state.threads = cpu_to_le64((uint64_t) 0);
- fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY);
+ fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, &state, sizeof(state), NULL, SK_F_COPY | SK_F_INLINE);
} else
- fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE);
+ fio_net_queue_cmd(FIO_NET_CMD_VTRIGGER, rep, sz, NULL, SK_F_FREE | SK_F_INLINE);
exec_trigger(buf);
return 0;
{
struct fio_net_cmd cmd;
struct iovec iov[2];
+ size_t this_len;
+ int ret;
iov[0].iov_base = (void *) &cmd;
iov[0].iov_len = sizeof(cmd);
- iov[1].iov_base = (void *) buf;
- iov[1].iov_len = size;
- __fio_init_net_cmd(&cmd, opcode, size, tag);
- cmd.flags = __cpu_to_le32(flags);
- fio_net_cmd_crc_pdu(&cmd, buf);
+ do {
+ uint32_t this_flags = flags;
+
+ this_len = size;
+ if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU;
+
+ if (this_len < size)
+ this_flags |= FIO_NET_CMD_F_MORE;
+
+ __fio_init_net_cmd(&cmd, opcode, this_len, tag);
+ cmd.flags = __cpu_to_le32(this_flags);
+ fio_net_cmd_crc_pdu(&cmd, buf);
+
+ iov[1].iov_base = (void *) buf;
+ iov[1].iov_len = this_len;
- return fio_sendv_data(sk, iov, 2);
+ ret = fio_sendv_data(sk, iov, 2);
+ size -= this_len;
+ buf += this_len;
+ } while (!ret && size);
+
+ return ret;
}
static void finish_entry(struct sk_entry *entry)
}
}
-static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
-{
- int ret = 0;
#ifdef CONFIG_ZLIB
+
+/*
+ * Close out the current compressed fragment: wrap the bytes deflate() has
+ * written into @out_pdu in a FIO_NET_CMD_IOLOG entry and append it to the
+ * reply chain headed by @first. *last_entry is set to the new entry.
+ * NOTE(review): SK_F_FREE presumably transfers ownership of @out_pdu to the
+ * net layer - verify against fio_net_prep_cmd()/finish_entry().
+ */
+static inline void __fio_net_prep_tail(z_stream *stream, void *out_pdu,
+ struct sk_entry **last_entry,
+ struct sk_entry *first)
+{
+ /* bytes produced into out_pdu so far */
+ unsigned int this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
+
+ *last_entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
+ flist_add_tail(&(*last_entry)->list, &first->next);
+
+}
+
+/*
+ * Deflates the next input given, creating as many new packets in the
+ * linked list as necessary.
+ *
+ * @next_in/@next_sz: raw bytes to feed into the compressor.
+ * @out_pdu: in/out pointer to the current output fragment buffer; each time
+ * a fragment fills up it is queued and replaced with a fresh buffer of
+ * FIO_SERVER_MAX_FRAGMENT_PDU bytes.
+ * @last_entry: set to the most recently queued sk_entry.
+ * @stream: caller-initialized zlib stream carrying state across calls.
+ * @first: head of the sk_entry chain that new fragments are appended to.
+ *
+ * Returns 0 on success, 1 on deflate error (the current buffer is freed).
+ */
+static int __deflate_pdu_buffer(void *next_in, unsigned int next_sz, void **out_pdu,
+ struct sk_entry **last_entry, z_stream *stream,
+ struct sk_entry *first)
+{
+ int ret;
+
+ stream->next_in = next_in;
+ stream->avail_in = next_sz;
+ do {
+ /* current fragment is full: queue it and start a new one */
+ if (! stream->avail_out) {
+
+ __fio_net_prep_tail(stream, *out_pdu, last_entry, first);
+
+ /* NOTE(review): malloc result is not checked - confirm OOM policy */
+ *out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = *out_pdu;
+ }
+
+ /* Z_BLOCK: compress what we have without forcing a flush boundary */
+ ret = deflate(stream, Z_BLOCK);
+
+ if (ret < 0) {
+ free(*out_pdu);
+ return 1;
+ }
+ } while (stream->avail_in);
+
+ return 0;
+}
+
+/*
+ * Compress one io_logs chunk of a histogram log into IOLOG fragments.
+ * Each sample's platogram is converted to a delta against the previous
+ * entry before being deflated, and the consumed previous entry is freed.
+ * Returns 0 on success, non-zero on deflate failure.
+ */
+static int __fio_append_iolog_gz_hist(struct sk_entry *first, struct io_log *log,
+ struct io_logs *cur_log, z_stream *stream)
+{
struct sk_entry *entry;
- z_stream stream;
void *out_pdu;
+ int ret, i, j;
+ int sample_sz = log_entry_sz(log);
- /*
- * Dirty - since the log is potentially huge, compress it into
- * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
- * side defragment it.
- */
out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = out_pdu;
+
+ for (i = 0; i < cur_log->nr_samples; i++) {
+ struct io_sample *s;
+ struct io_u_plat_entry *cur_plat_entry, *prev_plat_entry;
+ unsigned int *cur_plat, *prev_plat;
+
+ s = get_sample(log, cur_log, i);
+ ret = __deflate_pdu_buffer(s, sample_sz, &out_pdu, &entry, stream, first);
+ if (ret)
+ return ret;
+
+ /* Do the subtraction on server side so that client doesn't have to
+ * reconstruct our linked list from packets.
+ */
+ cur_plat_entry = s->plat_entry;
+ prev_plat_entry = flist_first_entry(&cur_plat_entry->list, struct io_u_plat_entry, list);
+ cur_plat = cur_plat_entry->io_u_plat;
+ prev_plat = prev_plat_entry->io_u_plat;
+
+ for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
+ cur_plat[j] -= prev_plat[j];
+ }
+
+ /* previous entry is fully folded into the delta; drop it */
+ flist_del(&prev_plat_entry->list);
+ free(prev_plat_entry);
+
+ ret = __deflate_pdu_buffer(cur_plat_entry, sizeof(*cur_plat_entry),
+ &out_pdu, &entry, stream, first);
+
+ if (ret)
+ return ret;
+ }
+
+ /* queue whatever remains in the final, partially filled fragment */
+ __fio_net_prep_tail(stream, out_pdu, &entry, first);
+
+ return 0;
+}
+/*
+ * Compress one io_logs chunk into FIO_NET_CMD_IOLOG fragments on the chain
+ * headed by @first. Histogram logs are delegated to
+ * __fio_append_iolog_gz_hist(); all other log types are deflated straight
+ * from the contiguous sample array.
+ * Returns 0 on success, 1 on deflate error.
+ */
+static int __fio_append_iolog_gz(struct sk_entry *first, struct io_log *log,
+ struct io_logs *cur_log, z_stream *stream)
+{
+ unsigned int this_len;
+ void *out_pdu;
+ int ret;
+
+ if (log->log_type == IO_LOG_TYPE_HIST)
+ return __fio_append_iolog_gz_hist(first, log, cur_log, stream);
+
+ stream->next_in = (void *) cur_log->log;
+ stream->avail_in = cur_log->nr_samples * log_entry_sz(log);
+
+ do {
+ struct sk_entry *entry;
+
+ /*
+ * Dirty - since the log is potentially huge, compress it into
+ * FIO_SERVER_MAX_FRAGMENT_PDU chunks and let the receiving
+ * side defragment it.
+ */
+ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
+
+ stream->avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
+ stream->next_out = out_pdu;
+ ret = deflate(stream, Z_BLOCK);
+ /* may be Z_OK, or Z_STREAM_END */
+ if (ret < 0) {
+ free(out_pdu);
+ return 1;
+ }
+
+ this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream->avail_out;
+
+ /* SK_F_FREE: entry takes ownership of out_pdu */
+ entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
+ flist_add_tail(&entry->list, &first->next);
+ } while (stream->avail_in);
+
+ return 0;
+}
+
+/*
+ * Compress an entire io_log and queue it as a chain of FIO_NET_CMD_IOLOG
+ * fragments on @first: consume log->io_logs chunk by chunk, then drain the
+ * zlib stream with Z_FINISH until it reports Z_STREAM_END.
+ * Returns 0 on success, 1 on failure.
+ */
+static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
+{
+ int ret = 0;
+ z_stream stream;
+
+ memset(&stream, 0, sizeof(stream));
stream.zalloc = Z_NULL;
stream.zfree = Z_NULL;
stream.opaque = Z_NULL;
- if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
- ret = 1;
- goto err;
+ if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK)
+ return 1;
+
+ while (!flist_empty(&log->io_logs)) {
+ struct io_logs *cur_log;
+
+ cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+ flist_del_init(&cur_log->list);
+
+ ret = __fio_append_iolog_gz(first, log, cur_log, &stream);
+ if (ret)
+ break;
}
- stream.next_in = (void *) log->log;
- stream.avail_in = log->nr_samples * log_entry_sz(log);
+ /* flush any state zlib is still holding; loop until stream end */
+ ret = deflate(&stream, Z_FINISH);
- do {
+ while (ret != Z_STREAM_END) {
+ struct sk_entry *entry;
unsigned int this_len;
+ void *out_pdu;
+ out_pdu = malloc(FIO_SERVER_MAX_FRAGMENT_PDU);
stream.avail_out = FIO_SERVER_MAX_FRAGMENT_PDU;
stream.next_out = out_pdu;
+
ret = deflate(&stream, Z_FINISH);
/* may be Z_OK, or Z_STREAM_END */
- if (ret < 0)
- goto err_zlib;
+ if (ret < 0) {
+ free(out_pdu);
+ break;
+ }
this_len = FIO_SERVER_MAX_FRAGMENT_PDU - stream.avail_out;
entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, out_pdu, this_len,
- NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
- out_pdu = NULL;
+ NULL, SK_F_VEC | SK_F_INLINE | SK_F_FREE);
flist_add_tail(&entry->list, &first->next);
- } while (stream.avail_in);
+ }
-err_zlib:
- deflateEnd(&stream);
-err:
- free(out_pdu);
-#endif
- return ret;
+ ret = deflateEnd(&stream);
+ if (ret == Z_OK)
+ return 0;
+
+ return 1;
}
+#else
+/*
+ * zlib support not compiled in: report failure; presumably the caller
+ * then sends the log uncompressed - verify at the call site.
+ */
+static int fio_append_iolog_gz(struct sk_entry *first, struct io_log *log)
+{
+ return 1;
+}
+#endif
static int fio_append_gz_chunks(struct sk_entry *first, struct io_log *log)
{
static int fio_append_text_log(struct sk_entry *first, struct io_log *log)
{
struct sk_entry *entry;
- size_t size = log->nr_samples * log_entry_sz(log);
- entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, log->log, size,
- NULL, SK_F_VEC | SK_F_INLINE);
- flist_add_tail(&entry->list, &first->next);
+ /*
+ * Queue each io_logs chunk as its own uncompressed FIO_NET_CMD_IOLOG
+ * entry; chunks are detached from log->io_logs as they are consumed.
+ */
+ while (!flist_empty(&log->io_logs)) {
+ struct io_logs *cur_log;
+ size_t size;
+
+ cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+ flist_del_init(&cur_log->list);
+
+ size = cur_log->nr_samples * log_entry_sz(log);
+
+ entry = fio_net_prep_cmd(FIO_NET_CMD_IOLOG, cur_log->log, size,
+ NULL, SK_F_VEC | SK_F_INLINE);
+ flist_add_tail(&entry->list, &first->next);
+ }
+
return 0;
}
{
struct cmd_iolog_pdu pdu;
struct sk_entry *first;
- int i, ret = 0;
+ struct flist_head *entry;
+ int ret = 0;
- pdu.nr_samples = cpu_to_le64(log->nr_samples);
+ pdu.nr_samples = cpu_to_le64(iolog_nr_samples(log));
pdu.thread_number = cpu_to_le32(td->thread_number);
pdu.log_type = cpu_to_le32(log->log_type);
+ pdu.log_hist_coarseness = cpu_to_le32(log->hist_coarseness);
if (!flist_empty(&log->chunk_list))
pdu.compressed = __cpu_to_le32(STORE_COMPRESSED);
* We can't do this for a pre-compressed log, but for that case,
* log->nr_samples is zero anyway.
*/
- for (i = 0; i < log->nr_samples; i++) {
- struct io_sample *s = get_sample(log, i);
+ flist_for_each(entry, &log->io_logs) {
+ struct io_logs *cur_log;
+ int i;
+
+ cur_log = flist_entry(entry, struct io_logs, list);
+
+ for (i = 0; i < cur_log->nr_samples; i++) {
+ struct io_sample *s = get_sample(log, cur_log, i);
- s->time = cpu_to_le64(s->time);
- s->val = cpu_to_le64(s->val);
- s->__ddir = cpu_to_le32(s->__ddir);
- s->bs = cpu_to_le32(s->bs);
+ s->time = cpu_to_le64(s->time);
+ s->val = cpu_to_le64(s->val);
+ s->__ddir = cpu_to_le32(s->__ddir);
+ s->bs = cpu_to_le32(s->bs);
- if (log->log_offset) {
- struct io_sample_offset *so = (void *) s;
+ if (log->log_offset) {
+ struct io_sample_offset *so = (void *) s;
- so->offset = cpu_to_le64(so->offset);
+ so->offset = cpu_to_le64(so->offset);
+ }
}
}
assert(sk_out->sk != -1);
- fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, 0, SK_F_SIMPLE);
+ fio_net_queue_cmd(FIO_NET_CMD_SERVER_START, NULL, 0, NULL, SK_F_SIMPLE);
}
int fio_server_get_verify_state(const char *name, int threadnumber,
- void **datap, int *version)
+ void **datap)
{
struct thread_io_list *s;
struct cmd_sendfile out;
dprint(FD_NET, "server: request verify state\n");
rep = smalloc(sizeof(*rep));
- if (!rep) {
- log_err("fio: smalloc pool too small\n");
+ if (!rep)
return ENOMEM;
- }
__fio_mutex_init(&rep->lock, FIO_MUTEX_LOCKED);
rep->data = NULL;
* the header, and the thread_io_list checksum
*/
s = rep->data + sizeof(struct verify_state_hdr);
- if (verify_state_hdr(rep->data, s, version)) {
+ if (verify_state_hdr(rep->data, s)) {
ret = EILSEQ;
goto fail;
}
return -1;
}
#ifdef SO_REUSEPORT
- if (setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) < 0) {
- log_err("fio: setsockopt(REUSEPORT): %s\n", strerror(errno));
- close(sk);
- return -1;
- }
+ /*
+ * Not fatal if fails, so just ignore it if that happens
+ */
+ setsockopt(sk, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
#endif
if (use_ipv6) {