stream->opaque = Z_NULL;
stream->next_in = Z_NULL;
+ /*
+ * zlib magic - add 32 for auto-detection of gz header or not,
+ * if we decide to store files in a gzip friendly format.
+ */
if (gz_hdr)
wbits += 32;
return 0;
}
-struct flush_chunk_iter {
+struct inflate_chunk_iter {
unsigned int seq;
+ int err;
void *buf;
size_t buf_size;
size_t buf_used;
};
static void finish_chunk(z_stream *stream, FILE *f,
- struct flush_chunk_iter *iter)
+ struct inflate_chunk_iter *iter)
{
int ret;
iter->buf_size = iter->buf_used = 0;
}
-static int flush_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
- z_stream *stream, struct flush_chunk_iter *iter)
+/*
+ * Iterative chunk inflation. Handles cases where we cross into a new
+ * sequence, doing flush finish of previous chunk if needed.
+ */
+static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f,
+ z_stream *stream, struct inflate_chunk_iter *iter)
{
if (ic->seq != iter->seq) {
if (iter->seq)
err = inflate(stream, Z_NO_FLUSH);
if (err < 0) {
log_err("fio: failed inflating log: %d\n", err);
+ iter->err = err;
break;
}
break;
}
- return 0;
+ return (void *) stream->next_in - ic->buf;
}
-static void flush_gz_chunks(struct io_log *log, FILE *f)
+/*
+ * Inflate stored compressed chunks, or write them directly to the log
+ * file if so instructed.
+ */
+static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
- struct flush_chunk_iter iter = { .chunk_sz = log->log_gz, };
+ struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, };
z_stream stream;
while (!flist_empty(&log->chunk_list)) {
ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list);
flist_del(&ic->list);
- if (log->log_gz_store)
- fwrite(ic->buf, ic->len, 1, f);
- else
- flush_chunk(ic, log->log_gz_store, f, &stream, &iter);
+ if (log->log_gz_store) {
+ size_t ret;
+
+ ret = fwrite(ic->buf, ic->len, 1, f);
+ if (ret != 1 || ferror(f)) {
+ iter.err = errno;
+ log_err("fio: error writing compressed log\n");
+ }
+ } else
+ inflate_chunk(ic, log->log_gz_store, f, &stream, &iter);
free_chunk(ic);
}
finish_chunk(&stream, f, &iter);
free(iter.buf);
}
+
+ return iter.err;
}
+/*
+ * Open compressed log file and decompress the stored chunks and
+ * write them to stdout. The chunks are stored sequentially in the
+ * file, so we iterate over them and do them one-by-one.
+ */
int iolog_file_inflate(const char *file)
{
- struct flush_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
+ struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
struct iolog_compress ic;
z_stream stream;
struct stat sb;
ssize_t ret;
+ size_t total;
+ void *buf;
FILE *f;
f = fopen(file, "r");
return 1;
}
- ic.buf = malloc(sb.st_size);
+ ic.buf = buf = malloc(sb.st_size);
ic.len = sb.st_size;
ic.seq = 1;
fclose(f);
- flush_chunk(&ic, 1, stdout, &stream, &iter);
+ /*
+ * Each chunk will return Z_STREAM_END. We don't know how many
+ * chunks are in the file, so we just keep looping and incrementing
+ * the sequence number until we have consumed the whole compressed
+ * file.
+ */
+ total = ic.len;
+ do {
+ size_t ret;
+
+ ret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
+ total -= ret;
+ if (!total)
+ break;
+ if (iter.err)
+ break;
+
+ ic.seq++;
+ ic.len -= ret;
+ ic.buf += ret;
+ } while (1);
if (iter.seq) {
finish_chunk(&stream, stdout, &iter);
free(iter.buf);
}
- free(ic.buf);
- return 0;
+ free(buf);
+ return iter.err;
}
#else
struct io_log;

/*
 * Stub for builds without zlib support: logs are never stored
 * compressed, so there is nothing to inflate and we can always
 * report success.
 */
static int inflate_gz_chunks(struct io_log *log, FILE *f)
{
	(void) log;
	(void) f;

	return 0;
}
+
/*
 * Stub for builds without zlib support: decompression is not
 * available, so tell the user why and signal failure.
 */
int iolog_file_inflate(const char *file)
{
	(void) file;

	log_err("fio: log inflation not possible without zlib\n");
	return 1;
}
#endif
buf = set_file_buffer(f);
- flush_gz_chunks(log, f);
+ inflate_gz_chunks(log, f);
flush_samples(f, log->log, log->nr_samples * log_entry_sz(log));
#ifdef CONFIG_ZLIB
+/*
+ * Invoked from our compress helper thread, when logging would have exceeded
+ * the specified memory limitation. Compresses the previously stored
+ * entries.
+ */
static int gz_work(struct tp_work *work)
{
struct iolog_flush_data *data;
ret = deflate(&stream, Z_NO_FLUSH);
if (ret < 0) {
log_err("fio: deflate log (%d)\n", ret);
- break;
+ free_chunk(c);
+ goto err;
}
c->len = GZ_CHUNK - stream.avail_out;
pthread_mutex_unlock(&data->log->chunk_lock);
}
+ ret = 0;
+done:
if (work->wait) {
work->done = 1;
pthread_cond_signal(&work->cv);
} else
free(data);
- return 0;
+ return ret;
+err:
+ while (!flist_empty(&list)) {
+ c = flist_first_entry(list.next, struct iolog_compress, list);
+ flist_del(&c->list);
+ free_chunk(c);
+ }
+ ret = 1;
+ goto done;
}
+/*
+ * Queue work item to compress the existing log entries. We copy the
+ * samples, and reset the log sample count to 0 (so the logging will
+ * continue to use the memory associated with the log). If called with
+ * wait == 1, will not return until the log compression has completed.
+ */
int iolog_flush(struct io_log *log, int wait)
{
struct tp_data *tdat = log->td->tp_data;