X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=iolog.c;h=6576ca55237e58668bb892d1f4b1ec227eaef515;hp=a8bead11292c824eb3371f4f2c3e7a8545108cf7;hb=0caccfa716b44baecb197dd8fe4280935ca1413f;hpb=cb7e0ace739cbd5ca8a434a40fef4de469d59a5e

diff --git a/iolog.c b/iolog.c
index a8bead11..6576ca55 100644
--- a/iolog.c
+++ b/iolog.c
@@ -6,11 +6,21 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+#endif
+
 #include "flist.h"
 #include "fio.h"
 #include "verify.h"
 #include "trim.h"
 #include "filelock.h"
+#include "smalloc.h"
+
+static int iolog_flush(struct io_log *log);
 
 static const char iolog_ver2[] = "fio version 2 iolog";
 
@@ -20,19 +30,14 @@ void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
 	td->total_io_size += ipo->len;
 }
 
-void log_io_u(struct thread_data *td, struct io_u *io_u)
+void log_io_u(const struct thread_data *td, const struct io_u *io_u)
 {
-	const char *act[] = { "read", "write", "sync", "datasync",
-				"sync_file_range", "wait", "trim" };
-
-	assert(io_u->ddir <= 6);
-
 	if (!td->o.write_iolog_file)
 		return;
 
 	fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
-						act[io_u->ddir], io_u->offset,
-						io_u->buflen);
+					io_ddir_name(io_u->ddir),
+					io_u->offset, io_u->buflen);
 }
 
 void log_file(struct thread_data *td, struct fio_file *f,
@@ -57,20 +62,22 @@ void log_file(struct thread_data *td, struct fio_file *f,
 
 static void iolog_delay(struct thread_data *td, unsigned long delay)
 {
-	unsigned long usec = utime_since_now(&td->last_issue);
-	unsigned long this_delay;
+	uint64_t usec = utime_since_now(&td->last_issue);
+	uint64_t this_delay;
+	struct timeval tv;
+
+	if (delay < td->time_offset) {
+		td->time_offset = 0;
+		return;
+	}
+
+	delay -= td->time_offset;
 	if (delay < usec)
 		return;
 
 	delay -= usec;
 
-	/*
-	 * less than 100 usec delay, just regard it as noise
-	 */
-	if (delay < 100)
-		return;
-
+	fio_gettime(&tv, NULL);
 	while (delay && !td->terminate) {
 		this_delay = delay;
 		if (this_delay > 500000)
@@ -79,6 +86,12 @@ static void iolog_delay(struct thread_data *td, unsigned long delay)
 		usec_sleep(td, this_delay);
 		delay -= this_delay;
 	}
+
+	usec = utime_since_now(&tv);
+	if (usec > delay)
+		td->time_offset = usec - delay;
+	else
+		td->time_offset = 0;
 }
 
 static int ipo_special(struct thread_data *td, struct io_piece *ipo)
@@ -105,7 +118,7 @@ static int ipo_special(struct thread_data *td, struct io_piece *ipo)
 		td_io_close_file(td, f);
 		break;
 	case FIO_LOG_UNLINK_FILE:
-		unlink(f->file_name);
+		td_io_unlink_file(td, f);
 		break;
 	default:
 		log_err("fio: bad file action %d\n", ipo->file_action);
@@ -123,7 +136,7 @@ int read_iolog_get(struct thread_data *td, struct io_u *io_u)
 	while (!flist_empty(&td->io_log_list)) {
 		int ret;
 
-		ipo = flist_entry(td->io_log_list.next, struct io_piece, list);
+		ipo = flist_first_entry(&td->io_log_list, struct io_piece, list);
 		flist_del(&ipo->list);
 		remove_trim_entry(td, ipo);
 
@@ -176,7 +189,7 @@ void prune_io_piece_log(struct thread_data *td)
 	}
 
 	while (!flist_empty(&td->io_hist_list)) {
-		ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
+		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
 		flist_del(&ipo->list);
 		remove_trim_entry(td, ipo);
 		td->io_hist_len--;
@@ -239,6 +252,7 @@ restart:
 	p = &td->io_hist_tree.rb_node;
 	parent = NULL;
 	while (*p) {
+		int overlap = 0;
 		parent = *p;
 
 		__ipo = rb_entry(parent, struct io_piece, rb_node);
@@ -246,11 +260,18 @@ restart:
 			p = &(*p)->rb_left;
 		else if (ipo->file > __ipo->file)
 			p = &(*p)->rb_right;
-		else if (ipo->offset < __ipo->offset)
+		else if (ipo->offset < __ipo->offset) {
 			p = &(*p)->rb_left;
-		else if (ipo->offset > __ipo->offset)
+			overlap = ipo->offset + ipo->len > __ipo->offset;
+		}
+		else if (ipo->offset > __ipo->offset) {
 			p = &(*p)->rb_right;
-		else {
+			overlap = __ipo->offset + __ipo->len > ipo->offset;
+		}
+		else
+			overlap = 1;
+
+		if (overlap) {
 			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu",
 				__ipo->offset, __ipo->len,
 				ipo->offset, ipo->len);
@@ -272,6 +293,18 @@ void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
 {
 	struct io_piece *ipo = io_u->ipo;
 
+	if (td->ts.nr_block_infos) {
+		uint32_t *info = io_u_block_info(td, io_u);
+		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
+			if (io_u->ddir == DDIR_TRIM)
+				*info = BLOCK_INFO_SET_STATE(*info,
+						BLOCK_STATE_TRIM_FAILURE);
+			else if (io_u->ddir == DDIR_WRITE)
+				*info = BLOCK_INFO_SET_STATE(*info,
+						BLOCK_STATE_WRITE_FAILURE);
+		}
+	}
+
 	if (!ipo)
 		return;
 
@@ -285,7 +318,7 @@ void unlog_io_piece(struct thread_data *td, struct io_u *io_u)
 	td->io_hist_len--;
 }
 
-void trim_io_piece(struct thread_data *td, struct io_u *io_u)
+void trim_io_piece(struct thread_data *td, const struct io_u *io_u)
 {
 	struct io_piece *ipo = io_u->ipo;
 
@@ -539,19 +572,57 @@ int init_iolog(struct thread_data *td)
 	return ret;
 }
 
-void setup_log(struct io_log **log, unsigned long avg_msec, int log_type,
-	       int log_offset, const char *filename)
+void setup_log(struct io_log **log, struct log_params *p,
+	       const char *filename)
 {
-	struct io_log *l = malloc(sizeof(*l));
-
-	memset(l, 0, sizeof(*l));
-	l->nr_samples = 0;
-	l->max_samples = 1024;
-	l->log_type = log_type;
-	l->log_offset = log_offset;
-	l->log = malloc(l->max_samples * log_entry_sz(l));
-	l->avg_msec = avg_msec;
+	struct io_log *l;
+	int i;
+	struct io_u_plat_entry *entry;
+	struct flist_head *list;
+
+	l = scalloc(1, sizeof(*l));
+	INIT_FLIST_HEAD(&l->io_logs);
+	l->log_type = p->log_type;
+	l->log_offset = p->log_offset;
+	l->log_gz = p->log_gz;
+	l->log_gz_store = p->log_gz_store;
+	l->avg_msec = p->avg_msec;
+	l->hist_msec = p->hist_msec;
+	l->hist_coarseness = p->hist_coarseness;
 	l->filename = strdup(filename);
+	l->td = p->td;
+
+	/* Initialize histogram lists for each r/w direction,
+	 * with initial io_u_plat of all zeros:
+	 */
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		list = &l->hist_window[i].list;
+		INIT_FLIST_HEAD(list);
+		entry = calloc(1, sizeof(struct io_u_plat_entry));
+		flist_add(&entry->list, list);
+	}
+
+	if (l->td && l->td->o.io_submit_mode != IO_MODE_OFFLOAD) {
+		struct io_logs *p;
+
+		p = calloc(1, sizeof(*l->pending));
+		p->max_samples = DEF_LOG_ENTRIES;
+		p->log = calloc(p->max_samples, log_entry_sz(l));
+		l->pending = p;
+	}
+
+	if (l->log_offset)
+		l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
+
+	INIT_FLIST_HEAD(&l->chunk_list);
+
+	if (l->log_gz && !p->td)
+		l->log_gz = 0;
+	else if (l->log_gz || l->log_gz_store) {
+		mutex_init_pshared(&l->chunk_lock);
+		p->td->flags |= TD_F_COMPRESS_LOG;
+	}
+
 	*log = l;
 }
 
@@ -581,117 +652,831 @@ static void clear_file_buffer(void *buf)
 }
 #endif
 
-static void free_log(struct io_log *log)
+void free_log(struct io_log *log)
 {
-	free(log->log);
+	while (!flist_empty(&log->io_logs)) {
+		struct io_logs *cur_log;
+
+		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+		flist_del_init(&cur_log->list);
+		free(cur_log->log);
+		sfree(cur_log);
+	}
+
+	if (log->pending) {
+		free(log->pending->log);
+		free(log->pending);
+		log->pending = NULL;
+	}
+
+	free(log->pending);
 	free(log->filename);
-	free(log);
+	sfree(log);
 }
 
-void __finish_log(struct io_log *log)
+inline unsigned long hist_sum(int j, int stride, unsigned int *io_u_plat, + unsigned int *io_u_plat_last) { - uint64_t i; - void *buf; - FILE *f; + unsigned long sum; + int k; + + if (io_u_plat_last) { + for (k = sum = 0; k < stride; k++) + sum += io_u_plat[j + k] - io_u_plat_last[j + k]; + } else { + for (k = sum = 0; k < stride; k++) + sum += io_u_plat[j + k]; + } - f = fopen(log->filename, "a"); - if (!f) { - perror("fopen log"); + return sum; +} + +static void flush_hist_samples(FILE *f, int hist_coarseness, void *samples, + uint64_t sample_size) +{ + struct io_sample *s; + int log_offset; + uint64_t i, j, nr_samples; + struct io_u_plat_entry *entry, *entry_before; + unsigned int *io_u_plat; + unsigned int *io_u_plat_before; + + int stride = 1 << hist_coarseness; + + if (!sample_size) return; + + s = __get_sample(samples, 0, 0); + log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0; + + nr_samples = sample_size / __log_entry_sz(log_offset); + + for (i = 0; i < nr_samples; i++) { + s = __get_sample(samples, log_offset, i); + + entry = (struct io_u_plat_entry *) (uintptr_t) s->val; + io_u_plat = entry->io_u_plat; + + entry_before = flist_first_entry(&entry->list, struct io_u_plat_entry, list); + io_u_plat_before = entry_before->io_u_plat; + + fprintf(f, "%lu, %u, %u, ", (unsigned long) s->time, + io_sample_ddir(s), s->bs); + for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) { + fprintf(f, "%lu, ", hist_sum(j, stride, io_u_plat, + io_u_plat_before)); + } + fprintf(f, "%lu\n", (unsigned long) + hist_sum(FIO_IO_U_PLAT_NR - stride, stride, io_u_plat, + io_u_plat_before)); + + flist_del(&entry_before->list); + free(entry_before); } +} - buf = set_file_buffer(f); +void flush_samples(FILE *f, void *samples, uint64_t sample_size) +{ + struct io_sample *s; + int log_offset; + uint64_t i, nr_samples; - for (i = 0; i < log->nr_samples; i++) { - struct io_sample *s = get_sample(log, i); + if (!sample_size) + return; - if (!log->log_offset) { + s = __get_sample(samples, 0, 0); + log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0; + + nr_samples = sample_size / __log_entry_sz(log_offset); + + for (i = 0; i < nr_samples; i++) { + s = __get_sample(samples, log_offset, i); + + if (!log_offset) { fprintf(f, "%lu, %lu, %u, %u\n", (unsigned long) s->time, (unsigned long) s->val, - s->ddir, s->bs); + io_sample_ddir(s), s->bs); } else { struct io_sample_offset *so = (void *) s; fprintf(f, "%lu, %lu, %u, %u, %llu\n", (unsigned long) s->time, (unsigned long) s->val, - s->ddir, s->bs, + io_sample_ddir(s), s->bs, (unsigned long long) so->offset); } } +} + +#ifdef CONFIG_ZLIB + +struct iolog_flush_data { + struct workqueue_work work; + struct io_log *log; + void *samples; + uint32_t nr_samples; + bool free; +}; + +#define GZ_CHUNK 131072 + +static struct iolog_compress *get_new_chunk(unsigned int seq) +{ + struct iolog_compress *c; + + c = malloc(sizeof(*c)); + INIT_FLIST_HEAD(&c->list); + c->buf = malloc(GZ_CHUNK); + c->len = 0; + c->seq = seq; + return c; +} + +static void free_chunk(struct iolog_compress *ic) +{ + free(ic->buf); + free(ic); +} + +static int z_stream_init(z_stream *stream, int gz_hdr) +{ + int wbits = 15; + + memset(stream, 0, sizeof(*stream)); + stream->zalloc = Z_NULL; + stream->zfree = Z_NULL; + stream->opaque = Z_NULL; + stream->next_in = Z_NULL; + + /* + * zlib magic - add 32 for auto-detection of gz header or not, + * if we decide to store files in a gzip friendly format. 
+ */ + if (gz_hdr) + wbits += 32; + + if (inflateInit2(stream, wbits) != Z_OK) + return 1; + + return 0; +} + +struct inflate_chunk_iter { + unsigned int seq; + int err; + void *buf; + size_t buf_size; + size_t buf_used; + size_t chunk_sz; +}; + +static void finish_chunk(z_stream *stream, FILE *f, + struct inflate_chunk_iter *iter) +{ + int ret; + + ret = inflateEnd(stream); + if (ret != Z_OK) + log_err("fio: failed to end log inflation seq %d (%d)\n", + iter->seq, ret); + + flush_samples(f, iter->buf, iter->buf_used); + free(iter->buf); + iter->buf = NULL; + iter->buf_size = iter->buf_used = 0; +} + +/* + * Iterative chunk inflation. Handles cases where we cross into a new + * sequence, doing flush finish of previous chunk if needed. + */ +static size_t inflate_chunk(struct iolog_compress *ic, int gz_hdr, FILE *f, + z_stream *stream, struct inflate_chunk_iter *iter) +{ + size_t ret; + + dprint(FD_COMPRESS, "inflate chunk size=%lu, seq=%u\n", + (unsigned long) ic->len, ic->seq); + + if (ic->seq != iter->seq) { + if (iter->seq) + finish_chunk(stream, f, iter); + + z_stream_init(stream, gz_hdr); + iter->seq = ic->seq; + } + + stream->avail_in = ic->len; + stream->next_in = ic->buf; + + if (!iter->buf_size) { + iter->buf_size = iter->chunk_sz; + iter->buf = malloc(iter->buf_size); + } + + while (stream->avail_in) { + size_t this_out = iter->buf_size - iter->buf_used; + int err; + + stream->avail_out = this_out; + stream->next_out = iter->buf + iter->buf_used; + + err = inflate(stream, Z_NO_FLUSH); + if (err < 0) { + log_err("fio: failed inflating log: %d\n", err); + iter->err = err; + break; + } + + iter->buf_used += this_out - stream->avail_out; + + if (!stream->avail_out) { + iter->buf_size += iter->chunk_sz; + iter->buf = realloc(iter->buf, iter->buf_size); + continue; + } + + if (err == Z_STREAM_END) + break; + } + + ret = (void *) stream->next_in - ic->buf; + + dprint(FD_COMPRESS, "inflated to size=%lu\n", (unsigned long) iter->buf_size); + + return ret; +} + +/* + * Inflate stored compressed chunks, or write them directly to the log + * file if so instructed. + */ +static int inflate_gz_chunks(struct io_log *log, FILE *f) +{ + struct inflate_chunk_iter iter = { .chunk_sz = log->log_gz, }; + z_stream stream; + + while (!flist_empty(&log->chunk_list)) { + struct iolog_compress *ic; + + ic = flist_first_entry(&log->chunk_list, struct iolog_compress, list); + flist_del(&ic->list); + + if (log->log_gz_store) { + size_t ret; + + dprint(FD_COMPRESS, "log write chunk size=%lu, " + "seq=%u\n", (unsigned long) ic->len, ic->seq); + + ret = fwrite(ic->buf, ic->len, 1, f); + if (ret != 1 || ferror(f)) { + iter.err = errno; + log_err("fio: error writing compressed log\n"); + } + } else + inflate_chunk(ic, log->log_gz_store, f, &stream, &iter); + + free_chunk(ic); + } + + if (iter.seq) { + finish_chunk(&stream, f, &iter); + free(iter.buf); + } + + return iter.err; +} + +/* + * Open compressed log file and decompress the stored chunks and + * write them to stdout. The chunks are stored sequentially in the + * file, so we iterate over them and do them one-by-one. 
+ */
+int iolog_file_inflate(const char *file)
+{
+	struct inflate_chunk_iter iter = { .chunk_sz = 64 * 1024 * 1024, };
+	struct iolog_compress ic;
+	z_stream stream;
+	struct stat sb;
+	ssize_t ret;
+	size_t total;
+	void *buf;
+	FILE *f;
+
+	f = fopen(file, "r");
+	if (!f) {
+		perror("fopen");
+		return 1;
+	}
+
+	if (stat(file, &sb) < 0) {
+		fclose(f);
+		perror("stat");
+		return 1;
+	}
+
+	ic.buf = buf = malloc(sb.st_size);
+	ic.len = sb.st_size;
+	ic.seq = 1;
+
+	ret = fread(ic.buf, ic.len, 1, f);
+	if (ret < 0) {
+		perror("fread");
+		fclose(f);
+		free(buf);
+		return 1;
+	} else if (ret != 1) {
+		log_err("fio: short read on reading log\n");
+		fclose(f);
+		free(buf);
+		return 1;
+	}
+
+	fclose(f);
+
+	/*
+	 * Each chunk will return Z_STREAM_END. We don't know how many
+	 * chunks are in the file, so we just keep looping and incrementing
+	 * the sequence number until we have consumed the whole compressed
+	 * file.
+	 */
+	total = ic.len;
+	do {
+		size_t iret;
+
+		iret = inflate_chunk(&ic, 1, stdout, &stream, &iter);
+		total -= iret;
+		if (!total)
+			break;
+		if (iter.err)
+			break;
+
+		ic.seq++;
+		ic.len -= iret;
+		ic.buf += iret;
+	} while (1);
+
+	if (iter.seq) {
+		finish_chunk(&stream, stdout, &iter);
+		free(iter.buf);
+	}
+
+	free(buf);
+	return iter.err;
+}
+
+#else
+
+static int inflate_gz_chunks(struct io_log *log, FILE *f)
+{
+	return 0;
+}
+
+int iolog_file_inflate(const char *file)
+{
+	log_err("fio: log inflation not possible without zlib\n");
+	return 1;
+}
+
+#endif
+
+void flush_log(struct io_log *log, bool do_append)
+{
+	void *buf;
+	FILE *f;
+
+	if (!do_append)
+		f = fopen(log->filename, "w");
+	else
+		f = fopen(log->filename, "a");
+	if (!f) {
+		perror("fopen log");
+		return;
+	}
+
+	buf = set_file_buffer(f);
+
+	inflate_gz_chunks(log, f);
+
+	while (!flist_empty(&log->io_logs)) {
+		struct io_logs *cur_log;
+
+		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+		flist_del_init(&cur_log->list);
+
+		if (log == log->td->clat_hist_log)
+			flush_hist_samples(f, log->hist_coarseness, cur_log->log,
+						log_sample_sz(log, cur_log));
+		else
+			flush_samples(f, cur_log->log, log_sample_sz(log, cur_log));
+
+		sfree(cur_log);
+	}
 
 	fclose(f);
 	clear_file_buffer(buf);
-	free_log(log);
 }
 
 static int finish_log(struct thread_data *td, struct io_log *log, int trylock)
 {
+	if (td->flags & TD_F_COMPRESS_LOG)
+		iolog_flush(log);
+
 	if (trylock) {
 		if (fio_trylock_file(log->filename))
 			return 1;
 	} else
 		fio_lock_file(log->filename);
 
-	if (td->client_type == FIO_CLIENT_TYPE_GUI) {
+	if (td->client_type == FIO_CLIENT_TYPE_GUI || is_backend)
 		fio_send_iolog(td, log, log->filename);
-		free_log(log);
-	} else
-		__finish_log(log);
+	else
+		flush_log(log, !td->o.per_job_logs);
 
 	fio_unlock_file(log->filename);
+	free_log(log);
 	return 0;
 }
 
-static int write_iops_log(struct thread_data *td, int try)
+size_t log_chunk_sizes(struct io_log *log)
 {
-	struct io_log *log = td->iops_log;
+	struct flist_head *entry;
+	size_t ret;
 
-	if (!log)
+	if (flist_empty(&log->chunk_list))
 		return 0;
 
-	return finish_log(td, log, try);
+	ret = 0;
+	pthread_mutex_lock(&log->chunk_lock);
+	flist_for_each(entry, &log->chunk_list) {
+		struct iolog_compress *c;
+
+		c = flist_entry(entry, struct iolog_compress, list);
+		ret += c->len;
+	}
+	pthread_mutex_unlock(&log->chunk_lock);
+	return ret;
 }
 
-static int write_slat_log(struct thread_data *td, int try)
+#ifdef CONFIG_ZLIB
+
+static int gz_work(struct iolog_flush_data *data)
 {
-	struct io_log *log = td->slat_log;
+	struct iolog_compress *c = NULL;
+	struct flist_head list;
+	unsigned int seq;
+	z_stream stream;
+	size_t total = 0;
+	int ret;
+
+	INIT_FLIST_HEAD(&list);
+
+	memset(&stream, 0, sizeof(stream));
+	stream.zalloc = Z_NULL;
+	stream.zfree = Z_NULL;
+	stream.opaque = Z_NULL;
+
+	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
+	if (ret != Z_OK) {
+		log_err("fio: failed to init gz stream\n");
+		goto err;
+	}
+
+	seq = ++data->log->chunk_seq;
+
+	stream.next_in = (void *) data->samples;
+	stream.avail_in = data->nr_samples * log_entry_sz(data->log);
+
+	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u, log=%s\n",
+		(unsigned long) stream.avail_in, seq,
+		data->log->filename);
+	do {
+		if (c)
+			dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
+				(unsigned long) c->len);
+		c = get_new_chunk(seq);
+		stream.avail_out = GZ_CHUNK;
+		stream.next_out = c->buf;
+		ret = deflate(&stream, Z_NO_FLUSH);
+		if (ret < 0) {
+			log_err("fio: deflate log (%d)\n", ret);
+			free_chunk(c);
+			goto err;
+		}
+
+		c->len = GZ_CHUNK - stream.avail_out;
+		flist_add_tail(&c->list, &list);
+		total += c->len;
+	} while (stream.avail_in);
+
+	stream.next_out = c->buf + c->len;
+	stream.avail_out = GZ_CHUNK - c->len;
+
+	ret = deflate(&stream, Z_FINISH);
+	if (ret < 0) {
+		/*
+		 * Z_BUF_ERROR is special, it just means we need more
+		 * output space. We'll handle that below. Treat any other
+		 * error as fatal.
+		 */
+		if (ret != Z_BUF_ERROR) {
+			log_err("fio: deflate log (%d)\n", ret);
+			flist_del(&c->list);
+			free_chunk(c);
+			goto err;
+		}
+	}
+
+	total -= c->len;
+	c->len = GZ_CHUNK - stream.avail_out;
+	total += c->len;
+	dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq, (unsigned long) c->len);
+
+	if (ret != Z_STREAM_END) {
+		do {
+			c = get_new_chunk(seq);
+			stream.avail_out = GZ_CHUNK;
+			stream.next_out = c->buf;
+			ret = deflate(&stream, Z_FINISH);
+			c->len = GZ_CHUNK - stream.avail_out;
+			total += c->len;
+			flist_add_tail(&c->list, &list);
+			dprint(FD_COMPRESS, "seq=%d, chunk=%lu\n", seq,
+				(unsigned long) c->len);
+		} while (ret != Z_STREAM_END);
+	}
+
+	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);
 
-	if (!log)
+	ret = deflateEnd(&stream);
+	if (ret != Z_OK)
+		log_err("fio: deflateEnd %d\n", ret);
+
+	free(data->samples);
+
+	if (!flist_empty(&list)) {
+		pthread_mutex_lock(&data->log->chunk_lock);
+		flist_splice_tail(&list, &data->log->chunk_list);
+		pthread_mutex_unlock(&data->log->chunk_lock);
+	}
+
+	ret = 0;
+done:
+	if (data->free)
+		free(data);
+	return ret;
+err:
+	while (!flist_empty(&list)) {
+		c = flist_first_entry(list.next, struct iolog_compress, list);
+		flist_del(&c->list);
+		free_chunk(c);
+	}
+	ret = 1;
+	goto done;
+}
+
+/*
+ * Invoked from our compress helper thread, when logging would have exceeded
+ * the specified memory limitation. Compresses the previously stored
+ * entries.
+ */
+static int gz_work_async(struct submit_worker *sw, struct workqueue_work *work)
+{
+	return gz_work(container_of(work, struct iolog_flush_data, work));
+}
+
+static int gz_init_worker(struct submit_worker *sw)
+{
+	struct thread_data *td = sw->wq->td;
+
+	if (!fio_option_is_set(&td->o, log_gz_cpumask))
 		return 0;
 
-	return finish_log(td, log, try);
+	if (fio_setaffinity(gettid(), td->o.log_gz_cpumask) == -1) {
+		log_err("gz: failed to set CPU affinity\n");
+		return 1;
+	}
+
+	return 0;
 }
 
-static int write_clat_log(struct thread_data *td, int try)
+static struct workqueue_ops log_compress_wq_ops = {
+	.fn		= gz_work_async,
+	.init_worker_fn	= gz_init_worker,
+	.nice		= 1,
+};
+
+int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
 {
-	struct io_log *log = td->clat_log;
+	if (!(td->flags & TD_F_COMPRESS_LOG))
+		return 0;
 
-	if (!log)
+	workqueue_init(td, &td->log_compress_wq, &log_compress_wq_ops, 1, sk_out);
+	return 0;
+}
+
+void iolog_compress_exit(struct thread_data *td)
+{
+	if (!(td->flags & TD_F_COMPRESS_LOG))
+		return;
+
+	workqueue_exit(&td->log_compress_wq);
+}
+
+/*
+ * Queue work item to compress the existing log entries. We reset the
+ * current log to a small size, and reference the existing log in the
+ * data that we queue for compression. Once compression has been done,
+ * this old log is freed. If called with finish == true, will not return
+ * until the log compression has completed, and will flush all previous
+ * logs too
+ */
+static int iolog_flush(struct io_log *log)
+{
+	struct iolog_flush_data *data;
+
+	data = malloc(sizeof(*data));
+	if (!data)
+		return 1;
+
+	data->log = log;
+	data->free = false;
+
+	while (!flist_empty(&log->io_logs)) {
+		struct io_logs *cur_log;
+
+		cur_log = flist_first_entry(&log->io_logs, struct io_logs, list);
+		flist_del_init(&cur_log->list);
+
+		data->samples = cur_log->log;
+		data->nr_samples = cur_log->nr_samples;
+
+		sfree(cur_log);
+
+		gz_work(data);
+	}
+
+	free(data);
+	return 0;
+}
+
+int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
+{
+	struct iolog_flush_data *data;
+
+	data = malloc(sizeof(*data));
+	if (!data)
+		return 1;
+
+	data->log = log;
+
+	data->samples = cur_log->log;
+	data->nr_samples = cur_log->nr_samples;
+	data->free = true;
+
+	cur_log->nr_samples = cur_log->max_samples = 0;
+	cur_log->log = NULL;
+
+	workqueue_enqueue(&log->td->log_compress_wq, &data->work);
+	return 0;
+}
+#else
+
+static int iolog_flush(struct io_log *log)
+{
+	return 1;
+}
+
+int iolog_cur_flush(struct io_log *log, struct io_logs *cur_log)
+{
+	return 1;
+}
+
+int iolog_compress_init(struct thread_data *td, struct sk_out *sk_out)
+{
+	return 0;
+}
+
+void iolog_compress_exit(struct thread_data *td)
+{
+}
+
+#endif
+
+struct io_logs *iolog_cur_log(struct io_log *log)
+{
+	if (flist_empty(&log->io_logs))
+		return NULL;
+
+	return flist_last_entry(&log->io_logs, struct io_logs, list);
+}
+
+uint64_t iolog_nr_samples(struct io_log *iolog)
+{
+	struct flist_head *entry;
+	uint64_t ret = 0;
+
+	flist_for_each(entry, &iolog->io_logs) {
+		struct io_logs *cur_log;
+
+		cur_log = flist_entry(entry, struct io_logs, list);
+		ret += cur_log->nr_samples;
+	}
+
+	return ret;
+}
+
+static int __write_log(struct thread_data *td, struct io_log *log, int try)
+{
+	if (log)
+		return finish_log(td, log, try);
+
+	return 0;
+}
+
+static int write_iops_log(struct thread_data *td, int try, bool unit_log)
+{
+	int ret;
+
+	if (per_unit_log(td->iops_log) != unit_log)
 		return 0;
 
-	return finish_log(td, log, try);
+	ret = __write_log(td, td->iops_log, try);
+	if (!ret)
+		td->iops_log = NULL;
+
+	return ret;
 }
 
-static int write_lat_log(struct thread_data *td, int try)
+static int write_slat_log(struct thread_data *td, int try, bool unit_log)
 {
-	struct io_log *log = td->lat_log;
+	int ret;
 
-	if (!log)
+	if (!unit_log)
 		return 0;
 
-	return finish_log(td, log, try);
+	ret = __write_log(td, td->slat_log, try);
+	if (!ret)
+		td->slat_log = NULL;
+
+	return ret;
 }
 
-static int write_bandw_log(struct thread_data *td, int try)
+static int write_clat_log(struct thread_data *td, int try, bool unit_log)
 {
-	struct io_log *log = td->bw_log;
+	int ret;
 
-	if (!log)
+	if (!unit_log)
 		return 0;
 
-	return finish_log(td, log, try);
+	ret = __write_log(td, td->clat_log, try);
+	if (!ret)
+		td->clat_log = NULL;
+
+	return ret;
+}
+
+static int write_clat_hist_log(struct thread_data *td, int try, bool unit_log)
+{
+	int ret;
+
+	if (!unit_log)
+		return 0;
+
+	ret = __write_log(td, td->clat_hist_log, try);
+	if (!ret)
+		td->clat_hist_log = NULL;
+
+	return ret;
+}
+
+static int write_lat_log(struct thread_data *td, int try, bool unit_log)
+{
+	int ret;
+
+	if (!unit_log)
+		return 0;
+
+	ret = __write_log(td, td->lat_log, try);
+	if (!ret)
+		td->lat_log = NULL;
+
+	return ret;
+}
+
+static int write_bandw_log(struct thread_data *td, int try, bool unit_log)
+{
+	int ret;
+
+	if (per_unit_log(td->bw_log) != unit_log)
+		return 0;
+
+	ret = __write_log(td, td->bw_log, try);
+	if (!ret)
+		td->bw_log = NULL;
+
+	return ret;
 }
 
 enum {
@@ -700,13 +1485,14 @@ enum {
 	SLAT_LOG_MASK	= 4,
 	CLAT_LOG_MASK	= 8,
 	IOPS_LOG_MASK	= 16,
+	CLAT_HIST_LOG_MASK = 32,
 
-	ALL_LOG_NR	= 5,
+	ALL_LOG_NR	= 6,
 };
 
 struct log_type {
 	unsigned int mask;
-	int (*fn)(struct thread_data *, int);
+	int (*fn)(struct thread_data *, int, bool);
 };
 
 static struct log_type log_types[] = {
@@ -730,9 +1516,13 @@ static struct log_type log_types[] = {
 		.mask	= IOPS_LOG_MASK,
 		.fn	= write_iops_log,
 	},
+	{
+		.mask	= CLAT_HIST_LOG_MASK,
+		.fn	= write_clat_hist_log,
+	}
 };
 
-void fio_writeout_logs(struct thread_data *td)
+void td_writeout_logs(struct thread_data *td, bool unit_logs)
 {
 	unsigned int log_mask = 0;
 	unsigned int log_left = ALL_LOG_NR;
@@ -740,7 +1530,7 @@ void fio_writeout_logs(struct thread_data *td)
 
 	old_state = td_bump_runstate(td, TD_FINISHING);
 
-	finalize_logs(td);
+	finalize_logs(td, unit_logs);
 
 	while (log_left) {
 		int prev_log_left = log_left;
@@ -750,7 +1540,7 @@ void fio_writeout_logs(struct thread_data *td)
 			int ret;
 
 			if (!(log_mask & lt->mask)) {
-				ret = lt->fn(td, log_left != 1);
+				ret = lt->fn(td, log_left != 1, unit_logs);
 				if (!ret) {
 					log_left--;
 					log_mask |= lt->mask;
@@ -764,3 +1554,12 @@ void fio_writeout_logs(struct thread_data *td)
 
 	td_restore_runstate(td, old_state);
 }
+
+void fio_writeout_logs(bool unit_logs)
+{
+	struct thread_data *td;
+	int i;
+
+	for_each_td(td, i)
+		td_writeout_logs(td, unit_logs);
+}
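
The core of the compression path added above is gz_work()'s two-phase zlib
loop: deflate the sample buffer into fixed-size chunks with Z_NO_FLUSH until
the input is consumed, then drain the stream with Z_FINISH, treating
Z_BUF_ERROR as "allocate another chunk and keep going". The stand-alone
sketch below (plain zlib, not fio code; the chunk size merely mirrors
GZ_CHUNK and all names are illustrative) shows the same pattern. Build with:
cc deflate_sketch.c -lz

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#define CHUNK	131072	/* same size as GZ_CHUNK in the patch */

static int compress_chunked(const void *in, size_t in_len)
{
	z_stream stream;
	unsigned char *buf = malloc(CHUNK);
	int ret;

	memset(&stream, 0, sizeof(stream));
	if (deflateInit(&stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		free(buf);
		return 1;
	}

	stream.next_in = (void *) in;
	stream.avail_in = in_len;

	/* phase 1: consume all input, one fixed-size output chunk at a time */
	do {
		stream.next_out = buf;
		stream.avail_out = CHUNK;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0)
			goto err;
		/* gz_work() would stash this chunk on a list here */
		printf("chunk: %lu bytes\n",
			(unsigned long) (CHUNK - stream.avail_out));
	} while (stream.avail_in);

	/*
	 * phase 2: flush zlib's internal state. Z_BUF_ERROR only means
	 * "more output space needed", so keep handing out fresh chunks
	 * until deflate() reports Z_STREAM_END.
	 */
	do {
		stream.next_out = buf;
		stream.avail_out = CHUNK;
		ret = deflate(&stream, Z_FINISH);
		if (ret < 0 && ret != Z_BUF_ERROR)
			goto err;
		printf("final chunk: %lu bytes\n",
			(unsigned long) (CHUNK - stream.avail_out));
	} while (ret != Z_STREAM_END);

	free(buf);
	return deflateEnd(&stream) != Z_OK;
err:
	free(buf);
	deflateEnd(&stream);
	return 1;
}

int main(void)
{
	static char data[65536];

	memset(data, 'A', sizeof(data));
	return compress_chunked(data, sizeof(data));
}

In the patch, each chunk becomes an iolog_compress entry tagged with a
sequence number; that tag is what later lets the inflate side treat each
Z_STREAM_END as a chunk boundary rather than the end of the whole log.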
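
On the decompress side, z_stream_init() passes windowBits of 15 + 32 to
inflateInit2(): the +32 is the "zlib magic" its comment refers to, asking
zlib to auto-detect a gzip or zlib header, so logs written via log_gz_store
can be read back either way. inflate_chunk() pairs that with an output
buffer that is grown whenever avail_out hits zero. A minimal sketch of both
ideas, again ordinary zlib rather than fio code, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

static void *inflate_all(const void *in, size_t in_len, size_t *out_len)
{
	size_t bufsz = 16384, used = 0;
	unsigned char *buf = malloc(bufsz);
	z_stream stream;
	int err;

	*out_len = 0;
	memset(&stream, 0, sizeof(stream));
	/* 15 window bits; +32 auto-detects a zlib or gzip wrapper */
	if (inflateInit2(&stream, 15 + 32) != Z_OK) {
		free(buf);
		return NULL;
	}

	stream.next_in = (void *) in;
	stream.avail_in = in_len;

	while (stream.avail_in) {
		size_t this_out = bufsz - used;

		stream.next_out = buf + used;
		stream.avail_out = this_out;

		err = inflate(&stream, Z_NO_FLUSH);
		if (err < 0) {
			inflateEnd(&stream);
			free(buf);
			return NULL;
		}

		used += this_out - stream.avail_out;

		/* out of room: grow the buffer and keep inflating */
		if (!stream.avail_out) {
			bufsz += bufsz;
			buf = realloc(buf, bufsz);
			continue;
		}

		if (err == Z_STREAM_END)
			break;
	}

	inflateEnd(&stream);
	*out_len = used;
	return buf;
}

int main(void)
{
	static unsigned char raw[4096], zbuf[8192];
	uLongf zlen = sizeof(zbuf);
	size_t out_len;
	void *out;

	memset(raw, 'x', sizeof(raw));
	if (compress2(zbuf, &zlen, raw, sizeof(raw), Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	out = inflate_all(zbuf, zlen, &out_len);
	if (!out)
		return 1;
	printf("inflated %lu -> %lu bytes\n", (unsigned long) zlen,
		(unsigned long) out_len);
	free(out);
	return 0;
}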
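
Finally, td_writeout_logs() retires the six log types with a retry loop:
while more than one log remains it passes trylock (lt->fn(td, log_left != 1,
...)), so one contended log file cannot stall the others, and it only blocks
on the last log left; prev_log_left lets it notice passes that made no
progress. A compact sketch of that pattern, with pthread mutexes standing in
for fio's fio_trylock_file()/fio_lock_file() (all names here are
illustrative, not fio API):

#include <pthread.h>
#include <stdio.h>

#define NR_LOGS	3

static pthread_mutex_t locks[NR_LOGS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

/* returns non-zero if the lock was busy and the caller should retry */
static int write_one(int i, int try)
{
	if (try) {
		if (pthread_mutex_trylock(&locks[i]))
			return 1;
	} else
		pthread_mutex_lock(&locks[i]);

	printf("flushed log %d\n", i);
	pthread_mutex_unlock(&locks[i]);
	return 0;
}

int main(void)
{
	unsigned int done_mask = 0, left = NR_LOGS;

	while (left) {
		int i;

		for (i = 0; i < NR_LOGS; i++) {
			if (done_mask & (1U << i))
				continue;
			/* block only when this is the last log left */
			if (!write_one(i, left != 1)) {
				done_mask |= 1U << i;
				left--;
			}
		}
	}
	return 0;
}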