}
/*
- * We don't need to sort the entries, if:
+ * We don't need to sort the entries if we only performed sequential
+ * writes. In this case, just reading back data in the order we wrote
+ * it out is the faster but still safe.
*
- * Sequential writes, or
- * Random writes that lay out the file as it goes along
- *
- * For both these cases, just reading back data in the order we
- * wrote it out is the fastest.
- *
- * One exception is if we don't have a random map AND we are doing
- * verifies, in that case we need to check for duplicate blocks and
- * drop the old one, which we rely on the rb insert/lookup for
- * handling.
+ * One exception is if we don't have a random map in which case we need
+ * to check for duplicate blocks and drop the old one, which we rely on
+ * the rb insert/lookup for handling.
*/
- if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
- (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
+ if (((!td->o.verifysort) || !td_random(td)) &&
+ file_randommap(td, ipo->file)) {
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
ipo->flags |= IP_F_ONLIST;
td->io_hist_len--;
rb_erase(parent, &td->io_hist_tree);
remove_trim_entry(td, __ipo);
- free(__ipo);
+ if (!(__ipo->flags & IP_F_IN_FLIGHT))
+ free(__ipo);
goto restart;
}
}
l->log_gz = 0;
else if (l->log_gz || l->log_gz_store) {
mutex_init_pshared(&l->chunk_lock);
+ mutex_init_pshared(&l->deferred_free_lock);
p->td->flags |= TD_F_COMPRESS_LOG;
}
#ifdef CONFIG_ZLIB
+static bool warned_on_drop;
+
+static void iolog_put_deferred(struct io_log *log, void *ptr)
+{
+ if (!ptr)
+ return;
+
+ pthread_mutex_lock(&log->deferred_free_lock);
+ if (log->deferred < IOLOG_MAX_DEFER) {
+ log->deferred_items[log->deferred] = ptr;
+ log->deferred++;
+ } else if (!warned_on_drop) {
+ log_err("fio: had to drop log entry free\n");
+ warned_on_drop = true;
+ }
+ pthread_mutex_unlock(&log->deferred_free_lock);
+}
+
+static void iolog_free_deferred(struct io_log *log)
+{
+ int i;
+
+ if (!log->deferred)
+ return;
+
+ pthread_mutex_lock(&log->deferred_free_lock);
+
+ for (i = 0; i < log->deferred; i++) {
+ free(log->deferred_items[i]);
+ log->deferred_items[i] = NULL;
+ }
+
+ log->deferred = 0;
+ pthread_mutex_unlock(&log->deferred_free_lock);
+}
+
static int gz_work(struct iolog_flush_data *data)
{
struct iolog_compress *c = NULL;
if (ret != Z_OK)
log_err("fio: deflateEnd %d\n", ret);
- free(data->samples);
+ iolog_put_deferred(data->log, data->samples);
if (!flist_empty(&list)) {
pthread_mutex_lock(&data->log->chunk_lock);
ret = 0;
done:
if (data->free)
- free(data);
+ sfree(data);
return ret;
err:
while (!flist_empty(&list)) {
{
struct iolog_flush_data *data;
- data = malloc(sizeof(*data));
+ data = smalloc(sizeof(*data));
if (!data)
return 1;
cur_log->log = NULL;
workqueue_enqueue(&log->td->log_compress_wq, &data->work);
+
+ iolog_free_deferred(log);
+
return 0;
}
#else