*/
#include <stdio.h>
#include <stdlib.h>
-#include <libgen.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "flist.h"
#include "fio.h"
-#include "verify.h"
#include "trim.h"
#include "filelock.h"
#include "smalloc.h"
#include "blktrace.h"
+#include "pshared.h"
static int iolog_flush(struct io_log *log);
/*
 * NOTE(review): this span is a unified-diff fragment ('-'/'+' markers) of
 * iolog_delay(), with unchanged context lines stripped -- not compilable C.
 * The change captures the caller-requested delay before 'delay' is mutated
 * in the (missing) middle of the function, so the time_offset computation
 * at the tail compares against the ORIGINAL request.
 */
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
uint64_t usec = utime_since_now(&td->last_issue);
/* added: snapshot the requested delay before the loop below modifies it */
+ unsigned long orig_delay = delay;
uint64_t this_delay;
struct timespec ts;
}
/* NOTE(review): interior of the function (ts init, sleep loop) is missing here */
usec = utime_since_now(&ts);
/* changed pair: measure overshoot against orig_delay, not the mutated 'delay' */
- if (usec > delay)
- td->time_offset = usec - delay;
+ if (usec > orig_delay)
+ td->time_offset = usec - orig_delay;
else
td->time_offset = 0;
}
/*
 * NOTE(review): diff fragment of prune_io_piece_log(); the loop body that
 * erases/frees each entry is truncated below. The only visible change is the
 * rename of the rb-tree node type (rb_node -> fio_rb_node), presumably to
 * avoid clashing with a system/kernel rbtree definition -- TODO confirm.
 */
void prune_io_piece_log(struct thread_data *td)
{
struct io_piece *ipo;
- struct rb_node *n;
+ struct fio_rb_node *n;
while ((n = rb_first(&td->io_hist_tree)) != NULL) {
ipo = rb_entry(n, struct io_piece, rb_node);
*/
/*
 * NOTE(review): diff fragment of log_io_piece(), with context stripped
 * (stray '}' below and missing interior). Three visible changes:
 *  1. rb_node -> fio_rb_node type rename for the tree walk pointers;
 *  2. the "skip sorting" fast path no longer considers td->o.overwrite or
 *     VERIFY_NONE -- the rewritten comment block explains the new rule;
 *  3. overlapping history entries still in flight (IP_F_IN_FLIGHT) are no
 *     longer freed immediately, avoiding a use-after-free on the in-flight
 *     io_u -- presumably freed later by the completion path; TODO confirm.
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
- struct rb_node **p, *parent;
+ struct fio_rb_node **p, *parent;
struct io_piece *ipo, *__ipo;
ipo = malloc(sizeof(struct io_piece));
}
/* NOTE(review): code between the malloc above and this comment is missing */
/*
- * We don't need to sort the entries, if:
+ * We don't need to sort the entries if we only performed sequential
+ * writes. In this case, just reading back data in the order we wrote
+ * it out is the faster but still safe.
 *
- * Sequential writes, or
- * Random writes that lay out the file as it goes along
- *
- * For both these cases, just reading back data in the order we
- * wrote it out is the fastest.
- *
- * One exception is if we don't have a random map AND we are doing
- * verifies, in that case we need to check for duplicate blocks and
- * drop the old one, which we rely on the rb insert/lookup for
- * handling.
+ * One exception is if we don't have a random map in which case we need
+ * to check for duplicate blocks and drop the old one, which we rely on
+ * the rb insert/lookup for handling.
 */
/* changed condition: drops the overwrite/VERIFY_NONE terms from the fast path */
- if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
- (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
+ if (((!td->o.verifysort) || !td_random(td)) &&
+ file_randommap(td, ipo->file)) {
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
ipo->flags |= IP_F_ONLIST;
td->io_hist_len--;
rb_erase(parent, &td->io_hist_tree);
remove_trim_entry(td, __ipo);
/* changed: only free the evicted duplicate if no io_u still references it */
- free(__ipo);
+ if (!(__ipo->flags & IP_F_IN_FLIGHT))
+ free(__ipo);
goto restart;
}
}
sfree(log);
}
/*
 * NOTE(review): diff fragment. hist_sum() and the histogram buffers are
 * widened from unsigned long / unsigned int to uint64_t so bucket sums
 * cannot truncate on 32-bit builds (unsigned long is 32 bits there).
 * The fprintf format strings are updated to %llu with explicit casts to
 * keep the specifier/argument types matched on all platforms.
 */
-unsigned long hist_sum(int j, int stride, unsigned int *io_u_plat,
- unsigned int *io_u_plat_last)
+uint64_t hist_sum(int j, int stride, uint64_t *io_u_plat,
+ uint64_t *io_u_plat_last)
{
- unsigned long sum;
+ uint64_t sum;
int k;
if (io_u_plat_last) {
/* NOTE(review): hist_sum() body is truncated; the lines below belong to a
 * separate histogram-flush routine whose head is not visible here. */
int log_offset;
uint64_t i, j, nr_samples;
struct io_u_plat_entry *entry, *entry_before;
- unsigned int *io_u_plat;
- unsigned int *io_u_plat_before;
+ uint64_t *io_u_plat;
+ uint64_t *io_u_plat_before;
int stride = 1 << hist_coarseness;
fprintf(f, "%lu, %u, %u, ", (unsigned long) s->time,
io_sample_ddir(s), s->bs);
for (j = 0; j < FIO_IO_U_PLAT_NR - stride; j += stride) {
/* changed: %lu -> %llu with cast, matching the widened hist_sum() return */
- fprintf(f, "%lu, ", hist_sum(j, stride, io_u_plat,
- io_u_plat_before));
+ fprintf(f, "%llu, ", (unsigned long long)
+ hist_sum(j, stride, io_u_plat, io_u_plat_before));
}
- fprintf(f, "%lu\n", (unsigned long)
+ fprintf(f, "%llu\n", (unsigned long long)
hist_sum(FIO_IO_U_PLAT_NR - stride, stride, io_u_plat,
io_u_plat_before));
/* NOTE(review): diff fragment from the middle of a zlib log-inflate routine;
 * the function head and tail are not visible here. */
struct iolog_compress ic;
z_stream stream;
struct stat sb;
/*
 * changed: fread() returns size_t (item count), never a negative value, so
 * 'ret' is retyped and the old 'ret < 0' check -- which could never fire --
 * is replaced by explicit ferror()/feof() stream-state checks.
 */
- ssize_t ret;
+ size_t ret;
size_t total;
void *buf;
FILE *f;
ic.seq = 1;
ret = fread(ic.buf, ic.len, 1, f);
/* hard error: zero items read AND the error indicator is set */
- if (ret < 0) {
+ if (ret == 0 && ferror(f)) {
perror("fread");
fclose(f);
free(buf);
return 1;
/* short read: error flag set, or not at EOF yet with an incomplete item */
- } else if (ret != 1) {
+ } else if (ferror(f) || (!feof(f) && ret != 1)) {
log_err("fio: short read on reading log\n");
fclose(f);
free(buf);
#ifdef CONFIG_ZLIB
/*
 * NOTE(review): diff fragment of iolog_put_deferred(). The file-local
 * 'warned_on_drop' boolean is deleted in favor of a centralized one-shot
 * warning helper, fio_did_warn(FIO_WARN_IOLOG_DROP) -- presumably it
 * returns whether the warning already fired; TODO confirm its contract.
 * The braces become unnecessary once the else-branch is a single statement.
 */
-static bool warned_on_drop;
-
static void iolog_put_deferred(struct io_log *log, void *ptr)
{
if (!ptr)
if (log->deferred < IOLOG_MAX_DEFER) {
log->deferred_items[log->deferred] = ptr;
log->deferred++;
/* changed: per-site static flag replaced by the shared warn-once mechanism */
- } else if (!warned_on_drop) {
+ } else if (!fio_did_warn(FIO_WARN_IOLOG_DROP))
log_err("fio: had to drop log entry free\n");
- warned_on_drop = true;
- }
pthread_mutex_unlock(&log->deferred_free_lock);
}