X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=blktrace.c;h=ef9ce6bffd8686e1e5f5e7bf4f00828490495c2c;hb=79201772c8091386077bf3bdc31f53211a0e020d;hp=1faa83bf0332f953b9eec619006c3ecf729c2478;hpb=f36bd1341690e0c63718c32465e173874bf56727;p=fio.git

diff --git a/blktrace.c b/blktrace.c
index 1faa83bf..ef9ce6bf 100644
--- a/blktrace.c
+++ b/blktrace.c
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "flist.h"
 #include "fio.h"
@@ -13,6 +14,12 @@
 #include "blktrace_api.h"
 #include "oslib/linux-dev-lookup.h"
 
+struct file_cache {
+	unsigned int maj;
+	unsigned int min;
+	unsigned int fileno;
+};
+
 /*
  * Just discard the pdu by seeking past it.
  */
@@ -87,28 +94,28 @@ static void trace_add_open_close_event(struct thread_data *td, int fileno, enum
 	flist_add_tail(&ipo->list, &td->io_log_list);
 }
 
-static int trace_add_file(struct thread_data *td, __u32 device)
+static int trace_add_file(struct thread_data *td, __u32 device,
+			  struct file_cache *cache)
 {
-	static unsigned int last_maj, last_min, last_fileno;
 	unsigned int maj = FMAJOR(device);
 	unsigned int min = FMINOR(device);
 	struct fio_file *f;
 	char dev[256];
 	unsigned int i;
 
-	if (last_maj == maj && last_min == min)
-		return last_fileno;
+	if (cache->maj == maj && cache->min == min)
+		return cache->fileno;
 
-	last_maj = maj;
-	last_min = min;
+	cache->maj = maj;
+	cache->min = min;
 
 	/*
 	 * check for this file in our list
 	 */
 	for_each_file(td, f, i)
 		if (f->major == maj && f->minor == min) {
-			last_fileno = f->fileno;
-			return last_fileno;
+			cache->fileno = f->fileno;
+			return cache->fileno;
 		}
 
 	strcpy(dev, "/dev");
@@ -128,10 +135,10 @@ static int trace_add_file(struct thread_data *td, __u32 device)
 		td->files[fileno]->major = maj;
 		td->files[fileno]->minor = min;
 		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
-		last_fileno = fileno;
+		cache->fileno = fileno;
 	}
 
-	return last_fileno;
+	return cache->fileno;
 }
 
 static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
@@ -195,7 +202,8 @@ static bool handle_trace_notify(struct blk_io_trace *t)
 static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
-				 unsigned long *ios, unsigned long long *bs)
+				 unsigned long *ios, unsigned long long *bs,
+				 struct file_cache *cache)
 {
 	struct io_piece *ipo;
 	int fileno;
@@ -205,7 +213,7 @@ static bool handle_trace_discard(struct thread_data *td,
 
 	ipo = calloc(1, sizeof(*ipo));
 	init_ipo(ipo);
-	fileno = trace_add_file(td, t->device);
+	fileno = trace_add_file(td, t->device, cache);
 
 	ios[DDIR_TRIM]++;
 	if (t->bytes > bs[DDIR_TRIM])
@@ -238,12 +246,12 @@ static void dump_trace(struct blk_io_trace *t)
 }
 
 static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
-			    unsigned long long *bs)
+			    unsigned long long *bs, struct file_cache *cache)
 {
 	int rw;
 	int fileno;
 
-	fileno = trace_add_file(td, t->device);
+	fileno = trace_add_file(td, t->device, cache);
 
 	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
@@ -271,7 +279,8 @@ static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
 }
 
 static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
-			       unsigned long long ttime, unsigned long *ios)
+			       unsigned long long ttime, unsigned long *ios,
+			       struct file_cache *cache)
 {
 	struct io_piece *ipo;
 	int fileno;
@@ -281,7 +290,7 @@ static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
 
 	ipo = calloc(1, sizeof(*ipo));
 	init_ipo(ipo);
-	fileno = trace_add_file(td, t->device);
+	fileno = trace_add_file(td, t->device, cache);
 
 	ipo->delay = ttime / 1000;
 	ipo->ddir = DDIR_SYNC;
@@ -289,6 +298,10 @@ static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
 
 	ios[DDIR_SYNC]++;
 	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);
+
+	if (!(td->flags & TD_F_SYNCS))
+		td->flags |= TD_F_SYNCS;
+
 	queue_io_piece(td, ipo);
 	return true;
 }
@@ -298,28 +311,18 @@ static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
  * due to internal workings of the block layer.
  */
 static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
-			unsigned long *ios, unsigned long long *bs)
+			unsigned long *ios, unsigned long long *bs,
+			struct file_cache *cache)
 {
-	static unsigned long long last_ttime;
+	unsigned long long *last_ttime = &td->io_log_last_ttime;
 	unsigned long long delay = 0;
 
 	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
 		return false;
 
 	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
-		if (!last_ttime || td->o.no_stall || t->time < last_ttime)
-			delay = 0;
-		else if (td->o.replay_time_scale == 100)
-			delay = t->time - last_ttime;
-		else {
-			double tmp = t->time - last_ttime;
-			double scale;
-
-			scale = (double) 100.0 / (double) td->o.replay_time_scale;
-			tmp *= scale;
-			delay = tmp;
-		}
-		last_ttime = t->time;
+		delay = delay_since_ttime(td, t->time);
+		*last_ttime = t->time;
 	}
 
 	t_bytes_align(&td->o, t);
@@ -327,11 +330,11 @@ static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
 	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
 		return handle_trace_notify(t);
 	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
-		return handle_trace_discard(td, t, delay, ios, bs);
+		return handle_trace_discard(td, t, delay, ios, bs, cache);
 	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
-		return handle_trace_flush(td, t, delay, ios);
+		return handle_trace_flush(td, t, delay, ios, cache);
 	else
-		return handle_trace_fs(td, t, delay, ios, bs);
+		return handle_trace_fs(td, t, delay, ios, bs, cache);
 }
 
 static void byteswap_trace(struct blk_io_trace *t)
@@ -409,6 +412,7 @@ bool init_blktrace_read(struct thread_data *td, const char *filename, int need_s
 		goto err;
 	}
 	td->io_log_blktrace_swap = need_swap;
+	td->io_log_last_ttime = 0;
 	td->o.size = 0;
 
 	free_release_files(td);
@@ -439,6 +443,10 @@ err:
 bool read_blktrace(struct thread_data* td)
 {
 	struct blk_io_trace t;
+	struct file_cache cache = {
+		.maj = ~0U,
+		.min = ~0U,
+	};
 	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
 	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
 	unsigned long skipped_writes;
@@ -502,7 +510,7 @@ bool read_blktrace(struct thread_data* td)
 			}
 		}
 
-		if (!queue_trace(td, &t, ios, rw_bs))
+		if (!queue_trace(td, &t, ios, rw_bs, &cache))
 			continue;
 
 		if (td->o.read_iolog_chunked) {
@@ -538,7 +546,8 @@ bool read_blktrace(struct thread_data* td)
 		td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
 		io_u_quiesce(td);
 		free_io_mem(td);
-		init_io_u_buffers(td);
+		if (init_io_u_buffers(td))
+			return false;
 	}
 	return true;
 }
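
Note (not part of the patch itself): the heart of this change is replacing function-static state (last_maj/last_min/last_fileno in trace_add_file() and last_ttime in queue_trace()) with caller-owned state, namely a struct file_cache that lives on read_blktrace()'s stack and td->io_log_last_ttime on the thread data, so trace parsing no longer relies on hidden globals. The sketch below is a standalone illustration of that caching pattern only; it is not fio code, and names such as lookup_fileno() and resolve_fileno() are hypothetical stand-ins.

/*
 * Minimal sketch of the caller-owned cache pattern introduced by the
 * patch.  The struct mirrors fio's "struct file_cache"; everything
 * else here is a placeholder for illustration.
 */
#include <stdio.h>

struct file_cache {
	unsigned int maj;
	unsigned int min;
	unsigned int fileno;
};

/* Placeholder for the real device-to-fileno resolution (fio walks td->files). */
static unsigned int resolve_fileno(unsigned int maj, unsigned int min)
{
	return maj * 256 + min;	/* arbitrary mapping, illustration only */
}

static unsigned int lookup_fileno(struct file_cache *cache,
				  unsigned int maj, unsigned int min)
{
	/* Cache hit: same device as last time, skip the lookup. */
	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	cache->maj = maj;
	cache->min = min;
	cache->fileno = resolve_fileno(maj, min);
	return cache->fileno;
}

int main(void)
{
	/* ~0U sentinels match the initialization in read_blktrace(). */
	struct file_cache cache = { .maj = ~0U, .min = ~0U };

	printf("%u\n", lookup_fileno(&cache, 8, 0));	/* miss, resolves */
	printf("%u\n", lookup_fileno(&cache, 8, 0));	/* hit, cached    */
	return 0;
}

Seeding the cache with ~0U, as read_blktrace() does, guarantees the first lookup misses even for a 0:0 device; because each reader owns its cache, concurrent parses no longer race on shared statics.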