diff --git a/blktrace.c b/blktrace.c
index 8ba538ae..36a71809 100644
--- a/blktrace.c
+++ b/blktrace.c
@@ -3,12 +3,16 @@
  */
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/ioctl.h>
+#include <linux/fs.h>
 
-#include "list.h"
+#include "flist.h"
 #include "fio.h"
+#include "blktrace.h"
 #include "blktrace_api.h"
+#include "oslib/linux-dev-lookup.h"
 
-#define TRACE_FIFO_SIZE 65536
+#define TRACE_FIFO_SIZE 8192
 
 /*
  * fifo refill frontend, to avoid reading data in trace sized bites
@@ -32,6 +36,7 @@ static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
 	if (ret > 0)
 		ret = fifo_put(fifo, buf, ret);
 
+	dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
 	return ret;
 }
 
@@ -60,6 +65,7 @@ static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
 	if (t->pdu_len == 0)
 		return 0;
 
+	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
 	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
 }
 
@@ -67,56 +73,296 @@ static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
  * Check if this is a blktrace binary data file. We read a single trace
  * into memory and check for the magic signature.
  */
-int is_blktrace(const char *filename)
+bool is_blktrace(const char *filename, int *need_swap)
 {
 	struct blk_io_trace t;
 	int fd, ret;
 
 	fd = open(filename, O_RDONLY);
-	if (fd < 0) {
-		perror("open blktrace");
-		return 0;
-	}
+	if (fd < 0)
+		return false;
 
 	ret = read(fd, &t, sizeof(t));
 	close(fd);
 
 	if (ret < 0) {
 		perror("read blktrace");
-		return 0;
+		return false;
 	} else if (ret != sizeof(t)) {
 		log_err("fio: short read on blktrace file\n");
-		return 0;
+		return false;
 	}
 
-	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC)
+	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
+		*need_swap = 0;
+		return true;
+	}
+
+	/*
+	 * Maybe it needs to be endian swapped...
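+	 * The magic is stored in the byte order of the machine that wrote
+	 * the trace, so a trace taken on a host of the opposite endianness
+	 * only matches after a byte swap.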
+	 */
+	t.magic = fio_swap32(t.magic);
+	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
+		*need_swap = 1;
+		return true;
+	}
+
+	return false;
+}
+
+#define FMINORBITS	20
+#define FMINORMASK	((1U << FMINORBITS) - 1)
+#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
+#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))
+
+static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
+{
+	struct io_piece *ipo;
+
+	ipo = calloc(1, sizeof(*ipo));
+	init_ipo(ipo);
+
+	ipo->ddir = DDIR_INVAL;
+	ipo->fileno = fileno;
+	ipo->file_action = action;
+	flist_add_tail(&ipo->list, &td->io_log_list);
+}
+
+static int get_dev_blocksize(const char *dev, unsigned int *bs)
+{
+	int fd;
+
+	fd = open(dev, O_RDONLY);
+	if (fd < 0)
+		return 1;
+
+	if (ioctl(fd, BLKSSZGET, bs) < 0) {
+		close(fd);
 		return 1;
+	}
 
+	close(fd);
 	return 0;
 }
 
+static int trace_add_file(struct thread_data *td, __u32 device,
+			  unsigned int *bs)
+{
+	static unsigned int last_maj, last_min, last_fileno, last_bs;
+	unsigned int maj = FMAJOR(device);
+	unsigned int min = FMINOR(device);
+	struct fio_file *f;
+	unsigned int i;
+	char dev[256];
+
+	if (last_maj == maj && last_min == min) {
+		*bs = last_bs;
+		return last_fileno;
+	}
+
+	last_maj = maj;
+	last_min = min;
+
+	/*
+	 * check for this file in our list
+	 */
+	for_each_file(td, f, i) {
+		if (f->major == maj && f->minor == min) {
+			last_fileno = f->fileno;
+			last_bs = f->bs;
+			goto out;
+		}
+	}
+
+	strcpy(dev, "/dev");
+	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
+		unsigned int this_bs;
+		int fileno;
+
+		if (td->o.replay_redirect)
+			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
+					" with: %s\n", maj, min,
+					td->o.replay_redirect);
+		else
+			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
+
+		dprint(FD_BLKTRACE, "add device %s\n", dev);
+		fileno = add_file_exclusive(td, dev);
+
+		if (get_dev_blocksize(dev, &this_bs))
+			this_bs = 512;
+
+		td->o.open_files++;
+		td->files[fileno]->major = maj;
+		td->files[fileno]->minor = min;
+		td->files[fileno]->bs = this_bs;
+		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
+
+		last_fileno = fileno;
+		last_bs = this_bs;
+	}
+
+out:
+	*bs = last_bs;
+	return last_fileno;
+}
+
+static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
+{
+	if (!o->replay_align)
+		return;
+
+	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
+}
+
 /*
  * Store blk_io_trace data in an ipo for later retrieval.
  */
 static void store_ipo(struct thread_data *td, unsigned long long offset,
-		      unsigned int bytes, int rw, unsigned long long ttime)
+		      unsigned int bytes, int rw, unsigned long long ttime,
+		      int fileno, unsigned int bs)
 {
-	struct io_piece *ipo = malloc(sizeof(*ipo));
+	struct io_piece *ipo;
 
-	memset(ipo, 0, sizeof(*ipo));
-	INIT_LIST_HEAD(&ipo->list);
-	/*
-	 * the 512 is wrong here, it should be the hardware sector size...
- */ - ipo->offset = offset * 512; + ipo = calloc(1, sizeof(*ipo)); + init_ipo(ipo); + + ipo->offset = offset * bs; + if (td->o.replay_scale) + ipo->offset = ipo->offset / td->o.replay_scale; + ipo_bytes_align(td->o.replay_align, ipo); ipo->len = bytes; ipo->delay = ttime / 1000; if (rw) ipo->ddir = DDIR_WRITE; else ipo->ddir = DDIR_READ; + ipo->fileno = fileno; + + dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n", + ipo->ddir, ipo->offset, + ipo->len, ipo->delay); + queue_io_piece(td, ipo); +} + +static void handle_trace_notify(struct blk_io_trace *t) +{ + switch (t->action) { + case BLK_TN_PROCESS: + dprint(FD_BLKTRACE, "got process notify: %x, %d\n", + t->action, t->pid); + break; + case BLK_TN_TIMESTAMP: + dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n", + t->action, t->pid); + break; + case BLK_TN_MESSAGE: + break; + default: + dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action); + break; + } +} + +static void handle_trace_discard(struct thread_data *td, + struct blk_io_trace *t, + unsigned long long ttime, + unsigned long *ios, unsigned int *rw_bs) +{ + struct io_piece *ipo; + unsigned int bs; + int fileno; + + if (td->o.replay_skip & (1u << DDIR_TRIM)) + return; + + ipo = calloc(1, sizeof(*ipo)); + init_ipo(ipo); + fileno = trace_add_file(td, t->device, &bs); + + ios[DDIR_TRIM]++; + if (t->bytes > rw_bs[DDIR_TRIM]) + rw_bs[DDIR_TRIM] = t->bytes; + + td->o.size += t->bytes; + + INIT_FLIST_HEAD(&ipo->list); + + ipo->offset = t->sector * bs; + if (td->o.replay_scale) + ipo->offset = ipo->offset / td->o.replay_scale; + ipo_bytes_align(td->o.replay_align, ipo); + ipo->len = t->bytes; + ipo->delay = ttime / 1000; + ipo->ddir = DDIR_TRIM; + ipo->fileno = fileno; + + dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n", + ipo->offset, ipo->len, + ipo->delay); + queue_io_piece(td, ipo); +} + +static void dump_trace(struct blk_io_trace *t) +{ + log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action); +} + +static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t, + unsigned long long ttime, unsigned long *ios, + unsigned int *rw_bs) +{ + unsigned int bs; + int rw; + int fileno; + + fileno = trace_add_file(td, t->device, &bs); + + rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0; + + if (rw) { + if (td->o.replay_skip & (1u << DDIR_WRITE)) + return; + } else { + if (td->o.replay_skip & (1u << DDIR_READ)) + return; + } + + if (!t->bytes) { + if (!fio_did_warn(FIO_WARN_BTRACE_ZERO)) + dump_trace(t); + return; + } + + if (t->bytes > rw_bs[rw]) + rw_bs[rw] = t->bytes; + + ios[rw]++; + td->o.size += t->bytes; + store_ipo(td, t->sector, t->bytes, rw, ttime, fileno, bs); +} + +static void handle_trace_flush(struct thread_data *td, struct blk_io_trace *t, + unsigned long long ttime, unsigned long *ios) +{ + struct io_piece *ipo; + unsigned int bs; + int fileno; + + if (td->o.replay_skip & (1u << DDIR_SYNC)) + return; + + ipo = calloc(1, sizeof(*ipo)); + init_ipo(ipo); + fileno = trace_add_file(td, t->device, &bs); - list_add_tail(&ipo->list, &td->io_log_list); + ipo->delay = ttime / 1000; + ipo->ddir = DDIR_SYNC; + ipo->fileno = fileno; + + ios[DDIR_SYNC]++; + dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay); + queue_io_piece(td, ipo); } /* @@ -124,60 +370,131 @@ static void store_ipo(struct thread_data *td, unsigned long long offset, * due to internal workings of the block layer. 
*/ static void handle_trace(struct thread_data *td, struct blk_io_trace *t, - unsigned long long ttime, unsigned long *ios, - unsigned int *bs) + unsigned long *ios, unsigned int *bs) { - int rw; + static unsigned long long last_ttime; + unsigned long long delay = 0; if ((t->action & 0xffff) != __BLK_TA_QUEUE) return; - if (t->action & BLK_TC_ACT(BLK_TC_PC)) - return; - /* - * should not happen, need to look into that... - */ - if (!t->bytes) - return; + if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) { + if (!last_ttime || td->o.no_stall) + delay = 0; + else if (td->o.replay_time_scale == 100) + delay = t->time - last_ttime; + else { + double tmp = t->time - last_ttime; + double scale; + + scale = (double) 100.0 / (double) td->o.replay_time_scale; + tmp *= scale; + delay = tmp; + } + last_ttime = t->time; + } - rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0; + t_bytes_align(&td->o, t); - if (t->bytes > bs[rw]) - bs[rw] = t->bytes; + if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY)) + handle_trace_notify(t); + else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD)) + handle_trace_discard(td, t, delay, ios, bs); + else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH)) + handle_trace_flush(td, t, delay, ios); + else + handle_trace_fs(td, t, delay, ios, bs); +} - ios[rw]++; - td->o.size += t->bytes; - store_ipo(td, t->sector, t->bytes, rw, ttime); +static void byteswap_trace(struct blk_io_trace *t) +{ + t->magic = fio_swap32(t->magic); + t->sequence = fio_swap32(t->sequence); + t->time = fio_swap64(t->time); + t->sector = fio_swap64(t->sector); + t->bytes = fio_swap32(t->bytes); + t->action = fio_swap32(t->action); + t->pid = fio_swap32(t->pid); + t->device = fio_swap32(t->device); + t->cpu = fio_swap32(t->cpu); + t->error = fio_swap16(t->error); + t->pdu_len = fio_swap16(t->pdu_len); +} + +static bool t_is_write(struct blk_io_trace *t) +{ + return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0; +} + +static enum fio_ddir t_get_ddir(struct blk_io_trace *t) +{ + if (t->action & BLK_TC_ACT(BLK_TC_READ)) + return DDIR_READ; + else if (t->action & BLK_TC_ACT(BLK_TC_WRITE)) + return DDIR_WRITE; + else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD)) + return DDIR_TRIM; + + return DDIR_INVAL; +} + +static void depth_inc(struct blk_io_trace *t, int *depth) +{ + enum fio_ddir ddir; + + ddir = t_get_ddir(t); + if (ddir != DDIR_INVAL) + depth[ddir]++; +} + +static void depth_dec(struct blk_io_trace *t, int *depth) +{ + enum fio_ddir ddir; + + ddir = t_get_ddir(t); + if (ddir != DDIR_INVAL) + depth[ddir]--; +} + +static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth) +{ + enum fio_ddir ddir = DDIR_INVAL; + + ddir = t_get_ddir(t); + if (ddir != DDIR_INVAL) { + depth[ddir] = max(depth[ddir], this_depth[ddir]); + this_depth[ddir] = 0; + } } /* * Load a blktrace file by reading all the blk_io_trace entries, and storing * them as io_pieces like the fio text version would do. 
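+ * Also track the queue depth seen per data direction, so that a
+ * suitable iodepth can be probed for the replay.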
*/ -int load_blktrace(struct thread_data *td, const char *filename) +bool load_blktrace(struct thread_data *td, const char *filename, int need_swap) { - unsigned long long ttime, delay; struct blk_io_trace t; - unsigned long ios[2]; - unsigned int cpu; - unsigned int rw_bs[2]; + unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { }; + unsigned int rw_bs[DDIR_RWDIR_CNT] = { }; + unsigned long skipped_writes; struct fifo *fifo; - int fd; + int fd, i, old_state, max_depth; + struct fio_file *f; + int this_depth[DDIR_RWDIR_CNT] = { }; + int depth[DDIR_RWDIR_CNT] = { }; fd = open(filename, O_RDONLY); if (fd < 0) { td_verror(td, errno, "open blktrace file"); - return 1; + return false; } fifo = fifo_alloc(TRACE_FIFO_SIZE); - td->o.size = 0; + old_state = td_bump_runstate(td, TD_SETTING_UP); - cpu = 0; - ttime = 0; - ios[0] = ios[1] = 0; - rw_bs[0] = rw_bs[1] = 0; + td->o.size = 0; + skipped_writes = 0; do { int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t)); @@ -190,12 +507,17 @@ int load_blktrace(struct thread_data *td, const char *filename) break; } + if (need_swap) + byteswap_trace(&t); + if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) { - log_err("fio: bad magic in blktrace data: %x\n", t.magic); + log_err("fio: bad magic in blktrace data: %x\n", + t.magic); goto err; } if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) { - log_err("fio: bad blktrace version %d\n", t.magic & 0xff); + log_err("fio: bad blktrace version %d\n", + t.magic & 0xff); goto err; } ret = discard_pdu(td, fifo, fd, &t); @@ -206,25 +528,59 @@ int load_blktrace(struct thread_data *td, const char *filename) log_err("fio: discarded %d of %d\n", ret, t.pdu_len); goto err; } - if (!ttime) { - ttime = t.time; - cpu = t.cpu; + if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) { + if ((t.action & 0xffff) == __BLK_TA_QUEUE) + depth_inc(&t, this_depth); + else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) || + ((t.action & 0xffff) == __BLK_TA_FRONTMERGE)) + depth_dec(&t, this_depth); + else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) + depth_end(&t, this_depth, depth); + + if (t_is_write(&t) && read_only) { + skipped_writes++; + continue; + } } - delay = 0; - if (cpu == t.cpu) - delay = t.time - ttime; - handle_trace(td, &t, delay, ios, rw_bs); - ttime = t.time; - cpu = t.cpu; + + handle_trace(td, &t, ios, rw_bs); } while (1); + for_each_file(td, f, i) + trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE); + fifo_free(fifo); close(fd); - if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) { + td_restore_runstate(td, old_state); + + if (!td->files_index) { + log_err("fio: did not find replay device(s)\n"); + return false; + } + + /* + * For stacked devices, we don't always get a COMPLETE event so + * the depth grows to insane values. Limit it to something sane(r). 
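+	 * Any direction that saw I/O but never a COMPLETE still gets a
+	 * depth of at least 1, so the probed value stays usable.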
+ */ + max_depth = 0; + for (i = 0; i < DDIR_RWDIR_CNT; i++) { + if (depth[i] > 1024) + depth[i] = 1024; + else if (!depth[i] && ios[i]) + depth[i] = 1; + max_depth = max(depth[i], max_depth); + } + + if (skipped_writes) + log_err("fio: %s skips replay of %lu writes due to read-only\n", + td->o.name, skipped_writes); + + if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] && + !ios[DDIR_SYNC]) { log_err("fio: found no ios in blktrace data\n"); - return 1; - } else if (ios[DDIR_READ] && !ios[DDIR_READ]) { + return false; + } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) { td->o.td_ddir = TD_DDIR_READ; td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ]; } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) { @@ -234,17 +590,26 @@ int load_blktrace(struct thread_data *td, const char *filename) td->o.td_ddir = TD_DDIR_RW; td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ]; td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE]; + td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM]; } /* * We need to do direct/raw ios to the device, to avoid getting - * read-ahead in our way. + * read-ahead in our way. But only do so if the minimum block size + * is a multiple of 4k, otherwise we don't know if it's safe to do so. */ - td->o.odirect = 1; + if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095)) + td->o.odirect = 1; - return 0; + /* + * If depth wasn't manually set, use probed depth + */ + if (!fio_option_is_set(&td->o, iodepth)) + td->o.iodepth = td->o.iodepth_low = max_depth; + + return true; err: close(fd); fifo_free(fifo); - return 1; + return false; }