#include <errno.h>
#include <signal.h>
#include <locale.h>
-#include <limits.h>
+#include <libgen.h>
#include "blktrace.h"
#include "rbtree.h"
#include "jhash.h"
-static char blkparse_version[] = "0.90";
+static char blkparse_version[] = "0.99";
+
+struct skip_info {
+ unsigned long start, end;
+ struct skip_info *prev, *next;
+};
struct per_dev_info {
dev_t dev;
int backwards;
unsigned long long events;
+ unsigned long long first_reported_time;
unsigned long long last_reported_time;
unsigned long long last_read_time;
struct io_stats io_stats;
- unsigned long last_sequence;
- unsigned long skips;
-
- struct rb_root rb_last;
- unsigned long rb_last_entries;
+ unsigned long skips, nskips;
+ unsigned long long seq_skips, seq_nskips;
+ unsigned int max_depth[2];
+ unsigned int cur_depth[2];
struct rb_root rb_track;
int nfiles;
int ncpus;
+
+ unsigned long *cpu_map;
+ unsigned int cpu_map_max;
+
struct per_cpu_info *cpus;
};
static struct per_process_info *ppi_list;
static int ppi_list_entries;
-#define S_OPTS "i:o:b:stqw:f:F:vnmD:"
+#define S_OPTS "a:A:i:o:b:stqw:f:F:vVhD:"
static struct option l_opts[] = {
+ {
+ .name = "act-mask",
+ .has_arg = required_argument,
+ .flag = NULL,
+ .val = 'a'
+ },
+ {
+ .name = "set-mask",
+ .has_arg = required_argument,
+ .flag = NULL,
+ .val = 'A'
+ },
{
.name = "input",
.has_arg = required_argument,
.val = 'b'
},
{
- .name = "per program stats",
+ .name = "per-program-stats",
.has_arg = no_argument,
.flag = NULL,
.val = 's'
},
{
- .name = "track ios",
+ .name = "track-ios",
.has_arg = no_argument,
.flag = NULL,
.val = 't'
.val = 'F'
},
{
- .name = "hash by name",
+ .name = "hash-by-name",
.has_arg = no_argument,
.flag = NULL,
- .val = 'n'
+ .val = 'h'
},
{
- .name = "missing",
+ .name = "verbose",
.has_arg = no_argument,
.flag = NULL,
- .val = 'm'
+ .val = 'v'
},
{
.name = "version",
.has_arg = no_argument,
.flag = NULL,
- .val = 'v'
+ .val = 'V'
},
{
- .name = "input directory",
+ .name = "input-directory",
.has_arg = required_argument,
.flag = NULL,
.val = 'D'
},
+ {
+ .name = NULL,
+ }
};
/*
static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
+static int trace_rb_insert_last(struct per_dev_info *, struct trace *);
FILE *ofp = NULL;
static char *output_name;
static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
-static unsigned int smallest_seq_read;
static unsigned long long stopwatch_start; /* start from zero by default */
-static unsigned long long stopwatch_end = ULONG_LONG_MAX; /* "infinity" */
+static unsigned long long stopwatch_end = -1ULL; /* "infinity" */
static int per_process_stats;
+static int per_device_and_cpu_stats = 1;
static int track_ios;
static int ppi_hash_by_pid = 1;
-static int print_missing;
+static int verbose;
+static unsigned int act_mask = -1U;
+static int stats_printed;
static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;
#define JHASH_RANDOM (0x3af5f2ee)
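+/*
+ * per-device online-cpu bitmap helpers: each unsigned long in
+ * pdi->cpu_map holds CPUS_PER_LONG cpu bits
+ */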
+#define CPUS_PER_LONG (8 * sizeof(unsigned long))
+#define CPU_IDX(cpu) ((cpu) / CPUS_PER_LONG)
+#define CPU_BIT(cpu) ((cpu) & (CPUS_PER_LONG - 1))
+
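+/*
+ * grow the per-cpu info array of a device to hold at least (cpu + 1)
+ * entries, zeroing the new slots and marking them as not yet opened
+ */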
+static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
+{
+ struct per_cpu_info *cpus = pdi->cpus;
+ int ncpus = pdi->ncpus;
+ int new_count = cpu + 1;
+ int new_space, size;
+ char *new_start;
+
+ size = new_count * sizeof(struct per_cpu_info);
+ cpus = realloc(cpus, size);
+ if (!cpus) {
+ char name[20];
+ fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
+ get_dev_name(pdi, name, sizeof(name)), size);
+ exit(1);
+ }
+
+ new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
+ new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
+ memset(new_start, 0, new_space);
+
+ pdi->ncpus = new_count;
+ pdi->cpus = cpus;
+
+ for (new_count = 0; new_count < pdi->ncpus; new_count++) {
+ struct per_cpu_info *pci = &pdi->cpus[new_count];
+
+ if (!pci->fd) {
+ pci->fd = -1;
+ memset(&pci->rb_last, 0, sizeof(pci->rb_last));
+ pci->rb_last_entries = 0;
+ pci->last_sequence = -1;
+ }
+ }
+}
+
+static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
+{
+ struct per_cpu_info *pci;
+
+ if (cpu >= pdi->ncpus)
+ resize_cpu_info(pdi, cpu);
+
+ pci = &pdi->cpus[cpu];
+ pci->cpu = cpu;
+ return pci;
+}
+
+
+static int resize_devices(char *name)
+{
+ int size = (ndevices + 1) * sizeof(struct per_dev_info);
+
+ devices = realloc(devices, size);
+ if (!devices) {
+ fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
+ return 1;
+ }
+ memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
+ devices[ndevices].name = name;
+ ndevices++;
+ return 0;
+}
+
+static struct per_dev_info *get_dev_info(dev_t dev)
+{
+ struct per_dev_info *pdi;
+ int i;
+
+ for (i = 0; i < ndevices; i++) {
+ if (!devices[i].dev)
+ devices[i].dev = dev;
+ if (devices[i].dev == dev)
+ return &devices[i];
+ }
+
+ if (resize_devices(NULL))
+ return NULL;
+
+ pdi = &devices[ndevices - 1];
+ pdi->dev = dev;
+ pdi->first_reported_time = 0;
+ pdi->last_read_time = 0;
+
+ return pdi;
+}
+
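+/*
+ * remember a range [start, end] of missing sequence numbers for this
+ * cpu, merging with an existing gap if the new range extends it
+ */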
+static void insert_skip(struct per_cpu_info *pci, unsigned long start,
+ unsigned long end)
+{
+ struct skip_info *sip;
+
+ for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
+ if (end == (sip->start - 1)) {
+ sip->start = start;
+ return;
+ } else if (start == (sip->end + 1)) {
+ sip->end = end;
+ return;
+ }
+ }
+
+ sip = malloc(sizeof(struct skip_info));
+ sip->start = start;
+ sip->end = end;
+ sip->prev = sip->next = NULL;
+ if (pci->skips_tail == NULL)
+ pci->skips_head = pci->skips_tail = sip;
+ else {
+ sip->prev = pci->skips_tail;
+ pci->skips_tail->next = sip;
+ pci->skips_tail = sip;
+ }
+}
+
+static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
+{
+ if (sip->prev == NULL) {
+ if (sip->next == NULL)
+ pci->skips_head = pci->skips_tail = NULL;
+ else {
+ pci->skips_head = sip->next;
+ sip->next->prev = NULL;
+ }
+ } else if (sip->next == NULL) {
+ pci->skips_tail = sip->prev;
+ sip->prev->next = NULL;
+ } else {
+ sip->prev->next = sip->next;
+ sip->next->prev = sip->prev;
+ }
+
+ sip->prev = sip->next = NULL;
+ free(sip);
+}
+
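+/*
+ * if this sequence falls inside a previously recorded gap, shrink or
+ * split that gap and treat the sequence as accounted for
+ */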
+#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= (sip)->end))
+static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
+{
+ struct skip_info *sip;
+
+ for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
+ if (IN_SKIP(sip, seq)) {
+ if (sip->start == seq) {
+ if (sip->end == seq)
+ remove_sip(pci, sip);
+ else
+ sip->start += 1;
+ } else if (sip->end == seq)
+ sip->end -= 1;
+ else {
+ /* split the gap: keep [start, seq - 1], re-add [seq + 1, end] */
+ unsigned long end = sip->end;
+
+ sip->end = seq - 1;
+ insert_skip(pci, seq + 1, end);
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
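+/*
+ * roll the per-cpu skip lists up into per-device totals for reporting
+ */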
+static void collect_pdi_skips(struct per_dev_info *pdi)
+{
+ struct skip_info *sip;
+ int cpu;
+
+ pdi->skips = 0;
+ pdi->seq_skips = 0;
+
+ for (cpu = 0; cpu < pdi->ncpus; cpu++) {
+ struct per_cpu_info *pci = &pdi->cpus[cpu];
+
+ for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
+ pdi->skips++;
+ pdi->seq_skips += (sip->end - sip->start + 1);
+ if (verbose)
+ fprintf(stderr,"(%d,%d): skipping %lu -> %lu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ sip->start, sip->end);
+ }
+ }
+}
+
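+/*
+ * note that we have seen traces for this cpu on this device, growing
+ * the bitmap in CPUS_PER_LONG-sized chunks as needed
+ */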
+static void cpu_mark_online(struct per_dev_info *pdi, unsigned int cpu)
+{
+ if (cpu >= pdi->cpu_map_max || !pdi->cpu_map) {
+ int new_max = (cpu + CPUS_PER_LONG) & ~(CPUS_PER_LONG - 1);
+ unsigned long *map = malloc(new_max / sizeof(long));
+
+ memset(map, 0, new_max / sizeof(long));
+
+ if (pdi->cpu_map) {
+ memcpy(map, pdi->cpu_map, pdi->cpu_map_max / sizeof(long));
+ free(pdi->cpu_map);
+ }
+
+ pdi->cpu_map = map;
+ pdi->cpu_map_max = new_max;
+ }
+
+ pdi->cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
+}
+
+static inline void cpu_mark_offline(struct per_dev_info *pdi, int cpu)
+{
+ pdi->cpu_map[CPU_IDX(cpu)] &= ~(1UL << CPU_BIT(cpu));
+}
+
+static inline int cpu_is_online(struct per_dev_info *pdi, int cpu)
+{
+ return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
+}
+
static inline int ppi_hash_pid(__u32 pid)
{
return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
return ppi;
}
-static inline int trace_rb_insert(struct trace *t, struct rb_root *root,
- int check_time)
+/*
+ * struct trace and blktrace allocation cache, we do potentially
+ * millions of mallocs for these structures while only using at most
+ * a few thousand at a time
+ */
+static inline void t_free(struct trace *t)
+{
+ if (t_alloc_cache < 1024) {
+ t->next = t_alloc_list;
+ t_alloc_list = t;
+ t_alloc_cache++;
+ } else
+ free(t);
+}
+
+static inline struct trace *t_alloc(void)
+{
+ struct trace *t = t_alloc_list;
+
+ if (t) {
+ t_alloc_list = t->next;
+ t_alloc_cache--;
+ return t;
+ }
+
+ return malloc(sizeof(*t));
+}
+
+static inline void bit_free(struct blk_io_trace *bit)
+{
+ if (bit_alloc_cache < 1024 && !bit->pdu_len) {
+ /*
+ * abuse a 64-bit field for a next pointer for the free item
+ */
+ bit->time = (__u64) (unsigned long) bit_alloc_list;
+ bit_alloc_list = (struct blk_io_trace *) bit;
+ bit_alloc_cache++;
+ } else
+ free(bit);
+}
+
+static inline struct blk_io_trace *bit_alloc(void)
+{
+ struct blk_io_trace *bit = bit_alloc_list;
+
+ if (bit) {
+ bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
+ bit->time;
+ bit_alloc_cache--;
+ return bit;
+ }
+
+ return malloc(sizeof(*bit));
+}
+
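+/*
+ * drop a trace from the per-cpu "last" tree and release its memory
+ */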
+static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
+{
+ struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
+
+ rb_erase(&t->rb_node, &pci->rb_last);
+ pci->rb_last_entries--;
+
+ bit_free(t->bit);
+ t_free(t);
+}
+
+static void put_trace(struct per_dev_info *pdi, struct trace *t)
+{
+ rb_erase(&t->rb_node, &rb_sort_root);
+ rb_sort_entries--;
+
+ trace_rb_insert_last(pdi, t);
+}
+
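+/*
+ * insert into an rbtree ordered by event time, using the device id to
+ * break ties between identical timestamps
+ */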
+static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
__t = rb_entry(parent, struct trace, rb_node);
- if (check_time) {
- if (t->bit->time < __t->bit->time) {
- p = &(*p)->rb_left;
- continue;
- } else if (t->bit->time > __t->bit->time) {
- p = &(*p)->rb_right;
- continue;
- }
- }
- if (t->bit->device < __t->bit->device)
+ if (t->bit->time < __t->bit->time)
+ p = &(*p)->rb_left;
+ else if (t->bit->time > __t->bit->time)
+ p = &(*p)->rb_right;
+ else if (t->bit->device < __t->bit->device)
p = &(*p)->rb_left;
else if (t->bit->device > __t->bit->device)
p = &(*p)->rb_right;
static inline int trace_rb_insert_sort(struct trace *t)
{
- if (!trace_rb_insert(t, &rb_sort_root, 1)) {
+ if (!trace_rb_insert(t, &rb_sort_root)) {
rb_sort_entries++;
return 0;
}
return 1;
}
-static inline int trace_rb_insert_last(struct per_dev_info *pdi,struct trace *t)
+static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
- if (!trace_rb_insert(t, &pdi->rb_last, 1)) {
- pdi->rb_last_entries++;
- return 0;
+ struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
+
+ if (trace_rb_insert(t, &pci->rb_last))
+ return 1;
+
+ pci->rb_last_entries++;
+
+ if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
+ struct rb_node *n = rb_first(&pci->rb_last);
+
+ t = rb_entry(n, struct trace, rb_node);
+ __put_trace_last(pdi, t);
}
- return 1;
+ return 0;
}
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
while (((n = rb_next(prev)) != NULL) && max--) {
__t = rb_entry(n, struct trace, rb_node);
-
+
if (__t->bit->device == device &&
__t->bit->sequence == sequence)
return __t;
prev = n;
}
}
-
- return NULL;
-}
-static inline struct trace *trace_rb_find_sort(dev_t dev, unsigned long seq)
-{
- return trace_rb_find(dev, seq, &rb_sort_root, 1);
+ return NULL;
}
static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
+ struct per_cpu_info *pci,
unsigned long seq)
{
- return trace_rb_find(pdi->dev, seq, &pdi->rb_last, 0);
+ return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}
static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
if (!track_ios)
return;
- iot = __find_track(pdi, t->sector + (t->bytes >> 9));
+ iot = __find_track(pdi, t->sector + t_sec(t));
if (!iot) {
- fprintf(stderr, "merge not found for (%d,%d): %llu\n",
- MAJOR(pdi->dev), MINOR(pdi->dev),
- (unsigned long long) t->sector + (t->bytes >> 9));
+ if (verbose)
+ fprintf(stderr, "merge not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector + t_sec(t));
return;
}
rb_erase(&iot->rb_node, &pdi->rb_track);
- iot->sector -= t->bytes >> 9;
+ iot->sector -= t_sec(t);
track_rb_insert(pdi, iot);
}
iot = __find_track(pdi, t->sector);
if (!iot) {
- fprintf(stderr, "issue not found for (%d,%d): %llu\n",
- MAJOR(pdi->dev), MINOR(pdi->dev),
- (unsigned long long) t->sector);
+ if (verbose)
+ fprintf(stderr, "issue not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector);
return -1;
}
iot = __find_track(pdi, t->sector);
if (!iot) {
- fprintf(stderr, "complete not found for (%d,%d): %llu\n",
- MAJOR(pdi->dev), MINOR(pdi->dev),
- (unsigned long long) t->sector);
+ if (verbose)
+ fprintf(stderr,"complete not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector);
return -1;
}
return &ppi->io_stats;
}
-static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
-{
- struct per_cpu_info *cpus = pdi->cpus;
- int ncpus = pdi->ncpus;
- int new_count = cpu + 1;
- int new_space, size;
- char *new_start;
-
- size = new_count * sizeof(struct per_cpu_info);
- cpus = realloc(cpus, size);
- if (!cpus) {
- char name[20];
- fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
- get_dev_name(pdi, name, sizeof(name)), size);
- exit(1);
- }
-
- new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
- new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
- memset(new_start, 0, new_space);
-
- pdi->ncpus = new_count;
- pdi->cpus = cpus;
-}
-
-static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
-{
- struct per_cpu_info *pci;
-
- if (cpu >= pdi->ncpus)
- resize_cpu_info(pdi, cpu);
-
- pci = &pdi->cpus[cpu];
- pci->cpu = cpu;
- return pci;
-}
-
-
-static int resize_devices(char *name)
-{
- int size = (ndevices + 1) * sizeof(struct per_dev_info);
-
- devices = realloc(devices, size);
- if (!devices) {
- fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
- return 1;
- }
- memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
- devices[ndevices].name = name;
- ndevices++;
- return 0;
-}
-
-static struct per_dev_info *get_dev_info(dev_t dev)
-{
- struct per_dev_info *pdi;
- int i;
-
- for (i = 0; i < ndevices; i++) {
- if (!devices[i].dev)
- devices[i].dev = dev;
- if (devices[i].dev == dev)
- return &devices[i];
- }
-
- if (resize_devices(NULL))
- return NULL;
-
- pdi = &devices[ndevices - 1];
- pdi->dev = dev;
- pdi->last_sequence = -1;
- pdi->last_read_time = 0;
- memset(&pdi->rb_last, 0, sizeof(pdi->rb_last));
- pdi->rb_last_entries = 0;
- return pdi;
-}
-
static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
if (pdi->name)
{
if (rw) {
ios->mwrites++;
- ios->qwrite_kb += t->bytes >> 10;
+ ios->qwrite_kb += t_kb(t);
} else {
ios->mreads++;
- ios->qread_kb += t->bytes >> 10;
+ ios->qread_kb += t_kb(t);
}
}
{
if (rw) {
ios->qwrites++;
- ios->qwrite_kb += t->bytes >> 10;
+ ios->qwrite_kb += t_kb(t);
} else {
ios->qreads++;
- ios->qread_kb += t->bytes >> 10;
+ ios->qread_kb += t_kb(t);
}
}
}
}
-static inline void __account_c(struct io_stats *ios, int rw, unsigned int bytes)
+static inline void __account_c(struct io_stats *ios, int rw, int bytes)
{
if (rw) {
ios->cwrites++;
}
}
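+/*
+ * a requeue undoes the dispatch accounting: bump the requeue count and
+ * take back the kilobytes credited at issue time
+ */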
+static inline void __account_requeue(struct io_stats *ios,
+ struct blk_io_trace *t, int rw)
+{
+ if (rw) {
+ ios->wrqueue++;
+ ios->iwrite_kb -= t_kb(t);
+ } else {
+ ios->rrqueue++;
+ ios->iread_kb -= t_kb(t);
+ }
+}
+
+static inline void account_requeue(struct blk_io_trace *t,
+ struct per_cpu_info *pci, int rw)
+{
+ __account_requeue(&pci->io_stats, t, rw);
+
+ if (per_process_stats) {
+ struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+
+ __account_requeue(ios, t, rw);
+ }
+}
+
static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
struct blk_io_trace *t, char *act)
{
static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
struct per_cpu_info *pci)
{
- int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
+ int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
int act = t->action & 0xffff;
switch (act) {
log_generic(pci, t, "S");
break;
case __BLK_TA_REQUEUE:
- account_c(t, pci, w, -t->bytes);
+ pdi->cur_depth[w]--;
+ account_requeue(t, pci, w);
log_queue(pci, t, "R");
break;
case __BLK_TA_ISSUE:
account_issue(t, pci, w);
+ pdi->cur_depth[w]++;
+ if (pdi->cur_depth[w] > pdi->max_depth[w])
+ pdi->max_depth[w] = pdi->cur_depth[w];
log_issue(pdi, pci, t, "D");
break;
case __BLK_TA_COMPLETE:
+ pdi->cur_depth[w]--;
account_c(t, pci, w, t->bytes);
log_complete(pdi, pci, t, "C");
break;
case __BLK_TA_BOUNCE:
log_generic(pci, t, "B");
break;
+ case __BLK_TA_REMAP:
+ log_generic(pci, t, "A");
+ break;
default:
fprintf(stderr, "Bad fs action %x\n", t->action);
break;
else
dump_trace_fs(t, pdi, pci);
+ if (!pdi->events)
+ pdi->first_reported_time = t->time;
+
pdi->events++;
}
-static void dump_io_stats(struct io_stats *ios, char *msg)
+/*
+ * print in a proper way, not too small and not too big: once a value
+ * exceeds 1,000,000 in its current unit, move up to the next suffix
+ * (K, M, G, ...)
+ */
+static char *size_cnv(char *dst, unsigned long long num, int in_kb)
+{
+ char suff[] = { '\0', 'K', 'M', 'G', 'P' };
+ unsigned int i = 0;
+
+ if (in_kb)
+ i++;
+
+ while (num > 1000 * 1000ULL && (i < sizeof(suff) - 1)) {
+ i++;
+ num /= 1000;
+ }
+
+ sprintf(dst, "%'8Lu%c", num, suff[i]);
+ return dst;
+}
+
+static void dump_io_stats(struct per_dev_info *pdi, struct io_stats *ios,
+ char *msg)
{
+ static char x[256], y[256];
+
fprintf(ofp, "%s\n", msg);
- fprintf(ofp, " Reads Queued: %'8lu, %'8LuKiB\t", ios->qreads, ios->qread_kb);
- fprintf(ofp, " Writes Queued: %'8lu, %'8LuKiB\n", ios->qwrites,ios->qwrite_kb);
+ fprintf(ofp, " Reads Queued: %s, %siB\t", size_cnv(x, ios->qreads, 0), size_cnv(y, ios->qread_kb, 1));
+ fprintf(ofp, " Writes Queued: %s, %siB\n", size_cnv(x, ios->qwrites, 0), size_cnv(y, ios->qwrite_kb, 1));
- fprintf(ofp, " Read Dispatches: %'8lu, %'8LuKiB\t", ios->ireads, ios->iread_kb);
- fprintf(ofp, " Write Dispatches: %'8lu, %'8LuKiB\n", ios->iwrites,ios->iwrite_kb);
- fprintf(ofp, " Reads Completed: %'8lu, %'8LuKiB\t", ios->creads, ios->cread_kb);
- fprintf(ofp, " Writes Completed: %'8lu, %'8LuKiB\n", ios->cwrites,ios->cwrite_kb);
+ fprintf(ofp, " Read Dispatches: %s, %siB\t", size_cnv(x, ios->ireads, 0), size_cnv(y, ios->iread_kb, 1));
+ fprintf(ofp, " Write Dispatches: %s, %siB\n", size_cnv(x, ios->iwrites, 0), size_cnv(y, ios->iwrite_kb, 1));
+ fprintf(ofp, " Reads Requeued: %s\t\t", size_cnv(x, ios->rrqueue, 0));
+ fprintf(ofp, " Writes Requeued: %s\n", size_cnv(x, ios->wrqueue, 0));
+ fprintf(ofp, " Reads Completed: %s, %siB\t", size_cnv(x, ios->creads, 0), size_cnv(y, ios->cread_kb, 1));
+ fprintf(ofp, " Writes Completed: %s, %siB\n", size_cnv(x, ios->cwrites, 0), size_cnv(y, ios->cwrite_kb, 1));
fprintf(ofp, " Read Merges: %'8lu%8c\t", ios->mreads, ' ');
fprintf(ofp, " Write Merges: %'8lu\n", ios->mwrites);
+ if (pdi) {
+ fprintf(ofp, " Read depth: %'8u%8c\t", pdi->max_depth[0], ' ');
+ fprintf(ofp, " Write depth: %'8u\n", pdi->max_depth[1]);
+ }
fprintf(ofp, " IO unplugs: %'8lu%8c\t", ios->io_unplugs, ' ');
fprintf(ofp, " Timer unplugs: %'8lu\n", ios->timer_unplugs);
}
else
sprintf(name, "%s (%u)", ppi->name, ppi->pid);
- dump_io_stats(&ppi->io_stats, name);
+ dump_io_stats(NULL, &ppi->io_stats, name);
dump_wait_stats(ppi);
ppi = ppi->list_next;
}
struct per_dev_info *pdi;
struct per_cpu_info *pci;
struct io_stats total, *ios;
+ unsigned long long rrate, wrate, msec;
int i, j, pci_events;
char line[3 + 8/*cpu*/ + 2 + 32/*dev*/ + 3];
char name[32];
total.mwrites += ios->mwrites;
total.ireads += ios->ireads;
total.iwrites += ios->iwrites;
+ total.rrqueue += ios->rrqueue;
+ total.wrqueue += ios->wrqueue;
total.qread_kb += ios->qread_kb;
total.qwrite_kb += ios->qwrite_kb;
total.cread_kb += ios->cread_kb;
snprintf(line, sizeof(line) - 1, "CPU%d (%s):",
j, get_dev_name(pdi, name, sizeof(name)));
- dump_io_stats(ios, line);
+ dump_io_stats(pdi, ios, line);
pci_events++;
}
fprintf(ofp, "\n");
snprintf(line, sizeof(line) - 1, "Total (%s):",
get_dev_name(pdi, name, sizeof(name)));
- dump_io_stats(&total, line);
+ dump_io_stats(NULL, &total, line);
}
- fprintf(ofp, "\nEvents (%s): %'Lu entries, %'lu skips\n",
- get_dev_name(pdi, line, sizeof(line)), pdi->events,
- pdi->skips);
- }
-}
-
-/*
- * struct trace and blktrace allocation cache, we do potentially
- * millions of mallocs for these structures while only using at most
- * a few thousand at the time
- */
-static inline void t_free(struct trace *t)
-{
- if (t_alloc_cache < 1024) {
- t->next = t_alloc_list;
- t_alloc_list = t;
- t_alloc_cache++;
- } else
- free(t);
-}
-
-static inline struct trace *t_alloc(void)
-{
- struct trace *t = t_alloc_list;
-
- if (t) {
- t_alloc_list = t->next;
- t_alloc_cache--;
- return t;
- }
-
- return malloc(sizeof(*t));
-}
-
-static inline void bit_free(struct blk_io_trace *bit)
-{
- if (bit_alloc_cache < 1024) {
- /*
- * abuse a 64-bit field for a next pointer for the free item
- */
- bit->time = (__u64) (unsigned long) bit_alloc_list;
- bit_alloc_list = (struct blk_io_trace *) bit;
- bit_alloc_cache++;
- } else
- free(bit);
-}
+ wrate = rrate = 0;
+ msec = (pdi->last_reported_time - pdi->first_reported_time) / 1000000;
+ if (msec) {
+ rrate = 1000 * total.cread_kb / msec;
+ wrate = 1000 * total.cwrite_kb / msec;
+ }
-static inline struct blk_io_trace *bit_alloc(void)
-{
- struct blk_io_trace *bit = bit_alloc_list;
+ fprintf(ofp, "\nThroughput (R/W): %'LuKiB/s / %'LuKiB/s\n",
+ rrate, wrate);
+ fprintf(ofp, "Events (%s): %'Lu entries\n",
+ get_dev_name(pdi, line, sizeof(line)), pdi->events);
- if (bit) {
- bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
- bit->time;
- bit_alloc_cache--;
- return bit;
+ collect_pdi_skips(pdi);
+ fprintf(ofp, "Skips: %'lu forward (%'llu - %5.1lf%%)\n",
+ pdi->skips,pdi->seq_skips,
+ 100.0 * ((double)pdi->seq_skips /
+ (double)(pdi->events + pdi->seq_skips)));
}
-
- return malloc(sizeof(*bit));
}
static void find_genesis(void)
*/
static int sort_entries(unsigned long long *youngest)
{
+ struct per_dev_info *pdi = NULL;
+ struct per_cpu_info *pci = NULL;
struct trace *t;
if (!genesis_time)
if (bit->time < *youngest || !*youngest)
*youngest = bit->time;
+ if (!pdi || pdi->dev != bit->device) {
+ pdi = get_dev_info(bit->device);
+ pci = NULL;
+ }
+
+ if (!pci || pci->cpu != bit->cpu)
+ pci = get_cpu_info(pdi, bit->cpu);
+
+ if (bit->sequence < pci->smallest_seq_read)
+ pci->smallest_seq_read = bit->sequence;
+
if (check_stopwatch(bit)) {
bit_free(bit);
t_free(t);
if (trace_rb_insert_sort(t))
return -1;
-
- if (bit->sequence < smallest_seq_read)
- smallest_seq_read = bit->sequence;
}
return 0;
}
-static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
+/*
+ * to continue, we must have traces from all online cpus in the tree
+ */
+static int check_cpu_map(struct per_dev_info *pdi)
{
- rb_erase(&t->rb_node, &pdi->rb_last);
- pdi->rb_last_entries--;
+ unsigned long *cpu_map;
+ struct rb_node *n;
+ struct trace *__t;
+ unsigned int i;
+ int ret, cpu;
- bit_free(t->bit);
- t_free(t);
+ /*
+ * create a map of the cpus we have traces for
+ */
+ cpu_map = malloc(pdi->cpu_map_max / sizeof(long));
+ memset(cpu_map, 0, pdi->cpu_map_max / sizeof(long));
+ n = rb_first(&rb_sort_root);
+ while (n) {
+ __t = rb_entry(n, struct trace, rb_node);
+ cpu = __t->bit->cpu;
+
+ cpu_map[CPU_IDX(cpu)] |= (1UL << CPU_BIT(cpu));
+ n = rb_next(n);
+ }
+
+ /*
+ * we can't continue if pdi->cpu_map has entries set that we don't
+ * have in the sort rbtree. the opposite is not a problem, though
+ */
+ ret = 0;
+ for (i = 0; i < pdi->cpu_map_max / CPUS_PER_LONG; i++) {
+ if (pdi->cpu_map[i] & ~(cpu_map[i])) {
+ ret = 1;
+ break;
+ }
+ }
+
+ free(cpu_map);
+ return ret;
}
-static void put_trace(struct per_dev_info *pdi, struct trace *t)
+static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
- rb_erase(&t->rb_node, &rb_sort_root);
- rb_sort_entries--;
+ struct blk_io_trace *bit = t->bit;
+ unsigned long expected_sequence;
+ struct per_cpu_info *pci;
+ struct trace *__t;
- trace_rb_insert_last(pdi, t);
+ pci = get_cpu_info(pdi, bit->cpu);
+ expected_sequence = pci->last_sequence + 1;
- if (pdi->rb_last_entries > rb_batch * pdi->nfiles) {
- struct rb_node *n = rb_first(&pdi->rb_last);
+ if (!expected_sequence) {
+ /*
+ * 1 should be the first entry, just allow it
+ */
+ if (bit->sequence == 1)
+ return 0;
+ if (bit->sequence == pci->smallest_seq_read)
+ return 0;
- t = rb_entry(n, struct trace, rb_node);
- __put_trace_last(pdi, t);
+ return check_cpu_map(pdi);
}
-}
-
-static int check_sequence(struct per_dev_info *pdi, struct blk_io_trace *bit)
-{
- unsigned long expected_sequence = pdi->last_sequence + 1;
- struct trace *t;
-
- /*
- * first entry, always ok
- */
- if (!expected_sequence)
- return 0;
if (bit->sequence == expected_sequence)
return 0;
* we may not have seen that sequence yet. if we are not doing
* the final run, break and wait for more entries.
*/
- if (expected_sequence < smallest_seq_read) {
- t = trace_rb_find_last(pdi, expected_sequence);
- if (!t)
+ if (expected_sequence < pci->smallest_seq_read) {
+ __t = trace_rb_find_last(pdi, pci, expected_sequence);
+ if (!__t)
goto skip;
- __put_trace_last(pdi, t);
+ __put_trace_last(pdi, __t);
return 0;
+ } else if (!force) {
+ return 1;
} else {
skip:
- if (print_missing) {
- fprintf(stderr, "(%d,%d): skipping %lu -> %u\n",
- MAJOR(pdi->dev), MINOR(pdi->dev),
- pdi->last_sequence, bit->sequence);
- }
- pdi->skips++;
+ if (check_current_skips(pci, bit->sequence))
+ return 0;
+
+ if (expected_sequence < bit->sequence)
+ insert_skip(pci, expected_sequence, bit->sequence - 1);
return 0;
}
}
t = rb_entry(n, struct trace, rb_node);
bit = t->bit;
- if (!pdi || pdi->dev != bit->device)
+ if (!pdi || pdi->dev != bit->device) {
pdi = get_dev_info(bit->device);
+ pci = NULL;
+ }
if (!pdi) {
fprintf(stderr, "Unknown device ID? (%d,%d)\n",
break;
}
- if (!force) {
- if (check_sequence(pdi, bit))
- break;
-
- if (bit->time > last_allowed_time)
- break;
- }
+ if (check_sequence(pdi, t, force))
+ break;
- pdi->last_sequence = bit->sequence;
+ if (!force && bit->time > last_allowed_time)
+ break;
check_time(pdi, bit);
if (!pci || pci->cpu != bit->cpu)
pci = get_cpu_info(pdi, bit->cpu);
- dump_trace(bit, pci, pdi);
+ pci->last_sequence = bit->sequence;
+
+ pci->nelems++;
+
+ if (bit->action & (act_mask << BLK_TC_SHIFT))
+ dump_trace(bit, pci, pdi);
put_trace(pdi, t);
}
}
-static int read_data(int fd, void *buffer, int bytes, int block)
+static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
int ret, bytes_left, fl;
void *p;
- fl = fcntl(fd, F_GETFL);
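+ /*
+ * only toggle O_NONBLOCK when the desired mode differs from what we
+ * last set; *fdblock caches the current state of the fd
+ */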
+ if (block != *fdblock) {
+ fl = fcntl(fd, F_GETFL);
- if (!block)
- fcntl(fd, F_SETFL, fl | O_NONBLOCK);
- else
- fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
+ if (!block) {
+ *fdblock = 0;
+ fcntl(fd, F_SETFL, fl | O_NONBLOCK);
+ } else {
+ *fdblock = 1;
+ fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
+ }
+ }
bytes_left = bytes;
p = buffer;
if (!ret)
return 1;
else if (ret < 0) {
- if (errno != EAGAIN)
+ if (errno != EAGAIN) {
perror("read");
+ return -1;
+ }
- return -1;
+ /*
+ * never do partial reads. we can return if we
+ * didn't read anything and we should not block,
+ * otherwise wait for data
+ */
+ if ((bytes_left == bytes) && !block)
+ return 1;
+
+ usleep(10);
+ continue;
} else {
p += ret;
bytes_left -= ret;
return 0;
}
-static int read_events(int fd, int always_block)
+static int read_events(int fd, int always_block, int *fdblock)
{
struct per_dev_info *pdi = NULL;
unsigned int events = 0;
while (!is_done() && events < rb_batch) {
struct blk_io_trace *bit;
struct trace *t;
- int pdu_len;
+ int pdu_len, should_block, ret;
__u32 magic;
bit = bit_alloc();
- if (read_data(fd, bit, sizeof(*bit), !events || always_block))
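+ /*
+ * block for the first event of a batch (or when the caller insists),
+ * later reads in the batch may return without data
+ */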
+ should_block = !events || always_block;
+
+ ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
+ if (ret) {
+ bit_free(bit);
+ if (!events && ret < 0)
+ events = ret;
break;
+ }
magic = be32_to_cpu(bit->magic);
if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
if (pdu_len) {
void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
- if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1))
+ if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
+ bit_free(ptr);
break;
+ }
bit = ptr;
}
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
pdi->nfiles = 0;
- pdi->last_sequence = -1;
for (j = 0;; j++) {
struct stat st;
int len = 0;
+ char *p, *dname;
pci = get_cpu_info(pdi, j);
pci->cpu = j;
pci->fd = -1;
+ pci->fdblock = -1;
+
+ p = strdup(pdi->name);
+ dname = dirname(p);
+ if (strcmp(dname, ".")) {
+ input_dir = dname;
+ p = strdup(pdi->name);
+ strcpy(pdi->name, basename(p));
+ }
+ free(p);
if (input_dir)
len = sprintf(pci->fname, "%s/", input_dir);
printf("Input file %s added\n", pci->fname);
pdi->nfiles++;
+ cpu_mark_online(pdi, pci->cpu);
}
}
events_added = 0;
last_allowed_time = -1ULL;
- smallest_seq_read = -1U;
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
+ pdi->last_read_time = -1ULL;
for (j = 0; j < pdi->nfiles; j++) {
if (pci->fd == -1)
continue;
- events = read_events(pci->fd, 1);
- if (!events) {
+ pci->smallest_seq_read = -1;
+
+ events = read_events(pci->fd, 1, &pci->fdblock);
+ if (events <= 0) {
+ cpu_mark_offline(pdi, pci->cpu);
close(pci->fd);
pci->fd = -1;
continue;
static int do_stdin(void)
{
unsigned long long youngest;
- int fd, events, loops;
+ int fd, events, fdblock;
last_allowed_time = -1ULL;
fd = dup(STDIN_FILENO);
return -1;
}
- loops = 0;
- while ((events = read_events(fd, 0)) != 0) {
+ fdblock = -1;
+ while ((events = read_events(fd, 0, &fdblock)) > 0) {
+#if 0
smallest_seq_read = -1U;
+#endif
if (sort_entries(&youngest))
break;
if (youngest > stopwatch_end)
break;
- if (loops++ & 1)
- show_entries_rb(0);
+ show_entries_rb(0);
}
if (rb_sort_entries)
return 0;
}
-static void flush_output(void)
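+/*
+ * emit the end-of-run statistics exactly once, honoring the per-process
+ * and per-device/cpu toggles
+ */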
+static void show_stats(void)
{
+ if (!ofp)
+ return;
+ if (stats_printed)
+ return;
+
+ stats_printed = 1;
+
+ if (per_process_stats)
+ show_process_stats();
+
+ if (per_device_and_cpu_stats)
+ show_device_and_cpu_stats();
+
fflush(ofp);
}
static void handle_sigint(__attribute__((__unused__)) int sig)
{
done = 1;
- flush_output();
}
/*
"\t-o Output file. If not given, output is stdout\n" \
"\t-b stdin read batching\n" \
"\t-s Show per-program io statistics\n" \
- "\t-n Hash processes by name, not pid\n" \
+ "\t-h Hash processes by name, not pid\n" \
"\t-t Track individual ios. Will tell you the time a request took\n" \
"\t to get queued, to get dispatched, and to get completed\n" \
"\t-q Quiet. Don't display any stats at the end of the trace\n" \
"\t-w Only parse data between the given time interval in seconds.\n" \
"\t If 'start' isn't given, blkparse defaults the start time to 0\n" \
- "\t -f Output format. Customize the output format. The format field\n" \
- "\t identifies can be found in the documentation\n" \
+ "\t-f Output format. Customize the output format. The format field\n" \
+ "\t identifies can be found in the documentation\n" \
"\t-F Format specification. Can be found in the documentation\n" \
- "\t-m Print missing entries\n" \
- "\t-v Print program version info\n\n";
+ "\t-v More verbose for marginal errors\n" \
+ "\t-V Print program version info\n\n";
static void usage(char *prog)
{
int main(int argc, char *argv[])
{
char *ofp_buffer;
- int c, ret, mode;
- int per_device_and_cpu_stats = 1;
+ int i, c, ret, mode;
+ int act_mask_tmp = 0;
while ((c = getopt_long(argc, argv, S_OPTS, l_opts, NULL)) != -1) {
switch (c) {
+ case 'a':
+ i = find_mask_map(optarg);
+ if (i < 0) {
+ fprintf(stderr,"Invalid action mask %s\n",
+ optarg);
+ return 1;
+ }
+ act_mask_tmp |= i;
+ break;
+
+ case 'A':
+ if ((sscanf(optarg, "%x", &i) != 1) ||
+ !valid_act_opt(i)) {
+ fprintf(stderr,
+ "Invalid set action mask %s/0x%x\n",
+ optarg, i);
+ return 1;
+ }
+ act_mask_tmp = i;
+ break;
case 'i':
if (!strcmp(optarg, "-") && !pipeline)
pipeline = 1;
if (add_format_spec(optarg) != 0)
return 1;
break;
- case 'n':
+ case 'h':
ppi_hash_by_pid = 0;
break;
- case 'm':
- print_missing = 1;
- break;
case 'v':
+ verbose++;
+ break;
+ case 'V':
printf("%s version %s\n", argv[0], blkparse_version);
return 0;
default:
return 1;
}
+ if (act_mask_tmp != 0)
+ act_mask = act_mask_tmp;
+
memset(&rb_sort_root, 0, sizeof(rb_sort_root));
signal(SIGINT, handle_sigint);
else
ret = do_file();
- if (per_process_stats)
- show_process_stats();
-
- if (per_device_and_cpu_stats)
- show_device_and_cpu_stats();
-
- flush_output();
+ show_stats();
+ free(ofp_buffer);
return ret;
}