#include "blktrace.h"
#include "rbtree.h"
+#include "jhash.h"
static char blkparse_version[] = "0.90";
struct per_dev_info {
- dev_t id;
+ dev_t dev;
char *name;
int backwards;
unsigned long last_sequence;
unsigned long skips;
+ struct rb_root rb_last;
+ unsigned long rb_last_entries;
+
+ struct rb_root rb_track;
+
int nfiles;
int ncpus;
struct per_cpu_info *cpus;
__u32 pid;
struct io_stats io_stats;
struct per_process_info *hash_next, *list_next;
+ int more_than_one;
/*
* individual io stats
};
#define PPI_HASH_SHIFT (8)
-static struct per_process_info *ppi_hash[1 << PPI_HASH_SHIFT];
+#define PPI_HASH_SIZE (1 << PPI_HASH_SHIFT)
+#define PPI_HASH_MASK (PPI_HASH_SIZE - 1)
+static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
-#define S_OPTS "i:o:b:stqw:f:F:v"
+#define S_OPTS "i:o:b:stqw:f:F:vnm"
static struct option l_opts[] = {
{
.name = "input",
.flag = NULL,
.val = 'F'
},
+	{
+		/* long-option names must not contain spaces (getopt_long
+		 * matches a single argv word), so use a dash */
+		.name = "hash-by-name",
+		.has_arg = no_argument,
+		.flag = NULL,
+		.val = 'n'
+	},
+	{
+		.name = "missing",
+		.has_arg = no_argument,
+		.flag = NULL,
+		.val = 'm'
+	},
{
.name = "version",
.has_arg = no_argument,
struct blk_io_trace *bit;
struct rb_node rb_node;
struct trace *next;
- int skipped;
};
static struct rb_root rb_sort_root;
-static struct rb_root rb_track_root;
+static unsigned long rb_sort_entries;
static struct trace *trace_list;
struct io_track {
struct rb_node rb_node;
- dev_t device;
__u64 sector;
__u32 pid;
+ char comm[16];
unsigned long long allocation_time;
unsigned long long queue_time;
unsigned long long dispatch_time;
static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
+static unsigned int smallest_seq_read;
static unsigned long long stopwatch_start; /* start from zero by default */
static unsigned long long stopwatch_end = ULONG_LONG_MAX; /* "infinity" */
static int per_process_stats;
static int track_ios;
+static int ppi_hash_by_pid = 1;
+static int print_missing;
+
+static unsigned int t_alloc_cache;
+static unsigned int bit_alloc_cache;
#define RB_BATCH_DEFAULT (512)
-static int rb_batch = RB_BATCH_DEFAULT;
+static unsigned int rb_batch = RB_BATCH_DEFAULT;
static int pipeline;
#define is_done() (*(volatile int *)(&done))
static volatile int done;
-static inline unsigned long hash_long(unsigned long val)
+#define JHASH_RANDOM (0x3af5f2ee)
+
+/*
+ * Bucket index for a pid: one jhash round, masked to the table size.
+ */
+static inline int ppi_hash_pid(__u32 pid)
+{
+	return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
+}
+
+static inline int ppi_hash_name(const char *name)
{
-#if __WORDSIZE == 32
- val *= 0x9e370001UL;
-#elif __WORDSIZE == 64
- val *= 0x9e37fffffffc0001UL;
-#else
-#error unknown word size
-#endif
+ return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
+}
- return val >> (__WORDSIZE - PPI_HASH_SHIFT);
+/*
+ * Process-table bucket for a ppi: by pid (the default) or, with -n,
+ * by the fixed-size command name.
+ */
+static inline int ppi_hash(struct per_process_info *ppi)
+{
+	if (ppi_hash_by_pid)
+		return ppi_hash_pid(ppi->pid);
+
+	return ppi_hash_name(ppi->name);
}
static inline void add_process_to_hash(struct per_process_info *ppi)
{
- const int hash_idx = hash_long(ppi->pid);
+ const int hash_idx = ppi_hash(ppi);
- ppi->hash_next = ppi_hash[hash_idx];
- ppi_hash[hash_idx] = ppi;
+ ppi->hash_next = ppi_hash_table[hash_idx];
+ ppi_hash_table[hash_idx] = ppi;
}
static inline void add_process_to_list(struct per_process_info *ppi)
ppi_list_entries++;
}
+/*
+ * Look up a per-process entry by command name; used when hashing by
+ * name (-n) instead of pid. Walks the hash chain linearly.
+ * NOTE(review): compares with strcmp while the hash covers the full
+ * 16-byte name field - assumes names are NUL-terminated; confirm.
+ */
+static struct per_process_info *find_process_by_name(char *name)
+{
+	const int hash_idx = ppi_hash_name(name);
+	struct per_process_info *ppi;
+
+	ppi = ppi_hash_table[hash_idx];
+	while (ppi) {
+		if (!strcmp(ppi->name, name))
+			return ppi;
+
+		ppi = ppi->hash_next;
+	}
+
+	return NULL;
+}
+
static struct per_process_info *find_process_by_pid(__u32 pid)
{
- const int hash_idx = hash_long(pid);
+ const int hash_idx = ppi_hash_pid(pid);
struct per_process_info *ppi;
- ppi = ppi_hash[hash_idx];
+ ppi = ppi_hash_table[hash_idx];
while (ppi) {
if (ppi->pid == pid)
return ppi;
return NULL;
}
-static inline int trace_rb_insert(struct trace *t)
+static struct per_process_info *find_process(__u32 pid, char *name)
{
- struct rb_node **p = &rb_sort_root.rb_node;
+ struct per_process_info *ppi;
+
+ if (ppi_hash_by_pid)
+ return find_process_by_pid(pid);
+
+ ppi = find_process_by_name(name);
+ if (ppi && ppi->pid != pid)
+ ppi->more_than_one = 1;
+
+ return ppi;
+}
+
+static inline int trace_rb_insert(struct trace *t, struct rb_root *root,
+ int check_time)
+{
+ struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct trace *__t;
while (*p) {
parent = *p;
+
__t = rb_entry(parent, struct trace, rb_node);
- if (t->bit->time < __t->bit->time)
- p = &(*p)->rb_left;
- else if (t->bit->time > __t->bit->time)
- p = &(*p)->rb_right;
- else if (t->bit->device < __t->bit->device)
+ if (check_time) {
+ if (t->bit->time < __t->bit->time) {
+ p = &(*p)->rb_left;
+ continue;
+ } else if (t->bit->time > __t->bit->time) {
+ p = &(*p)->rb_right;
+ continue;
+ }
+ }
+ if (t->bit->device < __t->bit->device)
p = &(*p)->rb_left;
else if (t->bit->device > __t->bit->device)
p = &(*p)->rb_right;
}
rb_link_node(&t->rb_node, parent, p);
- rb_insert_color(&t->rb_node, &rb_sort_root);
+ rb_insert_color(&t->rb_node, root);
return 0;
}
-static inline int track_rb_insert(struct io_track *iot)
+/*
+ * Insert a trace into the global time-sorted tree, bumping the entry
+ * count. Returns 0 on success, 1 if the insert failed.
+ */
+static inline int trace_rb_insert_sort(struct trace *t)
+{
+	if (!trace_rb_insert(t, &rb_sort_root, 1)) {
+		rb_sort_entries++;
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Park an already-displayed trace in the per-device rb_last tree so
+ * later sequence-gap checks can still find it. Returns 0 on success.
+ */
+static inline int trace_rb_insert_last(struct per_dev_info *pdi,struct trace *t)
+{
+	if (!trace_rb_insert(t, &pdi->rb_last, 1)) {
+		pdi->rb_last_entries++;
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Find the trace with matching (device, sequence) in 'root'. With
+ * 'order' set, additionally probe a few entries past where the
+ * binary search stopped, to tolerate entries whose time order does
+ * not match their sequence order (see comment below).
+ */
+static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
+				   struct rb_root *root, int order)
+{
+	struct rb_node *n = root->rb_node;
+	struct rb_node *prev = NULL;
+	struct trace *__t;
+
+	while (n) {
+		__t = rb_entry(n, struct trace, rb_node);
+		prev = n;
+
+		if (device < __t->bit->device)
+			n = n->rb_left;
+		else if (device > __t->bit->device)
+			n = n->rb_right;
+		else if (sequence < __t->bit->sequence)
+			n = n->rb_left;
+		else if (sequence > __t->bit->sequence)
+			n = n->rb_right;
+		else
+			return __t;
+	}
+
+	/*
+	 * hack - the list may not be sequence ordered because some
+	 * events don't have sequence and time matched. so we end up
+	 * being a little off in the rb lookup here, because we don't
+	 * know the time we are looking for. compensate by browsing
+	 * a little ahead from the last entry to find the match
+	 */
+	if (order && prev) {
+		int max = 5;
+
+		while (((n = rb_next(prev)) != NULL) && max--) {
+			__t = rb_entry(n, struct trace, rb_node);
+
+			if (__t->bit->device == device &&
+			    __t->bit->sequence == sequence)
+				return __t;
+
+			prev = n;
+		}
+	}
+
+	return NULL;
+}
+
+static inline struct trace *trace_rb_find_sort(dev_t dev, unsigned long seq)
{
- struct rb_node **p = &rb_track_root.rb_node;
+ return trace_rb_find(dev, seq, &rb_sort_root, 1);
+}
+
+/*
+ * Find a previously displayed trace for this device by sequence
+ * number; the rb_last tree needs no forward probing (order == 0).
+ */
+static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
+					       unsigned long seq)
+{
+	return trace_rb_find(pdi->dev, seq, &pdi->rb_last, 0);
+}
+
+static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
+{
+ struct rb_node **p = &pdi->rb_track.rb_node;
struct rb_node *parent = NULL;
struct io_track *__iot;
while (*p) {
parent = *p;
-
__iot = rb_entry(parent, struct io_track, rb_node);
- if (iot->device < __iot->device)
- p = &(*p)->rb_left;
- else if (iot->device > __iot->device)
- p = &(*p)->rb_right;
- else if (iot->sector < __iot->sector)
+ if (iot->sector < __iot->sector)
p = &(*p)->rb_left;
else if (iot->sector > __iot->sector)
p = &(*p)->rb_right;
fprintf(stderr,
"sector alias (%Lu) on device %d,%d!\n",
(unsigned long long) iot->sector,
- MAJOR(iot->device), MINOR(iot->device));
+ MAJOR(pdi->dev), MINOR(pdi->dev));
return 1;
}
}
rb_link_node(&iot->rb_node, parent, p);
- rb_insert_color(&iot->rb_node, &rb_track_root);
+ rb_insert_color(&iot->rb_node, &pdi->rb_track);
return 0;
}
-static struct io_track *__find_track(dev_t device, __u64 sector)
+static struct io_track *__find_track(struct per_dev_info *pdi, __u64 sector)
{
- struct rb_node **p = &rb_track_root.rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node *n = pdi->rb_track.rb_node;
struct io_track *__iot;
- while (*p) {
- parent = *p;
-
- __iot = rb_entry(parent, struct io_track, rb_node);
+ while (n) {
+ __iot = rb_entry(n, struct io_track, rb_node);
- if (device < __iot->device)
- p = &(*p)->rb_left;
- else if (device > __iot->device)
- p = &(*p)->rb_right;
- else if (sector < __iot->sector)
- p = &(*p)->rb_left;
+ if (sector < __iot->sector)
+ n = n->rb_left;
else if (sector > __iot->sector)
- p = &(*p)->rb_right;
+ n = n->rb_right;
else
return __iot;
}
return NULL;
}
-static struct io_track *find_track(__u32 pid, dev_t device, __u64 sector)
+static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
+ char *comm, __u64 sector)
{
struct io_track *iot;
- iot = __find_track(device, sector);
+ iot = __find_track(pdi, sector);
if (!iot) {
iot = malloc(sizeof(*iot));
iot->pid = pid;
- iot->device = device;
+ memcpy(iot->comm, comm, sizeof(iot->comm));
iot->sector = sector;
- track_rb_insert(iot);
+ track_rb_insert(pdi, iot);
}
return iot;
}
-static void log_track_frontmerge(struct blk_io_trace *t)
+static void log_track_frontmerge(struct per_dev_info *pdi,
+ struct blk_io_trace *t)
{
struct io_track *iot;
if (!track_ios)
return;
- iot = __find_track(t->device, t->sector + (t->bytes >> 9));
+ iot = __find_track(pdi, t->sector + (t->bytes >> 9));
if (!iot) {
- fprintf(stderr, "failed to find mergeable event\n");
+ fprintf(stderr, "merge not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector + (t->bytes >> 9));
return;
}
- rb_erase(&iot->rb_node, &rb_track_root);
+ rb_erase(&iot->rb_node, &pdi->rb_track);
iot->sector -= t->bytes >> 9;
- track_rb_insert(iot);
+ track_rb_insert(pdi, iot);
}
-static void log_track_getrq(struct blk_io_trace *t)
+static void log_track_getrq(struct per_dev_info *pdi, struct blk_io_trace *t)
{
struct io_track *iot;
if (!track_ios)
return;
- iot = find_track(t->pid, t->device, t->sector);
+ iot = find_track(pdi, t->pid, t->comm, t->sector);
iot->allocation_time = t->time;
}
-
/*
* return time between rq allocation and insertion
*/
-static unsigned long long log_track_insert(struct blk_io_trace *t)
+static unsigned long long log_track_insert(struct per_dev_info *pdi,
+ struct blk_io_trace *t)
{
unsigned long long elapsed;
struct io_track *iot;
if (!track_ios)
return -1;
- iot = find_track(t->pid, t->device, t->sector);
+ iot = find_track(pdi, t->pid, t->comm, t->sector);
iot->queue_time = t->time;
+
+ if (!iot->allocation_time)
+ return -1;
+
elapsed = iot->queue_time - iot->allocation_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_allocation_wait[w])
/*
* return time between queue and issue
*/
-static unsigned long long log_track_issue(struct blk_io_trace *t)
+static unsigned long long log_track_issue(struct per_dev_info *pdi,
+ struct blk_io_trace *t)
{
unsigned long long elapsed;
struct io_track *iot;
if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
return -1;
- iot = __find_track(t->device, t->sector);
+ iot = __find_track(pdi, t->sector);
if (!iot) {
- fprintf(stderr, "failed to find issue event\n");
+ fprintf(stderr, "issue not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector);
return -1;
}
elapsed = iot->dispatch_time - iot->queue_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_dispatch_wait[w])
/*
* return time between dispatch and complete
*/
-static unsigned long long log_track_complete(struct blk_io_trace *t)
+static unsigned long long log_track_complete(struct per_dev_info *pdi,
+ struct blk_io_trace *t)
{
unsigned long long elapsed;
struct io_track *iot;
if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
return -1;
- iot = __find_track(t->device, t->sector);
+ iot = __find_track(pdi, t->sector);
if (!iot) {
- fprintf(stderr, "failed to find complete event\n");
+ fprintf(stderr, "complete not found for (%d,%d): %llu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ (unsigned long long) t->sector);
return -1;
}
elapsed = iot->completion_time - iot->dispatch_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_completion_wait[w])
/*
* kill the trace, we don't need it after completion
*/
- rb_erase(&iot->rb_node, &rb_track_root);
+ rb_erase(&iot->rb_node, &pdi->rb_track);
free(iot);
return elapsed;
static struct io_stats *find_process_io_stats(__u32 pid, char *name)
{
- struct per_process_info *ppi = find_process_by_pid(pid);
+ struct per_process_info *ppi = find_process(pid, name);
if (!ppi) {
ppi = malloc(sizeof(*ppi));
memset(ppi, 0, sizeof(*ppi));
- strncpy(ppi->name, name, sizeof(ppi->name));
+ memcpy(ppi->name, name, 16);
ppi->pid = pid;
add_process_to_hash(ppi);
add_process_to_list(ppi);
return &ppi->io_stats;
}
-
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
struct per_cpu_info *cpus = pdi->cpus;
return 0;
}
-static struct per_dev_info *get_dev_info(dev_t id)
+static struct per_dev_info *get_dev_info(dev_t dev)
{
struct per_dev_info *pdi;
int i;
for (i = 0; i < ndevices; i++) {
- if (!devices[i].id)
- devices[i].id = id;
- if (devices[i].id == id)
+ if (!devices[i].dev)
+ devices[i].dev = dev;
+ if (devices[i].dev == dev)
return &devices[i];
}
- if (resize_devices(NULL) != 0)
+ if (resize_devices(NULL))
return NULL;
pdi = &devices[ndevices - 1];
- pdi->id = id;
+ pdi->dev = dev;
pdi->last_sequence = -1;
pdi->last_read_time = 0;
+ memset(&pdi->rb_last, 0, sizeof(pdi->rb_last));
+ pdi->rb_last_entries = 0;
return pdi;
}
if (pdi->name)
snprintf(buffer, size, "%s", pdi->name);
else
- snprintf(buffer, size, "%d,%d", MAJOR(pdi->id), MINOR(pdi->id));
+ snprintf(buffer, size, "%d,%d",MAJOR(pdi->dev),MINOR(pdi->dev));
return buffer;
}
}
}
-static void log_complete(struct per_cpu_info *pci, struct blk_io_trace *t,
- char *act)
+static void log_complete(struct per_dev_info *pdi, struct per_cpu_info *pci,
+ struct blk_io_trace *t, char *act)
{
- process_fmt(act, pci, t, log_track_complete(t), 0, NULL);
+ process_fmt(act, pci, t, log_track_complete(pdi, t), 0, NULL);
}
-static void log_insert(struct per_cpu_info *pci, struct blk_io_trace *t,
- char *act)
+static void log_insert(struct per_dev_info *pdi, struct per_cpu_info *pci,
+ struct blk_io_trace *t, char *act)
{
- process_fmt(act, pci, t, log_track_insert(t), 0, NULL);
+ process_fmt(act, pci, t, log_track_insert(pdi, t), 0, NULL);
}
static void log_queue(struct per_cpu_info *pci, struct blk_io_trace *t,
process_fmt(act, pci, t, -1, 0, NULL);
}
-static void log_issue(struct per_cpu_info *pci, struct blk_io_trace *t,
- char *act)
+static void log_issue(struct per_dev_info *pdi, struct per_cpu_info *pci,
+ struct blk_io_trace *t, char *act)
{
- process_fmt(act, pci, t, log_track_issue(t), 0, NULL);
+ process_fmt(act, pci, t, log_track_issue(pdi, t), 0, NULL);
}
-static void log_merge(struct per_cpu_info *pci, struct blk_io_trace *t,
- char *act)
+static void log_merge(struct per_dev_info *pdi, struct per_cpu_info *pci,
+ struct blk_io_trace *t, char *act)
{
if (act[0] == 'F')
- log_track_frontmerge(t);
+ log_track_frontmerge(pdi, t);
process_fmt(act, pci, t, -1ULL, 0, NULL);
}
}
}
-static void dump_trace_fs(struct blk_io_trace *t, struct per_cpu_info *pci)
+static void dump_trace_fs(struct blk_io_trace *t, struct per_dev_info *pdi,
+ struct per_cpu_info *pci)
{
int w = t->action & BLK_TC_ACT(BLK_TC_WRITE);
int act = t->action & 0xffff;
log_queue(pci, t, "Q");
break;
case __BLK_TA_INSERT:
- log_insert(pci, t, "I");
+ log_insert(pdi, pci, t, "I");
break;
case __BLK_TA_BACKMERGE:
account_m(t, pci, w);
- log_merge(pci, t, "M");
+ log_merge(pdi, pci, t, "M");
break;
case __BLK_TA_FRONTMERGE:
account_m(t, pci, w);
- log_merge(pci, t, "F");
+ log_merge(pdi, pci, t, "F");
break;
case __BLK_TA_GETRQ:
- log_track_getrq(t);
+ log_track_getrq(pdi, t);
log_generic(pci, t, "G");
break;
case __BLK_TA_SLEEPRQ:
break;
case __BLK_TA_ISSUE:
account_issue(t, pci, w);
- log_issue(pci, t, "D");
+ log_issue(pdi, pci, t, "D");
break;
case __BLK_TA_COMPLETE:
account_c(t, pci, w, t->bytes);
- log_complete(pci, t, "C");
+ log_complete(pdi, pci, t, "C");
break;
case __BLK_TA_PLUG:
log_action(pci, t, "P");
if (t->action & BLK_TC_ACT(BLK_TC_PC))
dump_trace_pc(t, pci);
else
- dump_trace_fs(t, pci);
+ dump_trace_fs(t, pdi, pci);
pdi->events++;
}
while (ppi) {
char name[64];
- snprintf(name, sizeof(name)-1, "%s (%u)", ppi->name, ppi->pid);
+ if (ppi->more_than_one)
+ sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
+ else
+ sprintf(name, "%s (%u)", ppi->name, ppi->pid);
+
dump_io_stats(&ppi->io_stats, name);
dump_wait_stats(ppi);
ppi = ppi->list_next;
}
}
-static int sort_entries(void)
-{
- struct trace *t;
- int nr = 0;
-
- while ((t = trace_list) != NULL) {
- trace_list = t->next;
-
- if (verify_trace(t->bit))
- continue;
- if (trace_rb_insert(t))
- break;
- nr++;
- }
-
- return nr;
-}
-
/*
* struct trace and blktrace allocation cache, we do potentially
* millions of mallocs for these structures while only using at most
*/
 static inline void t_free(struct trace *t)
 {
+	/* cap the free list at 1024 entries to bound cached memory */
-	t->next = t_alloc_list;
-	t_alloc_list = t;
+	if (t_alloc_cache < 1024) {
+		t->next = t_alloc_list;
+		t_alloc_list = t;
+		t_alloc_cache++;
+	} else
+		free(t);
 }
static inline struct trace *t_alloc(void)
if (t) {
t_alloc_list = t->next;
+ t_alloc_cache--;
return t;
}
 static inline void bit_free(struct blk_io_trace *bit)
 {
-	/*
-	 * abuse a 64-bit field for a next pointer for the free item
-	 */
-	bit->time = (__u64) (unsigned long) bit_alloc_list;
-	bit_alloc_list = (struct blk_io_trace *) bit;
+	/* same 1024-entry cap as t_free(); overflow is freed outright */
+	if (bit_alloc_cache < 1024) {
+		/*
+		 * abuse a 64-bit field for a next pointer for the free item
+		 */
+		bit->time = (__u64) (unsigned long) bit_alloc_list;
+		bit_alloc_list = (struct blk_io_trace *) bit;
+		bit_alloc_cache++;
+	} else
+		free(bit);
 }
static inline struct blk_io_trace *bit_alloc(void)
if (bit) {
bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
bit->time;
+ bit_alloc_cache--;
return bit;
}
return malloc(sizeof(*bit));
}
-static void show_entries_rb(void)
+/*
+ * Establish time zero: the smallest raw timestamp currently on the
+ * unsorted trace list. NOTE(review): assumes the first batch contains
+ * the globally oldest event - if a later batch held an older one,
+ * the genesis subtraction in sort_entries() would underflow; confirm
+ * against the capture ordering guarantees.
+ */
+static void find_genesis(void)
+{
+	struct trace *t = trace_list;
+
+	genesis_time = -1ULL;
+	while (t != NULL) {
+		if (t->bit->time < genesis_time)
+			genesis_time = t->bit->time;
+
+		t = t->next;
+	}
+}
+
+/*
+ * Return 0 if the (genesis-relative) event time falls inside the
+ * user's stopwatch window [stopwatch_start, stopwatch_end), 1 if the
+ * event should be discarded.
+ */
+static inline int check_stopwatch(struct blk_io_trace *bit)
+{
+	if (bit->time < stopwatch_end &&
+	    bit->time >= stopwatch_start)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Drain the unsorted trace_list into the time-sorted rb tree:
+ * normalize each time to genesis, drop events outside the stopwatch
+ * window, and track the smallest sequence seen this pass. Returns 0
+ * on success, -1 if an insert fails; *youngest is set to the
+ * smallest normalized time read (0 if nothing was read).
+ */
+static int sort_entries(unsigned long long *youngest)
+{
+	struct trace *t;
+
+	if (!genesis_time)
+		find_genesis();
+
+	*youngest = 0;
+	while ((t = trace_list) != NULL) {
+		struct blk_io_trace *bit = t->bit;
+
+		trace_list = t->next;
+
+		bit->time -= genesis_time;
+
+		if (bit->time < *youngest || !*youngest)
+			*youngest = bit->time;
+
+		if (check_stopwatch(bit)) {
+			bit_free(bit);
+			t_free(t);
+			continue;
+		}
+
+		if (trace_rb_insert_sort(t))
+			return -1;
+
+		if (bit->sequence < smallest_seq_read)
+			smallest_seq_read = bit->sequence;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove a trace from the per-device rb_last tree and free both the
+ * raw event and the trace wrapper.
+ */
+static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
+{
+	rb_erase(&t->rb_node, &pdi->rb_last);
+	pdi->rb_last_entries--;
+
+	bit_free(t->bit);
+	t_free(t);
+}
+
+/*
+ * Retire a displayed trace: move it from the global sort tree into
+ * the per-device rb_last tree (kept around for sequence-gap lookups),
+ * then prune the oldest rb_last entry once the tree grows beyond
+ * rb_batch * nfiles entries.
+ */
+static void put_trace(struct per_dev_info *pdi, struct trace *t)
+{
+	rb_erase(&t->rb_node, &rb_sort_root);
+	rb_sort_entries--;
+
+	trace_rb_insert_last(pdi, t);
+
+	if (pdi->rb_last_entries > rb_batch * pdi->nfiles) {
+		struct rb_node *n = rb_first(&pdi->rb_last);
+
+		t = rb_entry(n, struct trace, rb_node);
+		__put_trace_last(pdi, t);
+	}
+}
+
+/*
+ * Decide whether 'bit' may be displayed now. Returns 0 when the
+ * sequence is in order, already seen (found in rb_last), or the gap
+ * is accepted as a genuine skip; returns 1 when we should wait for
+ * more input to fill the gap. 'force' (final pass) disables waiting.
+ */
+static int check_sequence(struct per_dev_info *pdi, struct blk_io_trace *bit,
+			  int force)
+{
+	unsigned long expected_sequence = pdi->last_sequence + 1;
+	struct trace *t;
+
+	/*
+	 * first entry, always ok
+	 */
+	if (!expected_sequence)
+		return 0;
+
+	if (bit->sequence == expected_sequence)
+		return 0;
+
+	/*
+	 * we may not have seen that sequence yet. if we are not doing
+	 * the final run, break and wait for more entries.
+	 */
+	if (expected_sequence < smallest_seq_read) {
+		t = trace_rb_find_last(pdi, expected_sequence);
+		if (!t)
+			goto skip;
+
+		__put_trace_last(pdi, t);
+		return 0;
+	} else if (!force)
+		return 1;
+	else {
+skip:
+		if (print_missing) {
+			fprintf(stderr, "(%d,%d): skipping %lu -> %u\n",
+				MAJOR(pdi->dev), MINOR(pdi->dev),
+				pdi->last_sequence, bit->sequence);
+		}
+		pdi->skips++;
+		return 0;
+	}
+}
+
+static void show_entries_rb(int force)
{
struct per_dev_info *pdi = NULL;
struct per_cpu_info *pci = NULL;
struct rb_node *n;
struct trace *t;
- while ((n = rb_first(&rb_sort_root)) != NULL) {
+ if (force) {
+ n = rb_first(&rb_sort_root);
+ t = rb_entry(n, struct trace, rb_node);
+ fprintf(stderr, "first force %u\n", t->bit->sequence);
+ }
+ while ((n = rb_first(&rb_sort_root)) != NULL) {
if (done)
break;
t = rb_entry(n, struct trace, rb_node);
bit = t->bit;
- if (!pdi || pdi->id != bit->device)
+ if (!pdi || pdi->dev != bit->device)
pdi = get_dev_info(bit->device);
if (!pdi) {
break;
}
- if (bit->cpu > pdi->ncpus) {
- fprintf(stderr, "Unknown CPU ID? (%d, device %d,%d)\n",
- bit->cpu, MAJOR(bit->device),
- MINOR(bit->device));
+ if (check_sequence(pdi, bit, force))
break;
- }
- /*
- * back off displaying more info if we are out of sync
- * on SMP systems. to prevent stalling on lost events,
- * only allow an event to skip us a few times
- */
- if (bit->sequence != (pdi->last_sequence + 1)
- && pdi->last_sequence != -1) {
- if (t->skipped < 5) {
- t->skipped++;
- break;
- } else {
- fprintf(stderr, "skipping from %lu to %u\n", pdi->last_sequence, bit->sequence);
- pdi->skips++;
- }
- }
-
- if (bit->time >= stopwatch_end || bit->time > last_allowed_time)
+ if (!force && bit->time > last_allowed_time)
break;
pdi->last_sequence = bit->sequence;
- if (bit->time >= stopwatch_start) {
- check_time(pdi, bit);
+ check_time(pdi, bit);
- if (!pci || pci->cpu != bit->cpu)
- pci = get_cpu_info(pdi, bit->cpu);
+ if (!pci || pci->cpu != bit->cpu)
+ pci = get_cpu_info(pdi, bit->cpu);
- dump_trace(bit, pci, pdi);
- }
+ dump_trace(bit, pci, pdi);
- rb_erase(&t->rb_node, &rb_sort_root);
- bit_free(bit);
- t_free(t);
+ put_trace(pdi, t);
}
}
else if (ret < 0) {
if (errno != EAGAIN)
perror("read");
+
return -1;
} else {
p += ret;
return 0;
}
-static int read_events(int fd)
+static int read_events(int fd, int always_block)
{
struct per_dev_info *pdi = NULL;
- int events = 0;
+ unsigned int events = 0;
while (!is_done() && events < rb_batch) {
struct blk_io_trace *bit;
bit = bit_alloc();
- if (read_data(fd, bit, sizeof(*bit), !events))
+ if (read_data(fd, bit, sizeof(*bit), !events || always_block))
break;
magic = be32_to_cpu(bit->magic);
bit = ptr;
}
+ trace_to_cpu(bit);
+
+ if (verify_trace(bit)) {
+ bit_free(bit);
+ continue;
+ }
+
t = t_alloc();
memset(t, 0, sizeof(*t));
t->bit = bit;
- trace_to_cpu(bit);
t->next = trace_list;
trace_list = t;
- if (genesis_time == 0 || t->bit->time < genesis_time)
- genesis_time = t->bit->time;
-
- bit->time -= genesis_time;
-
- if (!pdi || pdi->id != bit->device)
+ if (!pdi || pdi->dev != bit->device)
pdi = get_dev_info(bit->device);
if (bit->time > pdi->last_read_time)
* now loop over the files reading in the data
*/
do {
+ unsigned long long youngest;
+
events_added = 0;
last_allowed_time = -1ULL;
+ smallest_seq_read = -1U;
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
if (pci->fd == -1)
continue;
- events = read_events(pci->fd);
+ events = read_events(pci->fd, 1);
if (!events) {
close(pci->fd);
pci->fd = -1;
}
}
- if (sort_entries() == -1)
+ if (sort_entries(&youngest))
+ break;
+
+ if (youngest > stopwatch_end)
break;
- show_entries_rb();
+ show_entries_rb(0);
} while (events_added);
+ if (rb_sort_entries)
+ show_entries_rb(1);
+
return 0;
}
static int do_stdin(void)
{
+ unsigned long long youngest;
int fd;
last_allowed_time = -1ULL;
do {
int events;
- events = read_events(fd);
+ events = read_events(fd, 0);
if (!events)
break;
- if (sort_entries() == -1)
+ if (sort_entries(&youngest))
+ break;
+
+ if (youngest > stopwatch_end)
break;
- show_entries_rb();
+ show_entries_rb(0);
} while (1);
+ if (rb_sort_entries)
+ show_entries_rb(1);
+
close(fd);
return 0;
}
fflush(ofp);
}
-static void handle_sigint(int sig)
+static void handle_sigint(__attribute__((__unused__)) int sig)
{
done = 1;
flush_output();
"\t-o Output file. If not given, output is stdout\n" \
"\t-b stdin read batching\n" \
"\t-s Show per-program io statistics\n" \
+ "\t-n Hash processes by name, not pid\n" \
"\t-t Track individual ios. Will tell you the time a request took\n" \
"\t to get queued, to get dispatched, and to get completed\n" \
"\t-q Quiet. Don't display any stats at the end of the trace\n" \
"\t -f Output format. Customize the output format. The format field\n" \
"\t identifies can be found in the documentation\n" \
"\t-F Format specification. Can be found in the documentation\n" \
+ "\t-m Print missing entries\n" \
"\t-v Print program version info\n\n";
static void usage(char *prog)
if (add_format_spec(optarg) != 0)
return 1;
break;
+ case 'n':
+ ppi_hash_by_pid = 0;
+ break;
+ case 'm':
+ print_missing = 1;
+ break;
case 'v':
printf("%s version %s\n", argv[0], blkparse_version);
return 0;
}
memset(&rb_sort_root, 0, sizeof(rb_sort_root));
- memset(&rb_track_root, 0, sizeof(rb_track_root));
signal(SIGINT, handle_sigint);
signal(SIGHUP, handle_sigint);