#include "blktrace.h"
#include "rbtree.h"
+#include "jhash.h"
static char blkparse_version[] = "0.90";
__u32 pid;
struct io_stats io_stats;
struct per_process_info *hash_next, *list_next;
+ int more_than_one;
/*
* individual io stats
};
#define PPI_HASH_SHIFT (8)
-static struct per_process_info *ppi_hash[1 << PPI_HASH_SHIFT];
+#define PPI_HASH_SIZE (1 << PPI_HASH_SHIFT)
+#define PPI_HASH_MASK (PPI_HASH_SIZE - 1)
+static struct per_process_info *ppi_hash_table[PPI_HASH_SIZE];
static struct per_process_info *ppi_list;
static int ppi_list_entries;
-#define S_OPTS "i:o:b:stqw:f:F:v"
+#define S_OPTS "i:o:b:stqw:f:F:vn"
static struct option l_opts[] = {
{
.name = "input",
.flag = NULL,
.val = 'F'
},
/*
 * NOTE(review): this long-option name contains spaces; getopt_long will
 * only match it if the user quotes the whole thing (--'hash by name').
 * Upstream blktrace uses "hash-by-name" — confirm intended spelling.
 * Also note this diff view elides context: the "input" entry above shows
 * .val = 'F', which belongs to a different (cut) option entry.
 */
+ {
+ .name = "hash by name",
+ .has_arg = no_argument,
+ .flag = NULL,
+ .val = 'n'
+ },
{
.name = "version",
.has_arg = no_argument,
dev_t device;
__u64 sector;
__u32 pid;
+ char comm[16];
unsigned long long allocation_time;
unsigned long long queue_time;
unsigned long long dispatch_time;
static int per_process_stats;
static int track_ios;
+static int ppi_hash_by_pid = 1;
#define RB_BATCH_DEFAULT (512)
static int rb_batch = RB_BATCH_DEFAULT;
#define is_done() (*(volatile int *)(&done))
static volatile int done;
-static inline unsigned long hash_long(unsigned long val)
+#define JHASH_RANDOM (0x3af5f2ee)
+
+static inline int ppi_hash_pid(__u32 pid)
{
-#if __WORDSIZE == 32
- val *= 0x9e370001UL;
-#elif __WORDSIZE == 64
- val *= 0x9e37fffffffc0001UL;
-#else
-#error unknown word size
-#endif
+ return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
+}
- return val >> (__WORDSIZE - PPI_HASH_SHIFT);
+static inline int ppi_hash_name(const char *name)
+{
+ return jhash(name, 16, JHASH_RANDOM) & PPI_HASH_MASK;
+}
+
+static inline int ppi_hash(struct per_process_info *ppi)
+{
+ if (ppi_hash_by_pid)
+ return ppi_hash_pid(ppi->pid);
+
+ return ppi_hash_name(ppi->name);
}
/* link a new per-process entry onto the head of its hash chain */
static inline void add_process_to_hash(struct per_process_info *ppi)
{
	const int bucket = ppi_hash(ppi);

	ppi->hash_next = ppi_hash_table[bucket];
	ppi_hash_table[bucket] = ppi;
}
static inline void add_process_to_list(struct per_process_info *ppi)
ppi_list_entries++;
}
+static struct per_process_info *find_process_by_name(char *name)
+{
+ const int hash_idx = ppi_hash_name(name);
+ struct per_process_info *ppi;
+
+ ppi = ppi_hash_table[hash_idx];
+ while (ppi) {
+ if (!strcmp(ppi->name, name))
+ return ppi;
+
+ ppi = ppi->hash_next;
+ }
+
+ return NULL;
+}
+
/*
 * Look up a per-process entry by pid in its hash bucket chain.
 * NOTE(review): this is a diff hunk with elided context — the chain
 * advance (ppi = ppi->hash_next) and the while-loop's closing brace
 * are cut from this view.  Do not edit from this fragment alone.
 */
static struct per_process_info *find_process_by_pid(__u32 pid)
{
- const int hash_idx = hash_long(pid);
+ const int hash_idx = ppi_hash_pid(pid);
struct per_process_info *ppi;
- ppi = ppi_hash[hash_idx];
+ ppi = ppi_hash_table[hash_idx];
while (ppi) {
if (ppi->pid == pid)
return ppi;
return NULL;
}
+static struct per_process_info *find_process(__u32 pid, char *name)
+{
+ struct per_process_info *ppi;
+
+ if (ppi_hash_by_pid)
+ return find_process_by_pid(pid);
+
+ ppi = find_process_by_name(name);
+ if (ppi && ppi->pid != pid)
+ ppi->more_than_one = 1;
+
+ return ppi;
+}
+
static inline int trace_rb_insert(struct trace *t)
{
struct rb_node **p = &rb_sort_root.rb_node;
return 0;
}
+static struct trace *trace_rb_find(dev_t device, unsigned long sequence)
+{
+ struct rb_node **p = &rb_sort_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct trace *__t;
+
+ while (*p) {
+ parent = *p;
+ __t = rb_entry(parent, struct trace, rb_node);
+
+ if (device < __t->bit->device)
+ p = &(*p)->rb_left;
+ else if (device > __t->bit->device)
+ p = &(*p)->rb_right;
+ else if (sequence < __t->bit->sequence)
+ p = &(*p)->rb_left;
+ else if (sequence > __t->bit->sequence)
+ p = &(*p)->rb_right;
+ else
+ return __t;
+ }
+
+ return NULL;
+}
+
static inline int track_rb_insert(struct io_track *iot)
{
struct rb_node **p = &rb_track_root.rb_node;
return NULL;
}
/*
 * Lookup-or-create for a tracked io (pid/comm/device/sector).
 * NOTE(review): diff hunk with elided context — the tree search that
 * sets iot before the !iot test is cut from this view.
 * NOTE(review): malloc() result is dereferenced without a NULL check,
 * and the time fields are not zeroed here — confirm the elided lines
 * or callers handle that.
 */
-static struct io_track *find_track(__u32 pid, dev_t device, __u64 sector)
+static struct io_track *find_track(__u32 pid, char *comm, dev_t device,
+ __u64 sector)
{
struct io_track *iot;
if (!iot) {
iot = malloc(sizeof(*iot));
iot->pid = pid;
+ memcpy(iot->comm, comm, sizeof(iot->comm));
iot->device = device;
iot->sector = sector;
track_rb_insert(iot);
if (!track_ios)
return;
- iot = find_track(t->pid, t->device, t->sector);
+ iot = find_track(t->pid, t->comm, t->device, t->sector);
iot->allocation_time = t->time;
}
if (!track_ios)
return -1;
- iot = find_track(t->pid, t->device, t->sector);
+ iot = find_track(t->pid, t->comm, t->device, t->sector);
iot->queue_time = t->time;
elapsed = iot->queue_time - iot->allocation_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_allocation_wait[w])
elapsed = iot->dispatch_time - iot->queue_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_dispatch_wait[w])
elapsed = iot->completion_time - iot->dispatch_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process_by_pid(iot->pid);
+ struct per_process_info *ppi = find_process(iot->pid,iot->comm);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_completion_wait[w])
/*
 * Return the io_stats block for (pid, name), creating the per-process
 * entry on first sight.  NOTE(review): diff hunk with elided context —
 * the closing brace of the if (!ppi) branch is cut from this view.
 * NOTE(review): malloc() is unchecked before memset; the memcpy of a
 * fixed 16 bytes assumes the caller's name buffer is the full comm[16]
 * array — confirm callers always pass that.
 */
static struct io_stats *find_process_io_stats(__u32 pid, char *name)
{
- struct per_process_info *ppi = find_process_by_pid(pid);
+ struct per_process_info *ppi = find_process(pid, name);
if (!ppi) {
ppi = malloc(sizeof(*ppi));
memset(ppi, 0, sizeof(*ppi));
- strncpy(ppi->name, name, sizeof(ppi->name));
+ memcpy(ppi->name, name, 16);
ppi->pid = pid;
add_process_to_hash(ppi);
add_process_to_list(ppi);
return &ppi->io_stats;
}
-
static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
{
struct per_cpu_info *cpus = pdi->cpus;
pdi = &devices[ndevices - 1];
pdi->id = id;
- pdi->last_sequence = -1;
+ pdi->last_sequence = 0;
pdi->last_read_time = 0;
return pdi;
}
/*
 * Per-process stats dump loop (enclosing function not visible in this
 * diff view).  NOTE(review): the patch replaces a bounded snprintf with
 * sprintf.  ppi->name is a 16-byte comm and pid is a __u32, so 64 bytes
 * should suffice *if* name is always NUL-terminated — confirm; keeping
 * snprintf(name, sizeof(name), ...) would be strictly safer.
 */
while (ppi) {
char name[64];
- snprintf(name, sizeof(name)-1, "%s (%u)", ppi->name, ppi->pid);
+ if (ppi->more_than_one)
+ sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
+ else
+ sprintf(name, "%s (%u)", ppi->name, ppi->pid);
+
dump_io_stats(&ppi->io_stats, name);
dump_wait_stats(ppi);
ppi = ppi->list_next;
}
}
-static int sort_entries(void)
-{
- struct trace *t;
- int nr = 0;
-
- while ((t = trace_list) != NULL) {
- trace_list = t->next;
-
- if (verify_trace(t->bit))
- continue;
- if (trace_rb_insert(t))
- break;
- nr++;
- }
-
- return nr;
-}
-
/*
* struct trace and blktrace allocation cache, we do potentially
* millions of mallocs for these structures while only using at most
return malloc(sizeof(*bit));
}
+static void find_genesis(void)
+{
+ struct trace *t = trace_list;
+
+ genesis_time = -1ULL;
+ while (t != NULL) {
+ if (t->bit->time < genesis_time)
+ genesis_time = t->bit->time;
+
+ t = t->next;
+ }
+}
+
+static inline int check_stopwatch(struct blk_io_trace *bit)
+{
+ if (bit->time < stopwatch_end &&
+ bit->time >= stopwatch_start)
+ return 0;
+
+ return 1;
+}
+
+static int sort_entries(void)
+{
+ struct trace *t;
+ int nr = 0;
+
+ if (!genesis_time)
+ find_genesis();
+
+ while ((t = trace_list) != NULL) {
+ struct blk_io_trace *bit = t->bit;
+
+ trace_list = t->next;
+
+ if (verify_trace(bit))
+ continue;
+
+ bit->time -= genesis_time;
+
+ if (check_stopwatch(bit)) {
+ bit_free(bit);
+ t_free(t);
+ continue;
+ }
+
+ if (trace_rb_insert(t))
+ break;
+
+ nr++;
+ }
+
+ return nr;
+}
+
static void show_entries_rb(int force)
{
struct per_dev_info *pdi = NULL;
* on SMP systems. to prevent stalling on lost events,
* only allow an event to skip us a few times
*/
- if (bit->sequence != (pdi->last_sequence + 1)
- && pdi->last_sequence != -1 && !force) {
+ if (bit->sequence > (pdi->last_sequence + 1) && !force) {
+ struct trace *__t;
+
+ /*
+ * the wanted sequence is really there, continue
+ * because this means that the log time is earlier
+ * on the trace we have now
+ */
+ __t = trace_rb_find(pdi->id, pdi->last_sequence + 1);
+ if (__t)
+ goto ok;
+
if (t->skipped < 5) {
t->skipped++;
break;
- } else {
- fprintf(stderr, "skipping from %lu to %u\n", pdi->last_sequence, bit->sequence);
+ } else
pdi->skips++;
- }
}
- if (bit->time >= stopwatch_end)
- break;
-
+ok:
if (!force && bit->time > last_allowed_time)
break;
pdi->last_sequence = bit->sequence;
- if (bit->time >= stopwatch_start) {
- check_time(pdi, bit);
+ check_time(pdi, bit);
- if (!pci || pci->cpu != bit->cpu)
- pci = get_cpu_info(pdi, bit->cpu);
+ if (!pci || pci->cpu != bit->cpu)
+ pci = get_cpu_info(pdi, bit->cpu);
- dump_trace(bit, pci, pdi);
- }
+ dump_trace(bit, pci, pdi);
rb_erase(&t->rb_node, &rb_sort_root);
rb_sort_entries--;
t->bit = bit;
trace_to_cpu(bit);
+
t->next = trace_list;
trace_list = t;
- if (genesis_time == 0 || t->bit->time < genesis_time)
- genesis_time = t->bit->time;
-
- bit->time -= genesis_time;
-
if (!pdi || pdi->id != bit->device)
pdi = get_dev_info(bit->device);
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
pdi->nfiles = 0;
- pdi->last_sequence = -1;
+ pdi->last_sequence = 0;
for (j = 0;; j++) {
struct stat st;
"\t-o Output file. If not given, output is stdout\n" \
"\t-b stdin read batching\n" \
"\t-s Show per-program io statistics\n" \
+ "\t-n Hash processes by name, not pid\n" \
"\t-t Track individual ios. Will tell you the time a request took\n" \
"\t to get queued, to get dispatched, and to get completed\n" \
"\t-q Quiet. Don't display any stats at the end of the trace\n" \
if (add_format_spec(optarg) != 0)
return 1;
break;
+ case 'n':
+ ppi_hash_by_pid = 0;
+ break;
case 'v':
printf("%s version %s\n", argv[0], blkparse_version);
return 0;