unsigned long long last_reported_time;
unsigned long long last_read_time;
struct io_stats io_stats;
- unsigned long last_sequence;
- unsigned long skips, nskips;
- unsigned long long seq_skips, seq_nskips;
+ unsigned long skips;
+ unsigned long long seq_skips;
unsigned int max_depth[2];
unsigned int cur_depth[2];
- struct rb_root rb_last;
- unsigned long rb_last_entries;
-
struct rb_root rb_track;
int nfiles;
unsigned int cpu_map_max;
struct per_cpu_info *cpus;
- struct skip_info *skips_head;
- struct skip_info *skips_tail;
};
+/*
+ * some duplicated effort here, we can unify this hash and the ppi hash later
+ */
+struct process_pid_map {
+ pid_t pid;
+ char comm[16];
+ struct process_pid_map *hash_next, *list_next;
+};
+
+#define PPM_HASH_SHIFT (8)
+#define PPM_HASH_SIZE (1 << PPM_HASH_SHIFT)
+#define PPM_HASH_MASK (PPM_HASH_SIZE - 1)
+static struct process_pid_map *ppm_hash_table[PPM_HASH_SIZE];
+
struct per_process_info {
- char name[16];
- __u32 pid;
+ struct process_pid_map *ppm;
struct io_stats io_stats;
struct per_process_info *hash_next, *list_next;
int more_than_one;
struct blk_io_trace *bit;
struct rb_node rb_node;
struct trace *next;
+ unsigned long read_sequence;
};
static struct rb_root rb_sort_root;
struct io_track {
struct rb_node rb_node;
+ struct process_pid_map *ppm;
__u64 sector;
- __u32 pid;
- char comm[16];
unsigned long long allocation_time;
unsigned long long queue_time;
unsigned long long dispatch_time;
static int ndevices;
static struct per_dev_info *devices;
static char *get_dev_name(struct per_dev_info *, char *, int);
+static int trace_rb_insert_last(struct per_dev_info *, struct trace *);
FILE *ofp = NULL;
static char *output_name;
static unsigned long long genesis_time;
static unsigned long long last_allowed_time;
-static unsigned int smallest_seq_read;
static unsigned long long stopwatch_start; /* start from zero by default */
static unsigned long long stopwatch_end = -1ULL; /* "infinity" */
+static unsigned long read_sequence;
static int per_process_stats;
static int per_device_and_cpu_stats = 1;
static int verbose;
static unsigned int act_mask = -1U;
static int stats_printed;
+int data_is_native = -1;
static unsigned int t_alloc_cache;
static unsigned int bit_alloc_cache;
#define CPU_IDX(cpu) ((cpu) / CPUS_PER_LONG)
#define CPU_BIT(cpu) ((cpu) & (CPUS_PER_LONG - 1))
-static void insert_skip(struct per_dev_info *pdi, unsigned long start,
+static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
+{
+ struct per_cpu_info *cpus = pdi->cpus;
+ int ncpus = pdi->ncpus;
+ int new_count = cpu + 1;
+ int new_space, size;
+ char *new_start;
+
+ size = new_count * sizeof(struct per_cpu_info);
+ cpus = realloc(cpus, size);
+ if (!cpus) {
+ char name[20];
+ fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
+ get_dev_name(pdi, name, sizeof(name)), size);
+ exit(1);
+ }
+
+ new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
+ new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
+ memset(new_start, 0, new_space);
+
+ pdi->ncpus = new_count;
+ pdi->cpus = cpus;
+
+ for (new_count = 0; new_count < pdi->ncpus; new_count++) {
+ struct per_cpu_info *pci = &pdi->cpus[new_count];
+
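+ /* a freshly zeroed entry has fd == 0: mark it unopened and reset its state */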
+ if (!pci->fd) {
+ pci->fd = -1;
+ memset(&pci->rb_last, 0, sizeof(pci->rb_last));
+ pci->rb_last_entries = 0;
+ pci->last_sequence = -1;
+ }
+ }
+}
+
+static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
+{
+ struct per_cpu_info *pci;
+
+ if (cpu >= pdi->ncpus)
+ resize_cpu_info(pdi, cpu);
+
+ pci = &pdi->cpus[cpu];
+ pci->cpu = cpu;
+ return pci;
+}
+
+
+static int resize_devices(char *name)
+{
+ int size = (ndevices + 1) * sizeof(struct per_dev_info);
+
+ devices = realloc(devices, size);
+ if (!devices) {
+ fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
+ return 1;
+ }
+ memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
+ devices[ndevices].name = name;
+ ndevices++;
+ return 0;
+}
+
+static struct per_dev_info *get_dev_info(dev_t dev)
+{
+ struct per_dev_info *pdi;
+ int i;
+
+ for (i = 0; i < ndevices; i++) {
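+ /* entries added by name alone still have dev == 0; claim the first free slot */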
+ if (!devices[i].dev)
+ devices[i].dev = dev;
+ if (devices[i].dev == dev)
+ return &devices[i];
+ }
+
+ if (resize_devices(NULL))
+ return NULL;
+
+ pdi = &devices[ndevices - 1];
+ pdi->dev = dev;
+ pdi->first_reported_time = 0;
+ pdi->last_read_time = 0;
+
+ return pdi;
+}
+
+static void insert_skip(struct per_cpu_info *pci, unsigned long start,
unsigned long end)
{
struct skip_info *sip;
- for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
+ for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
if (end == (sip->start - 1)) {
sip->start = start;
return;
sip->start = start;
sip->end = end;
sip->prev = sip->next = NULL;
- if (pdi->skips_tail == NULL)
- pdi->skips_head = pdi->skips_tail = sip;
+ if (pci->skips_tail == NULL)
+ pci->skips_head = pci->skips_tail = sip;
else {
- sip->prev = pdi->skips_tail;
- pdi->skips_tail->next = sip;
- pdi->skips_tail = sip;
+ sip->prev = pci->skips_tail;
+ pci->skips_tail->next = sip;
+ pci->skips_tail = sip;
}
}
-static void remove_sip(struct per_dev_info *pdi, struct skip_info *sip)
+static void remove_sip(struct per_cpu_info *pci, struct skip_info *sip)
{
if (sip->prev == NULL) {
if (sip->next == NULL)
- pdi->skips_head = pdi->skips_tail = NULL;
+ pci->skips_head = pci->skips_tail = NULL;
else {
- pdi->skips_head = sip->next;
+ pci->skips_head = sip->next;
sip->next->prev = NULL;
}
} else if (sip->next == NULL) {
- pdi->skips_tail = sip->prev;
+ pci->skips_tail = sip->prev;
sip->prev->next = NULL;
} else {
sip->prev->next = sip->next;
}
#define IN_SKIP(sip,seq) (((sip)->start <= (seq)) && ((seq) <= sip->end))
-static int check_current_skips(struct per_dev_info *pdi, unsigned long seq)
+static int check_current_skips(struct per_cpu_info *pci, unsigned long seq)
{
struct skip_info *sip;
- for (sip = pdi->skips_tail; sip != NULL; sip = sip->prev) {
- if (IN_SKIP(sip,seq)) {
+ for (sip = pci->skips_tail; sip != NULL; sip = sip->prev) {
+ if (IN_SKIP(sip, seq)) {
if (sip->start == seq) {
if (sip->end == seq)
- remove_sip(pdi,sip);
+ remove_sip(pci, sip);
else
sip->start += 1;
} else if (sip->end == seq)
sip->end -= 1;
else {
- sip->end = seq - 1;
- insert_skip(pdi,seq+1,sip->end);
+ unsigned long old_end = sip->end;
+
+ /* split the hole: keep [start, seq - 1], re-add [seq + 1, old_end] */
+ sip->end = seq - 1;
+ insert_skip(pci, seq + 1, old_end);
}
return 1;
}
}
+
return 0;
}
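To make the per-cpu hole bookkeeping concrete, a hypothetical walkthrough (sequence numbers invented for illustration; collect_pdi_skips() below just sums whatever holes remain):

/*
 * last_sequence == 3, trace 8 arrives:
 *	check_sequence() calls insert_skip(pci, 4, 7)	holes: [4,7]
 * trace 5 arrives late:
 *	check_current_skips(pci, 5) splits the hole	holes: [4,4] [6,7]
 * trace 4 arrives:
 *	check_current_skips(pci, 4) drops [4,4]		holes: [6,7]
 */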
static void collect_pdi_skips(struct per_dev_info *pdi)
{
struct skip_info *sip;
+ int cpu;
pdi->skips = 0;
pdi->seq_skips = 0;
- for (sip = pdi->skips_head; sip != NULL; sip = sip->next) {
- pdi->skips += 1;
- pdi->seq_skips += (sip->end - sip->start + 1);
- if (verbose)
- fprintf(stderr, "(%d,%d): skipping %lu -> %lu\n",
- MAJOR(pdi->dev), MINOR(pdi->dev),
- sip->start, sip->end);
+
+ for (cpu = 0; cpu < pdi->ncpus; cpu++) {
+ struct per_cpu_info *pci = &pdi->cpus[cpu];
+
+ for (sip = pci->skips_head; sip != NULL; sip = sip->next) {
+ pdi->skips++;
+ pdi->seq_skips += (sip->end - sip->start + 1);
+ if (verbose)
+ fprintf(stderr,"(%d,%d): skipping %lu -> %lu\n",
+ MAJOR(pdi->dev), MINOR(pdi->dev),
+ sip->start, sip->end);
+ }
}
}
return (pdi->cpu_map[CPU_IDX(cpu)] & (1UL << CPU_BIT(cpu))) != 0;
}
-static inline int ppi_hash_pid(__u32 pid)
+static inline int ppm_hash_pid(pid_t pid)
+{
+ return jhash_1word(pid, JHASH_RANDOM) & PPM_HASH_MASK;
+}
+
+static struct process_pid_map *find_ppm(pid_t pid)
+{
+ const int hash_idx = ppm_hash_pid(pid);
+ struct process_pid_map *ppm;
+
+ ppm = ppm_hash_table[hash_idx];
+ while (ppm) {
+ if (ppm->pid == pid)
+ return ppm;
+
+ ppm = ppm->hash_next;
+ }
+
+ return NULL;
+}
+
+static struct process_pid_map *add_ppm_hash(pid_t pid, const char *name)
+{
+ const int hash_idx = ppm_hash_pid(pid);
+ struct process_pid_map *ppm;
+
+ ppm = find_ppm(pid);
+ if (!ppm) {
+ ppm = malloc(sizeof(*ppm));
+ memset(ppm, 0, sizeof(*ppm));
+ ppm->pid = pid;
+ /* bounded copy: comm is 16 bytes, the memset above terminates it */
+ strncpy(ppm->comm, name, sizeof(ppm->comm) - 1);
+ ppm->hash_next = ppm_hash_table[hash_idx];
+ ppm_hash_table[hash_idx] = ppm;
+ }
+
+ return ppm;
+}
+
+char *find_process_name(pid_t pid)
+{
+ struct process_pid_map *ppm = find_ppm(pid);
+
+ if (ppm)
+ return ppm->comm;
+
+ return NULL;
+}
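The pid -> comm map is filled from notify traces (see read_events() further down) and read back when naming processes. A minimal usage sketch; example_name_lookup() is illustrative, not part of the patch:

static void example_name_lookup(struct blk_io_trace *bit)
{
	char *name;

	/* notify traces carry the comm string right after the header */
	if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		add_ppm_hash(bit->pid, (char *) bit + sizeof(*bit));

	/* any later trace from that pid can now be resolved to a name */
	name = find_process_name(bit->pid);
	fprintf(ofp, "%u: %s\n", bit->pid, name ? name : "?");
}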
+
+static inline int ppi_hash_pid(pid_t pid)
{
return jhash_1word(pid, JHASH_RANDOM) & PPI_HASH_MASK;
}
static inline int ppi_hash(struct per_process_info *ppi)
{
+ struct process_pid_map *ppm = ppi->ppm;
+
if (ppi_hash_by_pid)
- return ppi_hash_pid(ppi->pid);
+ return ppi_hash_pid(ppm->pid);
- return ppi_hash_name(ppi->name);
+ return ppi_hash_name(ppm->comm);
}
-static inline void add_process_to_hash(struct per_process_info *ppi)
+static inline void add_ppi_to_hash(struct per_process_info *ppi)
{
const int hash_idx = ppi_hash(ppi);
ppi_hash_table[hash_idx] = ppi;
}
-static inline void add_process_to_list(struct per_process_info *ppi)
+static inline void add_ppi_to_list(struct per_process_info *ppi)
{
ppi->list_next = ppi_list;
ppi_list = ppi;
ppi_list_entries++;
}
-static struct per_process_info *find_process_by_name(char *name)
+static struct per_process_info *find_ppi_by_name(char *name)
{
const int hash_idx = ppi_hash_name(name);
struct per_process_info *ppi;
ppi = ppi_hash_table[hash_idx];
while (ppi) {
- if (!strcmp(ppi->name, name))
+ struct process_pid_map *ppm = ppi->ppm;
+
+ if (!strcmp(ppm->comm, name))
return ppi;
ppi = ppi->hash_next;
return NULL;
}
-static struct per_process_info *find_process_by_pid(__u32 pid)
+static struct per_process_info *find_ppi_by_pid(pid_t pid)
{
const int hash_idx = ppi_hash_pid(pid);
struct per_process_info *ppi;
ppi = ppi_hash_table[hash_idx];
while (ppi) {
- if (ppi->pid == pid)
+ struct process_pid_map *ppm = ppi->ppm;
+
+ if (ppm->pid == pid)
return ppi;
ppi = ppi->hash_next;
return NULL;
}
-static struct per_process_info *find_process(__u32 pid, char *name)
+static struct per_process_info *find_ppi(pid_t pid)
{
struct per_process_info *ppi;
+ char *name;
if (ppi_hash_by_pid)
- return find_process_by_pid(pid);
+ return find_ppi_by_pid(pid);
- ppi = find_process_by_name(name);
- if (ppi && ppi->pid != pid)
+ name = find_process_name(pid);
+ if (!name)
+ return NULL;
+
+ ppi = find_ppi_by_name(name);
+ if (ppi && ppi->ppm->pid != pid)
ppi->more_than_one = 1;
return ppi;
}
-static inline int trace_rb_insert(struct trace *t, struct rb_root *root,
- int check_time)
+/*
+ * struct trace and blktrace allocation cache, we do potentially
+ * millions of mallocs for these structures while only using at most
+ * a few thousand at a time
+ */
+static inline void t_free(struct trace *t)
+{
+ if (t_alloc_cache < 1024) {
+ t->next = t_alloc_list;
+ t_alloc_list = t;
+ t_alloc_cache++;
+ } else
+ free(t);
+}
+
+static inline struct trace *t_alloc(void)
+{
+ struct trace *t = t_alloc_list;
+
+ if (t) {
+ t_alloc_list = t->next;
+ t_alloc_cache--;
+ return t;
+ }
+
+ return malloc(sizeof(*t));
+}
+
+static inline void bit_free(struct blk_io_trace *bit)
+{
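+ /* only cache fixed-size traces: a trailing pdu makes the allocation bigger */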
+ if (bit_alloc_cache < 1024 && !bit->pdu_len) {
+ /*
+ * abuse a 64-bit field for a next pointer for the free item
+ */
+ bit->time = (__u64) (unsigned long) bit_alloc_list;
+ bit_alloc_list = (struct blk_io_trace *) bit;
+ bit_alloc_cache++;
+ } else
+ free(bit);
+}
+
+static inline struct blk_io_trace *bit_alloc(void)
+{
+ struct blk_io_trace *bit = bit_alloc_list;
+
+ if (bit) {
+ bit_alloc_list = (struct blk_io_trace *) (unsigned long) bit->time;
+ bit_alloc_cache--;
+ return bit;
+ }
+
+ return malloc(sizeof(*bit));
+}
+
+static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
+{
+ struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
+
+ rb_erase(&t->rb_node, &pci->rb_last);
+ pci->rb_last_entries--;
+
+ bit_free(t->bit);
+ t_free(t);
+}
+
+static void put_trace(struct per_dev_info *pdi, struct trace *t)
+{
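+ /* shown traces leave the sort tree but stay findable for sequence checks */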
+ rb_erase(&t->rb_node, &rb_sort_root);
+ rb_sort_entries--;
+
+ trace_rb_insert_last(pdi, t);
+}
+
+static inline int trace_rb_insert(struct trace *t, struct rb_root *root)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
__t = rb_entry(parent, struct trace, rb_node);
- if (check_time) {
- if (t->bit->time < __t->bit->time) {
- p = &(*p)->rb_left;
- continue;
- } else if (t->bit->time > __t->bit->time) {
- p = &(*p)->rb_right;
- continue;
- }
- }
- if (t->bit->device < __t->bit->device)
+ if (t->bit->time < __t->bit->time)
+ p = &(*p)->rb_left;
+ else if (t->bit->time > __t->bit->time)
+ p = &(*p)->rb_right;
+ else if (t->bit->device < __t->bit->device)
p = &(*p)->rb_left;
else if (t->bit->device > __t->bit->device)
p = &(*p)->rb_right;
static inline int trace_rb_insert_sort(struct trace *t)
{
- if (!trace_rb_insert(t, &rb_sort_root, 1)) {
+ if (!trace_rb_insert(t, &rb_sort_root)) {
rb_sort_entries++;
return 0;
}
return 1;
}
-static inline int trace_rb_insert_last(struct per_dev_info *pdi,struct trace *t)
+static int trace_rb_insert_last(struct per_dev_info *pdi, struct trace *t)
{
- if (!trace_rb_insert(t, &pdi->rb_last, 1)) {
- pdi->rb_last_entries++;
- return 0;
+ struct per_cpu_info *pci = get_cpu_info(pdi, t->bit->cpu);
+
+ if (trace_rb_insert(t, &pci->rb_last))
+ return 1;
+
+ pci->rb_last_entries++;
+
+ if (pci->rb_last_entries > rb_batch * pdi->nfiles) {
+ struct rb_node *n = rb_first(&pci->rb_last);
+
+ t = rb_entry(n, struct trace, rb_node);
+ __put_trace_last(pdi, t);
}
- return 1;
+ return 0;
}
static struct trace *trace_rb_find(dev_t device, unsigned long sequence,
return NULL;
}
-static inline struct trace *trace_rb_find_sort(dev_t dev, unsigned long seq)
-{
- return trace_rb_find(dev, seq, &rb_sort_root, 1);
-}
-
static inline struct trace *trace_rb_find_last(struct per_dev_info *pdi,
+ struct per_cpu_info *pci,
unsigned long seq)
{
- return trace_rb_find(pdi->dev, seq, &pdi->rb_last, 0);
+ return trace_rb_find(pdi->dev, seq, &pci->rb_last, 0);
}
static inline int track_rb_insert(struct per_dev_info *pdi,struct io_track *iot)
return NULL;
}
-static struct io_track *find_track(struct per_dev_info *pdi, __u32 pid,
- char *comm, __u64 sector)
+static struct io_track *find_track(struct per_dev_info *pdi, pid_t pid,
+ __u64 sector)
{
struct io_track *iot;
iot = __find_track(pdi, sector);
if (!iot) {
iot = malloc(sizeof(*iot));
- iot->pid = pid;
- memcpy(iot->comm, comm, sizeof(iot->comm));
+ memset(iot, 0, sizeof(*iot)); /* zeroed times mean "event not seen yet" */
+ iot->ppm = find_ppm(pid);
+ if (!iot->ppm)
+ iot->ppm = add_ppm_hash(pid, "unknown");
iot->sector = sector;
track_rb_insert(pdi, iot);
}
if (!track_ios)
return;
- iot = find_track(pdi, t->pid, t->comm, t->sector);
+ iot = find_track(pdi, t->pid, t->sector);
iot->allocation_time = t->time;
}
+static inline int is_remapper(struct per_dev_info *pdi)
+{
+ int major = MAJOR(pdi->dev);
+
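+ /* 9 is md; 253 is the dynamic major that device-mapper typically gets */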
+ return (major == 253 || major == 9);
+}
+
+/*
+ * for md/dm setups, the interesting cycle is Q -> C. So track the
+ * queueing time here and store it as the dispatch time
+ */
+static void log_track_queue(struct per_dev_info *pdi, struct blk_io_trace *t)
+{
+ struct io_track *iot;
+
+ if (!track_ios)
+ return;
+ if (!is_remapper(pdi))
+ return;
+
+ iot = find_track(pdi, t->pid, t->sector);
+ iot->dispatch_time = t->time;
+}
+
/*
* return time between rq allocation and insertion
*/
if (!track_ios)
return -1;
- iot = find_track(pdi, t->pid, t->comm, t->sector);
+ iot = find_track(pdi, t->pid, t->sector);
iot->queue_time = t->time;
if (!iot->allocation_time)
elapsed = iot->queue_time - iot->allocation_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process(iot->pid,iot->comm);
+ struct per_process_info *ppi = find_ppi(iot->ppm->pid);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_allocation_wait[w])
elapsed = iot->dispatch_time - iot->queue_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process(iot->pid,iot->comm);
+ struct per_process_info *ppi = find_ppi(iot->ppm->pid);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_dispatch_wait[w])
if (!track_ios)
return -1;
- if ((t->action & BLK_TC_ACT(BLK_TC_FS)) == 0)
- return -1;
iot = __find_track(pdi, t->sector);
if (!iot) {
elapsed = iot->completion_time - iot->dispatch_time;
if (per_process_stats) {
- struct per_process_info *ppi = find_process(iot->pid,iot->comm);
+ struct per_process_info *ppi = find_ppi(iot->ppm->pid);
int w = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (ppi && elapsed > ppi->longest_completion_wait[w])
}
-static struct io_stats *find_process_io_stats(__u32 pid, char *name)
+static struct io_stats *find_process_io_stats(pid_t pid)
{
- struct per_process_info *ppi = find_process(pid, name);
+ struct per_process_info *ppi = find_ppi(pid);
if (!ppi) {
ppi = malloc(sizeof(*ppi));
memset(ppi, 0, sizeof(*ppi));
- memcpy(ppi->name, name, 16);
- ppi->pid = pid;
- add_process_to_hash(ppi);
- add_process_to_list(ppi);
+ ppi->ppm = find_ppm(pid);
+ if (!ppi->ppm)
+ ppi->ppm = add_ppm_hash(pid, "unknown");
+
+ add_ppi_to_hash(ppi);
+ add_ppi_to_list(ppi);
}
return &ppi->io_stats;
}
-static void resize_cpu_info(struct per_dev_info *pdi, int cpu)
-{
- struct per_cpu_info *cpus = pdi->cpus;
- int ncpus = pdi->ncpus;
- int new_count = cpu + 1;
- int new_space, size;
- char *new_start;
-
- size = new_count * sizeof(struct per_cpu_info);
- cpus = realloc(cpus, size);
- if (!cpus) {
- char name[20];
- fprintf(stderr, "Out of memory, CPU info for device %s (%d)\n",
- get_dev_name(pdi, name, sizeof(name)), size);
- exit(1);
- }
-
- new_start = (char *)cpus + (ncpus * sizeof(struct per_cpu_info));
- new_space = (new_count - ncpus) * sizeof(struct per_cpu_info);
- memset(new_start, 0, new_space);
-
- pdi->ncpus = new_count;
- pdi->cpus = cpus;
-
- for (new_count = 0; new_count < pdi->ncpus; new_count++)
- if (!pdi->cpus[new_count].fd)
- pdi->cpus[new_count].fd = -1;
-}
-
-static struct per_cpu_info *get_cpu_info(struct per_dev_info *pdi, int cpu)
-{
- struct per_cpu_info *pci;
-
- if (cpu >= pdi->ncpus)
- resize_cpu_info(pdi, cpu);
-
- pci = &pdi->cpus[cpu];
- pci->cpu = cpu;
- return pci;
-}
-
-
-static int resize_devices(char *name)
-{
- int size = (ndevices + 1) * sizeof(struct per_dev_info);
-
- devices = realloc(devices, size);
- if (!devices) {
- fprintf(stderr, "Out of memory, device %s (%d)\n", name, size);
- return 1;
- }
- memset(&devices[ndevices], 0, sizeof(struct per_dev_info));
- devices[ndevices].name = name;
- ndevices++;
- return 0;
-}
-
-static struct per_dev_info *get_dev_info(dev_t dev)
-{
- struct per_dev_info *pdi;
- int i;
-
- for (i = 0; i < ndevices; i++) {
- if (!devices[i].dev)
- devices[i].dev = dev;
- if (devices[i].dev == dev)
- return &devices[i];
- }
-
- if (resize_devices(NULL))
- return NULL;
-
- pdi = &devices[ndevices - 1];
- pdi->dev = dev;
- pdi->first_reported_time = 0;
- pdi->last_sequence = -1;
- pdi->last_read_time = 0;
- memset(&pdi->rb_last, 0, sizeof(pdi->rb_last));
- pdi->rb_last_entries = 0;
-
- pdi->skips_head = pdi->skips_tail = NULL;
-
- return pdi;
-}
-
static char *get_dev_name(struct per_dev_info *pdi, char *buffer, int size)
{
if (pdi->name)
__account_m(&pci->io_stats, t, rw);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_m(ios, t, rw);
}
__account_queue(&pci->io_stats, t, rw);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_queue(ios, t, rw);
}
__account_c(&pci->io_stats, rw, bytes);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_c(ios, rw, bytes);
}
__account_issue(&pci->io_stats, rw, t->bytes);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_issue(ios, rw, t->bytes);
}
__account_unplug(&pci->io_stats, timer);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_unplug(ios, timer);
}
__account_requeue(&pci->io_stats, t, rw);
if (per_process_stats) {
- struct io_stats *ios = find_process_io_stats(t->pid, t->comm);
+ struct io_stats *ios = find_process_io_stats(t->pid);
__account_requeue(ios, t, rw);
}
switch (act) {
case __BLK_TA_QUEUE:
+ log_track_queue(pdi, t);
account_queue(t, pci, w);
log_queue(pci, t, "Q");
break;
log_generic(pci, t, "S");
break;
case __BLK_TA_REQUEUE:
- pdi->cur_depth[w]--;
+ /*
+ * can happen if we miss traces, don't let it go
+ * below zero
+ */
+ if (pdi->cur_depth[w])
+ pdi->cur_depth[w]--;
account_requeue(t, pci, w);
log_queue(pci, t, "R");
break;
log_issue(pdi, pci, t, "D");
break;
case __BLK_TA_COMPLETE:
- pdi->cur_depth[w]--;
+ if (pdi->cur_depth[w])
+ pdi->cur_depth[w]--;
account_c(t, pci, w, t->bytes);
log_complete(pdi, pci, t, "C");
break;
struct per_process_info *ppi2 = *((struct per_process_info **) p2);
int res;
- res = strverscmp(ppi1->name, ppi2->name);
+ res = strverscmp(ppi1->ppm->comm, ppi2->ppm->comm);
if (!res)
- res = ppi1->pid > ppi2->pid;
+ res = ppi1->ppm->pid > ppi2->ppm->pid;
return res;
}
ppi = ppi_list;
while (ppi) {
+ struct process_pid_map *ppm = ppi->ppm;
char name[64];
if (ppi->more_than_one)
- sprintf(name, "%s (%u, ...)", ppi->name, ppi->pid);
+ sprintf(name, "%s (%u, ...)", ppm->comm, ppm->pid);
else
- sprintf(name, "%s (%u)", ppi->name, ppi->pid);
+ sprintf(name, "%s (%u)", ppm->comm, ppm->pid);
dump_io_stats(NULL, &ppi->io_stats, name);
dump_wait_stats(ppi);
}
}
-/*
- * struct trace and blktrace allocation cache, we do potentially
- * millions of mallocs for these structures while only using at most
- * a few thousand at the time
- */
-static inline void t_free(struct trace *t)
-{
- if (t_alloc_cache < 1024) {
- t->next = t_alloc_list;
- t_alloc_list = t;
- t_alloc_cache++;
- } else
- free(t);
-}
-
-static inline struct trace *t_alloc(void)
-{
- struct trace *t = t_alloc_list;
-
- if (t) {
- t_alloc_list = t->next;
- t_alloc_cache--;
- return t;
- }
-
- return malloc(sizeof(*t));
-}
-
-static inline void bit_free(struct blk_io_trace *bit)
-{
- if (bit_alloc_cache < 1024 && !bit->pdu_len) {
- /*
- * abuse a 64-bit field for a next pointer for the free item
- */
- bit->time = (__u64) (unsigned long) bit_alloc_list;
- bit_alloc_list = (struct blk_io_trace *) bit;
- bit_alloc_cache++;
- } else
- free(bit);
-}
-
-static inline struct blk_io_trace *bit_alloc(void)
-{
- struct blk_io_trace *bit = bit_alloc_list;
-
- if (bit) {
- bit_alloc_list = (struct blk_io_trace *) (unsigned long) \
- bit->time;
- bit_alloc_cache--;
- return bit;
- }
-
- return malloc(sizeof(*bit));
-}
-
static void find_genesis(void)
{
struct trace *t = trace_list;
*/
static int sort_entries(unsigned long long *youngest)
{
+ struct per_dev_info *pdi = NULL;
+ struct per_cpu_info *pci = NULL;
struct trace *t;
if (!genesis_time)
if (bit->time < *youngest || !*youngest)
*youngest = bit->time;
- if (bit->sequence < smallest_seq_read)
- smallest_seq_read = bit->sequence;
+ if (!pdi || pdi->dev != bit->device) {
+ pdi = get_dev_info(bit->device);
+ pci = NULL;
+ }
+
+ if (!pci || pci->cpu != bit->cpu)
+ pci = get_cpu_info(pdi, bit->cpu);
+
+ if (bit->sequence < pci->smallest_seq_read)
+ pci->smallest_seq_read = bit->sequence;
if (check_stopwatch(bit)) {
bit_free(bit);
return 0;
}
-static inline void __put_trace_last(struct per_dev_info *pdi, struct trace *t)
-{
- rb_erase(&t->rb_node, &pdi->rb_last);
- pdi->rb_last_entries--;
-
- bit_free(t->bit);
- t_free(t);
-}
-
-static void put_trace(struct per_dev_info *pdi, struct trace *t)
-{
- rb_erase(&t->rb_node, &rb_sort_root);
- rb_sort_entries--;
-
- trace_rb_insert_last(pdi, t);
-
- if (pdi->rb_last_entries > rb_batch * pdi->nfiles) {
- struct rb_node *n = rb_first(&pdi->rb_last);
-
- t = rb_entry(n, struct trace, rb_node);
- __put_trace_last(pdi, t);
- }
-}
-
/*
* to continue, we must have traces from all online cpus in the tree
*/
static int check_sequence(struct per_dev_info *pdi, struct trace *t, int force)
{
- unsigned long expected_sequence = pdi->last_sequence + 1;
struct blk_io_trace *bit = t->bit;
+ unsigned long expected_sequence;
+ struct per_cpu_info *pci;
struct trace *__t;
+ pci = get_cpu_info(pdi, bit->cpu);
+ expected_sequence = pci->last_sequence + 1;
+
if (!expected_sequence) {
/*
* 1 should be the first entry, just allow it
*/
if (bit->sequence == 1)
return 0;
- if (bit->sequence == smallest_seq_read)
+ if (bit->sequence == pci->smallest_seq_read)
return 0;
return check_cpu_map(pdi);
* we may not have seen that sequence yet. if we are not doing
* the final run, break and wait for more entries.
*/
- if (expected_sequence < smallest_seq_read) {
- __t = trace_rb_find_last(pdi, expected_sequence);
+ if (expected_sequence < pci->smallest_seq_read) {
+ __t = trace_rb_find_last(pdi, pci, expected_sequence);
if (!__t)
goto skip;
return 1;
} else {
skip:
- if (check_current_skips(pdi,bit->sequence))
+ if (check_current_skips(pci, bit->sequence))
return 0;
if (expected_sequence < bit->sequence)
- insert_skip(pdi, expected_sequence, bit->sequence - 1);
+ insert_skip(pci, expected_sequence, bit->sequence - 1);
return 0;
}
}
t = rb_entry(n, struct trace, rb_node);
bit = t->bit;
- if (!pdi || pdi->dev != bit->device)
+ if (read_sequence - t->read_sequence < 1 && !force)
+ break;
+
+ if (!pdi || pdi->dev != bit->device) {
pdi = get_dev_info(bit->device);
+ pci = NULL;
+ }
if (!pdi) {
fprintf(stderr, "Unknown device ID? (%d,%d)\n",
if (!force && bit->time > last_allowed_time)
break;
- pdi->last_sequence = bit->sequence;
-
check_time(pdi, bit);
if (!pci || pci->cpu != bit->cpu)
pci = get_cpu_info(pdi, bit->cpu);
+ pci->last_sequence = bit->sequence;
+
pci->nelems++;
if (bit->action & (act_mask << BLK_TC_SHIFT))
dump_trace(bit, pci, pdi);
put_trace(pdi, t);
}
}
-static int read_data(int fd, void *buffer, int bytes, int block)
+static int read_data(int fd, void *buffer, int bytes, int block, int *fdblock)
{
int ret, bytes_left, fl;
void *p;
- fl = fcntl(fd, F_GETFL);
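+ /* only flip O_NONBLOCK when the cached mode differs from what is wanted */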
+ if (block != *fdblock) {
+ fl = fcntl(fd, F_GETFL);
- if (!block)
- fcntl(fd, F_SETFL, fl | O_NONBLOCK);
- else
- fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
+ if (!block) {
+ *fdblock = 0;
+ fcntl(fd, F_SETFL, fl | O_NONBLOCK);
+ } else {
+ *fdblock = 1;
+ fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
+ }
+ }
bytes_left = bytes;
p = buffer;
if (!ret)
return 1;
else if (ret < 0) {
- if (errno != EAGAIN)
+ if (errno != EAGAIN) {
perror("read");
+ return -1;
+ }
- return -1;
+ /*
+ * never do partial reads. we can return if we
+ * didn't read anything and we should not block,
+ * otherwise wait for data
+ */
+ if ((bytes_left == bytes) && !block)
+ return 1;
+
+ usleep(10);
+ continue;
} else {
p += ret;
bytes_left -= ret;
return 0;
}
-static int read_events(int fd, int always_block)
+static inline __u16 get_pdulen(struct blk_io_trace *bit)
+{
+ if (data_is_native)
+ return bit->pdu_len;
+
+ return __bswap_16(bit->pdu_len);
+}
+
+static inline __u32 get_magic(struct blk_io_trace *bit)
+{
+ if (data_is_native)
+ return bit->magic;
+
+ return __bswap_32(bit->magic);
+}
+
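check_data_endianness() itself is not in this hunk; a minimal sketch consistent with how data_is_native, get_magic() and get_pdulen() are used (assumed, not taken from the patch):

static int check_data_endianness(__u32 magic)
{
	if ((magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		data_is_native = 1;
		return 0;
	}

	if ((__bswap_32(magic) & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		data_is_native = 0;
		return 0;
	}

	return 1;	/* neither byte order yields the trace magic */
}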
+static int read_events(int fd, int always_block, int *fdblock)
{
struct per_dev_info *pdi = NULL;
- unsigned int events = 0;
+ int events = 0; /* signed: a negative read_data() result is passed back */
while (!is_done() && events < rb_batch) {
struct blk_io_trace *bit;
struct trace *t;
- int pdu_len;
+ int pdu_len, should_block, ret;
__u32 magic;
bit = bit_alloc();
- if (read_data(fd, bit, sizeof(*bit), !events || always_block)) {
+ should_block = !events || always_block;
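+ /* always block for the first event of a batch; later reads may return early */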
+
+ ret = read_data(fd, bit, sizeof(*bit), should_block, fdblock);
+ if (ret) {
bit_free(bit);
+ if (!events && ret < 0)
+ events = ret;
break;
}
- magic = be32_to_cpu(bit->magic);
+ /*
+ * look at the first trace to decide whether the data will need
+ * byte swapping (see check_data_endianness())
+ */
+ if (data_is_native == -1 && check_data_endianness(bit->magic))
+ break;
+
+ magic = get_magic(bit);
if ((magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
fprintf(stderr, "Bad magic %x\n", magic);
break;
}
- pdu_len = be16_to_cpu(bit->pdu_len);
+ pdu_len = get_pdulen(bit);
if (pdu_len) {
void *ptr = realloc(bit, sizeof(*bit) + pdu_len);
- if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1)) {
+ if (read_data(fd, ptr + sizeof(*bit), pdu_len, 1, fdblock)) {
bit_free(ptr);
break;
}
continue;
}
+ /*
+ * not a real trace, so grab and handle it here
+ */
+ if (bit->action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
+ add_ppm_hash(bit->pid, (char *) bit + sizeof(*bit));
+ continue;
+ }
+
t = t_alloc();
memset(t, 0, sizeof(*t));
t->bit = bit;
+ t->read_sequence = read_sequence;
t->next = trace_list;
trace_list = t;
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
pdi->nfiles = 0;
- pdi->last_sequence = -1;
for (j = 0;; j++) {
struct stat st;
pci = get_cpu_info(pdi, j);
pci->cpu = j;
pci->fd = -1;
+ pci->fdblock = -1;
p = strdup(pdi->name);
dname = dirname(p);
events_added = 0;
last_allowed_time = -1ULL;
- smallest_seq_read = -1U;
+ read_sequence++;
for (i = 0; i < ndevices; i++) {
pdi = &devices[i];
+ pdi->last_read_time = -1ULL;
for (j = 0; j < pdi->nfiles; j++) {
if (pci->fd == -1)
continue;
- events = read_events(pci->fd, 1);
- if (!events) {
+ pci->smallest_seq_read = -1;
+
+ events = read_events(pci->fd, 1, &pci->fdblock);
+ if (events <= 0) {
cpu_mark_offline(pdi, pci->cpu);
close(pci->fd);
pci->fd = -1;
static int do_stdin(void)
{
unsigned long long youngest;
- int fd, events;
+ int fd, events, fdblock;
last_allowed_time = -1ULL;
fd = dup(STDIN_FILENO);
return -1;
}
- while ((events = read_events(fd, 0)) != 0) {
+ fdblock = -1;
+ while ((events = read_events(fd, 0, &fdblock)) > 0) {
+ read_sequence++;
- smallest_seq_read = -1U;
if (sort_entries(&youngest))
break;