 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #define N_DEV_HASH 128
25 #define DEV_HASH(dev) ((MAJOR(dev) ^ MINOR(dev)) & (N_DEV_HASH - 1))
26 struct list_head dev_heads[N_DEV_HASH];
28 static inline void *dip_rb_mkhds(void)
30 size_t len = N_IOP_TYPES * sizeof(struct rb_root);
31 return memset(malloc(len), 0, len);
34 static void __destroy(struct rb_node *n)
37 struct io *iop = rb_entry(n, struct io, rb_node);
39 __destroy(n->rb_left);
40 __destroy(n->rb_right);
45 static void __destroy_heads(struct rb_root *roots)
49 for (i = 0; i < N_IOP_TYPES; i++)
50 __destroy(roots[i].rb_node);
56 void __dump_rb_node(struct rb_node *n)
58 struct io *iop = rb_entry(n, struct io, rb_node);
61 __dump_iop(stdout, iop, 0);
63 __dump_rb_node(n->rb_left);
65 __dump_rb_node(n->rb_right);
68 void __dump_rb_tree(struct d_info *dip, enum iop_type type)
70 struct rb_root *roots = dip->heads;
71 struct rb_root *root = &roots[type];
72 struct rb_node *n = root->rb_node;
75 printf("\tIOP_%c\n", type2c(type));
80 void dump_rb_trees(void)
87 for (i = 0; i < N_DEV_HASH; i++) {
88 __list_for_each(p, &dev_heads[i]) {
89 dip = list_entry(p, struct d_info, hash_head);
90 printf("Trees for %3d,%-3d\n", MAJOR(dip->device),
92 for (type = IOP_Q; type < N_IOP_TYPES; type++)
93 __dump_rb_tree(dip, type);
99 void init_dev_heads(void)
102 for (i = 0; i < N_DEV_HASH; i++)
103 INIT_LIST_HEAD(&dev_heads[i]);
106 struct d_info *__dip_find(__u32 device)
111 __list_for_each(p, &dev_heads[DEV_HASH(device)]) {
112 dip = list_entry(p, struct d_info, hash_head);
113 if (device == dip->device)
123 struct list_head *p, *q;
125 list_for_each_safe(p, q, &all_devs) {
126 dip = list_entry(p, struct d_info, all_head);
128 __destroy_heads(dip->heads);
129 region_exit(&dip->regions);
130 seeki_exit(dip->seek_handle);
131 seeki_exit(dip->q2q_handle);
132 bno_dump_exit(dip->bno_dump_handle);
133 unplug_hist_exit(dip->unplug_hist_handle);
135 q2d_release(dip->q2d_priv);
140 static inline char *mkhandle(char *str, __u32 device, char *post)
142 int mjr = device >> MINORBITS;
143 int mnr = device & ((1 << MINORBITS) - 1);
145 sprintf(str, "%03d,%03d%s", mjr, mnr, post);
149 struct d_info *dip_add(__u32 device, struct io *iop)
151 struct d_info *dip = __dip_find(device);
156 dip = malloc(sizeof(struct d_info));
157 memset(dip, 0, sizeof(*dip));
158 dip->heads = dip_rb_mkhds();
159 region_init(&dip->regions);
160 dip->device = device;
161 dip->last_q = (__u64)-1;
162 dip->map = dev_map_find(device);
163 dip->bno_dump_handle = bno_dump_init(device);
164 dip->unplug_hist_handle = unplug_hist_init(device);
165 dip->seek_handle = seeki_init(mkhandle(str, device, "_d2d"));
166 dip->q2q_handle = seeki_init(mkhandle(str, device, "_q2q"));
168 list_add_tail(&dip->hash_head, &dev_heads[DEV_HASH(device)]);
169 list_add_tail(&dip->all_head, &all_devs);
170 dip->start_time = BIT_TIME(iop->t.time);
171 dip->pre_culling = 1;
173 dip->q2d_priv = q2d_init();
177 if (dip->pre_culling) {
178 if (iop->type == IOP_Q || iop->type == IOP_A)
179 dip->pre_culling = 0;
184 iop->linked = dip_rb_ins(dip, iop);
185 dip->end_time = BIT_TIME(iop->t.time);
195 void dip_rem(struct io *iop)
203 void dip_foreach(struct io *iop, enum iop_type type,
204 void (*fnc)(struct io *iop, struct io *this), int rm_after)
209 struct list_head *p, *q;
211 dip_rb_fe(iop->dip, type, iop, fnc, &head);
212 list_for_each_safe(p, q, &head) {
213 this = list_entry(p, struct io, f_head);
214 LIST_DEL(&this->f_head);
219 dip_rb_fe(iop->dip, type, iop, fnc, NULL);
222 void dip_foreach_list(struct io *iop, enum iop_type type, struct list_head *hd)
224 dip_rb_fe(iop->dip, type, iop, NULL, hd);
227 struct io *dip_find_sec(struct d_info *dip, enum iop_type type, __u64 sec)
229 return dip_rb_find_sec(dip, type, sec);
232 void dip_foreach_out(void (*func)(struct d_info *, void *), void *arg)
234 if (devices == NULL) {
236 __list_for_each(p, &all_devs)
237 func(list_entry(p, struct d_info, all_head), arg);
242 unsigned int mjr, mnr;
245 while (p && ((i = sscanf(p, "%u,%u", &mjr, &mnr)) == 2)) {
246 dip = __dip_find((__u32)((mjr << MINORBITS) | mnr));
257 void dip_plug(__u32 dev, double cur_time)
259 struct d_info *dip = __dip_find(dev);
261 if (!dip || dip->is_plugged) return;
264 dip->last_plug = cur_time;
267 void dip_unplug(__u32 dev, double cur_time, __u64 nios_up)
269 struct d_info *dip = __dip_find(dev);
271 if (dip && dip->is_plugged) {
273 dip->plugged_time += (cur_time - dip->last_plug);
275 dip->nios_up += nios_up;
279 void dip_unplug_tm(__u32 dev, __u64 nios_up)
281 struct d_info *dip = __dip_find(dev);
283 if (dip && dip->is_plugged) {
284 dip->n_timer_unplugs++;
285 dip->nios_upt += nios_up;