Added in running stats for btt
[blktrace.git] / btt / devs.c
... / ...
CommitLineData
1/*
2 * blktrace output analysis: generate a timeline & gather statistics
3 *
4 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <stdio.h>
22#include "globals.h"
23
24#define N_DEV_HASH 128
25#define DEV_HASH(dev) ((MAJOR(dev) ^ MINOR(dev)) & (N_DEV_HASH - 1))
26struct list_head dev_heads[N_DEV_HASH];
27
28static inline void *dip_rb_mkhds(void)
29{
30 size_t len = N_IOP_TYPES * sizeof(struct rb_root);
31 return memset(malloc(len), 0, len);
32}
33
34static void __destroy(struct rb_node *n)
35{
36 if (n) {
37 struct io *iop = rb_entry(n, struct io, rb_node);
38
39 __destroy(n->rb_left);
40 __destroy(n->rb_right);
41 io_release(iop);
42 }
43}
44
45static void __destroy_heads(struct rb_root *roots)
46{
47 int i;
48
49 for (i = 0; i < N_IOP_TYPES; i++)
50 __destroy(roots[i].rb_node);
51
52 free(roots);
53}
54
55void init_dev_heads(void)
56{
57 int i;
58 for (i = 0; i < N_DEV_HASH; i++)
59 INIT_LIST_HEAD(&dev_heads[i]);
60}
61
62struct d_info *__dip_find(__u32 device)
63{
64 struct d_info *dip;
65 struct list_head *p;
66
67 __list_for_each(p, &dev_heads[DEV_HASH(device)]) {
68 dip = list_entry(p, struct d_info, hash_head);
69 if (device == dip->device)
70 return dip;
71 }
72
73 return NULL;
74}
75
76void __dip_exit(struct d_info *dip)
77{
78 list_del(&dip->all_head);
79 __destroy_heads(dip->heads);
80 region_exit(&dip->regions);
81 seeki_free(dip->seek_handle);
82 seeki_free(dip->q2q_handle);
83 aqd_free(dip->aqd_handle);
84 plat_free(dip->q2d_plat_handle);
85 plat_free(dip->q2c_plat_handle);
86 plat_free(dip->d2c_plat_handle);
87 bno_dump_free(dip->bno_dump_handle);
88 unplug_hist_free(dip->up_hist_handle);
89 rstat_free(dip->rstat_handle);
90 if (output_all_data)
91 q2d_free(dip->q2d_priv);
92 if (dip->pit_fp)
93 fclose(dip->pit_fp);
94 free(dip);
95}
96
97void dip_exit(void)
98{
99 struct list_head *p, *q;
100
101 list_for_each_safe(p, q, &all_devs) {
102 struct d_info *dip = list_entry(p, struct d_info, all_head);
103 __dip_exit(dip);
104 }
105}
106
107static inline char *mkhandle(char *str, __u32 device, char *post)
108{
109 int mjr = device >> MINORBITS;
110 int mnr = device & ((1 << MINORBITS) - 1);
111
112 sprintf(str, "%03d,%03d%s", mjr, mnr, post);
113 return str;
114}
115
/*
 * Open a per-io-tree output file for writing.  On failure the error is
 * reported via perror() and NULL is returned for the caller to store.
 */
static inline FILE *open_pit(char *str)
{
	FILE *fp = my_fopen(str, "w");

	if (!fp)
		perror(str);
	return fp;
}
125
126struct d_info *dip_alloc(__u32 device, struct io *iop)
127{
128 struct d_info *dip = __dip_find(device);
129
130 if (dip == NULL) {
131 char str[256];
132
133 dip = malloc(sizeof(struct d_info));
134 memset(dip, 0, sizeof(*dip));
135 dip->heads = dip_rb_mkhds();
136 region_init(&dip->regions);
137 dip->device = device;
138 dip->last_q = (__u64)-1;
139 dip->devmap = dev_map_find(device);
140 dip->bno_dump_handle = bno_dump_alloc(device);
141 dip->up_hist_handle = unplug_hist_alloc(device);
142 dip->seek_handle = seeki_alloc(mkhandle(str, device, "_d2d"));
143 dip->q2q_handle = seeki_alloc(mkhandle(str, device, "_q2q"));
144 dip->aqd_handle = aqd_alloc(mkhandle(str, device, "_aqd"));
145 dip->q2d_plat_handle =
146 plat_alloc(mkhandle(str, device, "_q2d_plat"));
147 dip->q2c_plat_handle =
148 plat_alloc(mkhandle(str, device, "_q2c_plat"));
149 dip->d2c_plat_handle =
150 plat_alloc(mkhandle(str, device, "_d2c_plat"));
151 latency_alloc(dip);
152 list_add_tail(&dip->hash_head, &dev_heads[DEV_HASH(device)]);
153 list_add_tail(&dip->all_head, &all_devs);
154 dip->start_time = BIT_TIME(iop->t.time);
155 dip->pre_culling = 1;
156 dip->rstat_handle = rstat_alloc(mkhandle(str, device, ""));
157 if (output_all_data)
158 dip->q2d_priv = q2d_alloc();
159 n_devs++;
160 if (per_io_trees)
161 dip->pit_fp = open_pit(mkhandle(per_io_trees,
162 device, "_pit.dat"));
163 }
164
165 if (dip->pre_culling) {
166 if (iop->type == IOP_Q || iop->type == IOP_A)
167 dip->pre_culling = 0;
168 else
169 return NULL;
170 }
171
172 iop->linked = dip_rb_ins(dip, iop);
173 dip->end_time = BIT_TIME(iop->t.time);
174
175 return dip;
176}
177
178void iop_rem_dip(struct io *iop)
179{
180 if (iop->linked) {
181 dip_rb_rem(iop);
182 iop->linked = 0;
183 }
184}
185
186void dip_foreach(struct io *iop, enum iop_type type,
187 void (*fnc)(struct io *iop, struct io *this), int rm_after)
188{
189 if (rm_after) {
190 LIST_HEAD(head);
191 struct io *this;
192 struct list_head *p, *q;
193
194 dip_rb_fe(iop->dip, type, iop, fnc, &head);
195 list_for_each_safe(p, q, &head) {
196 this = list_entry(p, struct io, f_head);
197 list_del(&this->f_head);
198 io_release(this);
199 }
200 } else
201 dip_rb_fe(iop->dip, type, iop, fnc, NULL);
202}
203
/*
 * List-collection variant of dip_foreach(): ios of @type found by
 * dip_rb_fe() for @iop's device are appended to @hd instead of being
 * handed to a callback.
 */
void dip_foreach_list(struct io *iop, enum iop_type type, struct list_head *hd)
{
	dip_rb_fe(iop->dip, type, iop, NULL, hd);
}
208
/*
 * Thin wrapper: search @dip's rbtree for @type for the io matching
 * sector @sec -- delegates entirely to dip_rb_find_sec().
 */
struct io *dip_find_sec(struct d_info *dip, enum iop_type type, __u64 sec)
{
	return dip_rb_find_sec(dip, type, sec);
}
213
214void dip_foreach_out(void (*func)(struct d_info *, void *), void *arg)
215{
216 if (devices == NULL) {
217 struct list_head *p;
218 __list_for_each(p, &all_devs)
219 func(list_entry(p, struct d_info, all_head), arg);
220 } else {
221 int i;
222 struct d_info *dip;
223 unsigned int mjr, mnr;
224 char *p = devices;
225
226 while (p && ((i = sscanf(p, "%u,%u", &mjr, &mnr)) == 2)) {
227 dip = __dip_find((__u32)((mjr << MINORBITS) | mnr));
228 func(dip, arg);
229 p = strchr(p, ';');
230 if (p) p++;
231 }
232 }
233}
234
235void dip_plug(__u32 dev, double cur_time)
236{
237 struct d_info *dip = __dip_find(dev);
238
239 if (dip && !dip->is_plugged) {
240 dip->is_plugged = 1;
241 dip->last_plug = cur_time;
242 }
243}
244
245static inline void unplug(struct d_info *dip, double cur_time)
246{
247 dip->is_plugged = 0;
248 dip->plugged_time += (cur_time - dip->last_plug);
249}
250
251void dip_unplug(__u32 dev, double cur_time, __u64 nios_up)
252{
253 struct d_info *dip = __dip_find(dev);
254
255 if (dip && dip->is_plugged) {
256 dip->nplugs++;
257 dip->nios_up += nios_up;
258 unplug(dip, cur_time);
259 }
260}
261
262void dip_unplug_tm(__u32 dev, double cur_time, __u64 nios_up)
263{
264 struct d_info *dip = __dip_find(dev);
265
266 if (dip && dip->is_plugged) {
267 dip->nios_upt += nios_up;
268 dip->nplugs_t++;
269 unplug(dip, cur_time);
270 }
271}
272
273void dip_cleanup(void)
274{
275 struct list_head *p, *q;
276
277 list_for_each_safe(p, q, &all_devs) {
278 struct d_info *dip = list_entry(p, struct d_info, all_head);
279
280 if (dip->n_qs == 0 && dip->n_ds == 0)
281 __dip_exit(dip);
282 }
283}