[PATCH] Added iostat-style statistics to btt
[blktrace.git] / btt / trace.c
/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include "globals.h"

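/*
 * Drop an io we are completely done with, freeing its trace payload
 * (pdu), if any, along with the io itself.
 */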
static inline void release_iop(struct io *iop)
{
        if (iop->pdu) free(iop->pdu);
        IO_FREE(iop);
}

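/*
 * Find an io on the given per-device list whose block range exactly
 * matches that of iop_in.
 */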
struct io *dip_find_exact(struct list_head *head, struct io *iop_in)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (is_bit(iop_in, iop))
                        return iop;
        }
        return NULL;
}

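/*
 * Find an io on the given per-device list that in_bit() pairs with
 * iop_in (i.e. whose blocks fall within iop_in's range).
 */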
struct io *dip_find_in(struct list_head *head, struct io *iop_in)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (in_bit(iop, iop_in))
                        return iop;
        }
        return NULL;
}

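/*
 * Find an io on the given per-device list whose block range starts at
 * the given sector.
 */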
struct io *dip_find_start(struct list_head *head, __u64 sector)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (BIT_START(iop) == sector)
                        return iop;
        }
        return NULL;
}

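/*
 * Find an io on the given per-device list whose block range ends at
 * the given sector.
 */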
struct io *dip_find_end(struct list_head *head, __u64 sector)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (BIT_END(iop) == sector)
                        return iop;
        }
        return NULL;
}

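/*
 * Find an io on the given per-device list whose block range contains
 * the given sector.
 */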
struct io *dip_find_in_sec(struct list_head *head, __u64 sector)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (BIT_START(iop) <= sector && sector <= BIT_END(iop))
                        return iop;
        }
        return NULL;
}

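/*
 * Find the io on the given per-device list that was traced on the same
 * CPU with the immediately preceding sequence number.
 */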
struct io *dip_find_qa(struct list_head *head, struct blk_io_trace *t)
{
        struct io *iop;
        struct list_head *p;

        if (head != NULL) __list_for_each(p, head) {
                iop = list_entry(p, struct io, dev_head);
                if (iop->t.cpu == t->cpu && iop->t.sequence == (t->sequence-1))
                        return iop;
        }
        return NULL;
}

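/*
 * Attach every merge (M) trace on this device whose blocks fall within
 * the issued request d_iop to d_iop's list of constituents.
 */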
void dip_add_ms(struct list_head *head, struct io *d_iop)
{
        struct io *m_iop;
        struct list_head *p;
        struct io_list *iolp;

        if (head != NULL) __list_for_each(p, head) {
                m_iop = list_entry(p, struct io, dev_head);
                if (in_bit(m_iop, d_iop)) {
                        iolp = malloc(sizeof(*iolp));
                        io_link(&iolp->iop, m_iop);
                        list_add_tail(&iolp->head, &d_iop->u.d.d_im_head);
                }
        }
}

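/*
 * Attach every queue (Q) trace on this device whose blocks fall within
 * the inserted request i_iop to i_iop's list of constituents.
 */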
void dip_add_qs(struct list_head *head, struct io *i_iop)
{
        struct io *q_iop;
        struct list_head *p;
        struct io_list *iolp;

        if (head != NULL) __list_for_each(p, head) {
                q_iop = list_entry(p, struct io, dev_head);
                if (in_bit(q_iop, i_iop)) {
                        iolp = malloc(sizeof(*iolp));
                        io_link(&iolp->iop, q_iop);
                        list_add_tail(&iolp->head, &i_iop->u.i.i_qs_head);
                }
        }
}

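/*
 * Queue (Q) trace: update Q2Q and queue-region statistics, and link the
 * Q to a matching remap (A) if one exists.
 */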
void handle_queue(struct io *iop)
{
        struct io *tmp;

        io_setup(iop, IOP_Q);

        update_lq(&last_q, &all_avgs.q2q, iop->t.time);
        update_qregion(&all_regions, iop->t.time);
        dip_update_q(iop->dip, iop);
        pip_update_q(iop);

        tmp = dip_find_exact(dip_get_head(iop->dip, IOP_A), iop);
        if (tmp) {
                iop->u.q.qp_type = Q_A;
                io_link(&iop->u.q.qp.q_a, tmp);
        }
        else
                iop->u.q.qp_type = Q_NONE;
}

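/*
 * Front/back merge (M): link the merge to its originating Q trace and
 * update the iostat-style merge statistics.
 */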
void handle_merge(struct io *iop)
{
        struct io *q_iop;

        io_setup(iop, IOP_M);

        q_iop = dip_find_exact(dip_get_head(iop->dip, IOP_Q), iop);
        if (q_iop)
                io_link(&iop->u.m.m_q, q_iop);

        iostat_merge(iop);
}

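/*
 * Insert (I): the request enters the request queue; gather its
 * constituent Q traces and update the iostat-style insert statistics.
 */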
void handle_insert(struct io *iop)
{
        io_setup(iop, IOP_I);
        INIT_LIST_HEAD(&iop->u.i.i_qs_head);
        dip_add_qs(dip_get_head(iop->dip, IOP_Q), iop);

        iostat_insert(iop);
}

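/*
 * Complete (C): update completion regions (global, per-device and
 * per-process); if the matching issue (D) is found, link it and update
 * the iostat-style completion statistics, otherwise drop the trace.
 */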
void handle_complete(struct io *iop)
{
        struct io *d_iop;

        io_setup(iop, IOP_C);
        update_blks(iop);
        update_cregion(&all_regions, iop->t.time);
        update_cregion(&iop->dip->regions, iop->t.time);
        if (iop->pip)
                update_cregion(&iop->pip->regions, iop->t.time);

        d_iop = dip_find_exact(dip_get_head(iop->dip, IOP_D), iop);
        if (d_iop) {
                io_link(&iop->u.c.c_d, d_iop);
                iostat_complete(iop);
                add_cy(iop);
        }
        else
                io_free(iop);
}

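/*
 * Issue (D): the request is sent to the driver; link the corresponding
 * insert (I) and any merges (M), record the seek, and update the
 * iostat-style issue statistics.
 */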
void handle_issue(struct io *iop)
{
        struct io *i_iop;

        io_setup(iop, IOP_D);
        iop->dip->n_ds++;

        INIT_LIST_HEAD(&iop->u.d.d_im_head);
        i_iop = dip_find_in(dip_get_head(iop->dip, IOP_I), iop);
        if (i_iop) {
                struct io_list *iolp = malloc(sizeof(*iolp));

                io_link(&iolp->iop, i_iop);
                list_add_tail(&iolp->head, &iop->u.d.d_im_head);
        }

        dip_add_ms(dip_get_head(iop->dip, IOP_M), iop);
        seeki_add(iop->dip->seek_handle, iop);

        iostat_issue(iop);
}

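/*
 * Split (X): note the pending split and link it back to its
 * originating Q trace.
 */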
void handle_split(struct io *iop)
{
        struct io *q_iop;

        pending_xs++;
        io_setup(iop, IOP_X);

        q_iop = dip_find_exact(dip_get_head(iop->dip, IOP_Q), iop);
        if (q_iop)
                io_link(&iop->u.x.x_q, q_iop);
}

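/*
 * Remap (A): resolve the remap back to a Q (or an earlier A) on the
 * original device so the chain can be followed across remapped devices.
 */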
void handle_remap(struct io *iop)
{
        struct io *q_iop, *a_iop;
        struct blk_io_trace_remap *rp = iop->pdu;
        __u32 dev = be32_to_cpu(rp->device);
        __u64 sector = be64_to_cpu(rp->sector);

        io_setup(iop, IOP_A);
        q_iop = dip_find_in_sec(dip_get_head_dev(dev, IOP_Q), sector);
        if (q_iop) {
                iop->u.a.ap_type = A_Q;
                io_link(&iop->u.a.ap.a_q, q_iop);
                return;
        }

        a_iop = dip_find_in_sec(dip_get_head_dev(dev, IOP_A), sector);
        if (a_iop) {
                iop->u.a.ap_type = A_A;
                io_link(&iop->u.a.ap.a_a, a_iop);
                return;
        }

        iop->u.a.ap_type = A_NONE;
}

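/*
 * Tear down an insert's list of attached Q traces, dropping each Q's
 * reference with __io_put() rather than a full io_put() -- see the
 * requeue handling below.
 */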
void extract_i(struct io *i_iop)
{
        struct io_list *iolp;
        struct list_head *p, *q;

        ASSERT(i_iop != NULL && i_iop->type == IOP_I);
        list_for_each_safe(p, q, &i_iop->u.i.i_qs_head) {
                iolp = list_entry(p, struct io_list, head);
                LIST_DEL(&iolp->head);

                ASSERT(iolp->iop->type == IOP_Q);
                (void)__io_put(iolp->iop);

                free(iolp);
        }
}

/*
 * Careful surgery
 * (1) Need to remove D & its I & M's
 * (2) Need to leave I's Q and M's Q's -- *no* io_put (__io_put instead)
 */
void handle_requeue(struct io *iop)
{
        struct io *d_iop;
        struct io_list *iolp;
        struct list_head *p, *q;

        d_iop = dip_find_start(dip_get_head_dev(iop->t.device, IOP_D),
                               iop->t.sector);
        if (d_iop) {
                list_for_each_safe(p, q, &d_iop->u.d.d_im_head) {
                        iolp = list_entry(p, struct io_list, head);
                        LIST_DEL(&iolp->head);

                        if (iolp->iop->type == IOP_M)
                                (void)__io_put(iolp->iop->u.m.m_q);
                        else
                                extract_i(iolp->iop);

                        iolp->iop->users = 0;
                        io_free(iolp->iop);
                        free(iolp);
                }

                d_iop->users = 0;
                io_free(d_iop);
        }

        release_iop(iop);
}

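/*
 * Dispatch a parsed trace to its per-action handler, keeping running
 * totals and emitting optional progress output in verbose mode.
 */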
void __add_trace(struct io *iop)
{
        n_traces++;

        iostat_check_time(iop->t.time);
        if (verbose && (n_traces % 10000) == 0) {
                printf("%10lu t, %10lu m %1lu f\r",
                       n_traces, n_io_allocs, n_io_frees);
                fflush(stdout);
                if ((n_traces % 1000000) == 0) printf("\n");
        }

        switch (iop->t.action & 0xffff) {
        case __BLK_TA_QUEUE:      handle_queue(iop); break;
        case __BLK_TA_BACKMERGE:  handle_merge(iop); break;
        case __BLK_TA_FRONTMERGE: handle_merge(iop); break;
        case __BLK_TA_ISSUE:      handle_issue(iop); break;
        case __BLK_TA_COMPLETE:   handle_complete(iop); break;
        case __BLK_TA_INSERT:     handle_insert(iop); break;
        case __BLK_TA_SPLIT:      handle_split(iop); break;
        case __BLK_TA_REMAP:      handle_remap(iop); break;
        case __BLK_TA_REQUEUE:    handle_requeue(iop); break;
        }
}

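/*
 * Entry point for every trace read from the input: notify traces carry
 * a process name in the payload and are consumed here, PC (pass-through
 * command) traces are dropped, and everything else is handed to
 * __add_trace().
 */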
void add_trace(struct io *iop)
{
        if (iop->t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
                char *slash = strchr(iop->pdu, '/');

                if (slash)
                        *slash = '\0';
                add_process(iop->t.pid, iop->pdu);
                release_iop(iop);
        }
        else if (iop->t.action & BLK_TC_ACT(BLK_TC_PC))
                release_iop(iop);
        else
                __add_trace(iop);
}