2 * blktrace output analysis: generate a timeline & gather statistics
4 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/*
 * release_iop() - release an io structure's trace payload.
 * NOTE(review): this view is elided; only the pdu free is visible here.
 * The `if (iop->pdu)` guard is redundant -- free(NULL) is a no-op --
 * but is left as-is in this documentation-only pass.
 */
23 static inline void release_iop(struct io *iop)
25 	if (iop->pdu) free(iop->pdu);
/*
 * dip_find_exact() - scan a per-device io list for an entry whose block
 * extent exactly matches iop_in (is_bit() test on start/size).
 * Returns the matching io (return path elided from this view;
 * presumably NULL when no match -- confirm against full source).
 */
29 struct io *dip_find_exact(struct list_head *head, struct io *iop_in)
34 	__list_for_each(p, head) {
35 		iop = list_entry(p, struct io, dev_head);
36 		if (is_bit(iop_in, iop))
/*
 * dip_find_in() - scan a per-device io list for an entry whose block
 * extent contains iop_in's extent (in_bit() test).
 * Return path elided from this view -- presumably the match or NULL.
 */
42 struct io *dip_find_in(struct list_head *head, struct io *iop_in)
47 	__list_for_each(p, head) {
48 		iop = list_entry(p, struct io, dev_head);
49 		if (in_bit(iop, iop_in))
/*
 * dip_find_start() - find the io on a per-device list whose extent
 * begins exactly at @sector (BIT_START() comparison).
 * Return path elided from this view -- presumably the match or NULL.
 */
55 struct io *dip_find_start(struct list_head *head, __u64 sector)
60 	__list_for_each(p, head) {
61 		iop = list_entry(p, struct io, dev_head);
62 		if (BIT_START(iop) == sector)
/*
 * dip_find_end() - find the io on a per-device list whose extent
 * ends exactly at @sector (BIT_END() comparison).
 * Return path elided from this view -- presumably the match or NULL.
 */
68 struct io *dip_find_end(struct list_head *head, __u64 sector)
73 	__list_for_each(p, head) {
74 		iop = list_entry(p, struct io, dev_head);
75 		if (BIT_END(iop) == sector)
/*
 * dip_find_in_sec() - find the io on a per-device list whose extent
 * contains @sector (inclusive on both ends: BIT_START <= sector <= BIT_END).
 * Return path elided from this view -- presumably the match or NULL.
 */
81 struct io *dip_find_in_sec(struct list_head *head, __u64 sector)
86 	__list_for_each(p, head) {
87 		iop = list_entry(p, struct io, dev_head);
88 		if (BIT_START(iop) <= sector && sector <= BIT_END(iop))
/*
 * dip_find_qa() - find the io that immediately precedes trace @t in
 * sequence on the same CPU (same t.cpu, sequence exactly t->sequence-1).
 * Used by the LVM remap workaround path to pair Q with A traces.
 * Return path elided from this view -- presumably the match or NULL.
 */
94 struct io *dip_find_qa(struct list_head *head, struct blk_io_trace *t)
99 	__list_for_each(p, head) {
100 		iop = list_entry(p, struct io, dev_head);
101 		if (iop->t.cpu == t->cpu && iop->t.sequence == (t->sequence-1))
/*
 * dip_add_ms() - link every merge io on @head whose extent contains the
 * issued io @d_iop onto d_iop's u.d.d_im_head list, via a freshly
 * allocated io_list node.
 * NOTE(review): malloc() result is used unchecked -- a NULL return would
 * fault in io_link(). Cannot fix here; view is elided.
 */
107 void dip_add_ms(struct list_head *head, struct io *d_iop)
111 	struct io_list *iolp;
113 	__list_for_each(p, head) {
114 		m_iop = list_entry(p, struct io, dev_head);
115 		if (in_bit(m_iop, d_iop)) {
116 			iolp = malloc(sizeof(*iolp));
117 			io_link(&iolp->iop, m_iop);
118 			list_add_tail(&iolp->head, &d_iop->u.d.d_im_head);
/*
 * dip_add_qs() - link every queue io on @head whose extent contains the
 * inserted io @i_iop onto i_iop's u.i.i_qs_head list, via a freshly
 * allocated io_list node. Mirrors dip_add_ms() for the insert path.
 * NOTE(review): malloc() result is used unchecked (same caveat as
 * dip_add_ms); cannot fix here -- view is elided.
 */
123 void dip_add_qs(struct list_head *head, struct io *i_iop)
127 	struct io_list *iolp;
129 	__list_for_each(p, head) {
130 		q_iop = list_entry(p, struct io, dev_head);
131 		if (in_bit(q_iop, i_iop)) {
132 			iolp = malloc(sizeof(*iolp));
133 			io_link(&iolp->iop, q_iop);
134 			list_add_tail(&iolp->head, &i_iop->u.i.i_qs_head);
/*
 * handle_queue() - process a Q (queue) trace: set the io up as IOP_Q,
 * update Q-to-Q timing and region statistics, then try to pair it with
 * an earlier A (remap) io on the same device.
 * Pairing: first an exact extent match; with LVM_REMAP_WORKAROUND also
 * a cpu/sequence-adjacency match via dip_find_qa(). If no A is found,
 * qp_type stays Q_NONE.
 * NOTE(review): control-flow lines (if/else, braces) are elided from
 * this view; ordering below reflects the visible statements only.
 */
139 void handle_queue(struct io *iop)
143 	io_setup(iop, IOP_Q);
	/* Global queue-to-queue latency and activity-region bookkeeping. */
145 	update_lq(&last_q, &all_avgs.q2q, iop->t.time);
146 	update_qregion(&all_regions, iop->t.time);
147 	dip_update_q(iop->dip, iop);
	/* Exact-extent match against outstanding A (remap) ios. */
150 	tmp = dip_find_exact(dip_get_head(iop->dip, IOP_A), iop);
152 		iop->u.q.qp_type = Q_A;
153 		io_link(&iop->u.q.qp.q_a, tmp);
156 		iop->u.q.qp_type = Q_NONE;
158 #if defined(LVM_REMAP_WORKAROUND)
	/* LVM may rewrite sectors; fall back to cpu/sequence adjacency. */
160 		tmp = dip_find_qa(dip_get_head(iop->dip, IOP_A), &iop->t);
162 			iop->u.q.qp_type = Q_A;
163 			io_link(&iop->u.q.qp.q_a, tmp);
/*
 * handle_merge() - process an M (back/front merge) trace: set the io up
 * as IOP_M and link it to the exactly-matching Q io on the same device.
 * Lines elided from this view; the no-match path is not visible.
 */
169 void handle_merge(struct io *iop)
173 	io_setup(iop, IOP_M);
175 	q_iop = dip_find_exact(dip_get_head(iop->dip, IOP_Q), iop);
177 		io_link(&iop->u.m.m_q, q_iop);
/*
 * handle_insert() - process an I (insert) trace: set the io up as IOP_I
 * and gather all containing Q ios onto its i_qs_head list.
 * NOTE(review): `iolp` is malloc'd here but no use is visible in this
 * elided view -- verify against the full source whether it is consumed
 * or leaked.
 */
180 void handle_insert(struct io *iop)
182 	struct io_list *iolp = malloc(sizeof(*iolp));
184 	io_setup(iop, IOP_I);
185 	INIT_LIST_HEAD(&iop->u.i.i_qs_head);
186 	dip_add_qs(dip_get_head(iop->dip, IOP_Q), iop);
/*
 * handle_complete() - process a C (complete) trace: set the io up as
 * IOP_C, record completion time in the global, per-device, and
 * per-process regions, then link back to the exactly-matching D
 * (issue) io.
 * Lines elided from this view (e.g. the pip NULL check, no-match path).
 */
189 void handle_complete(struct io *iop)
193 	io_setup(iop, IOP_C);
195 	update_cregion(&all_regions, iop->t.time);
196 	update_cregion(&iop->dip->regions, iop->t.time);
198 		update_cregion(&iop->pip->regions, iop->t.time);
200 	d_iop = dip_find_exact(dip_get_head(iop->dip, IOP_D), iop);
202 		io_link(&iop->u.c.c_d, d_iop);
/*
 * handle_issue() - process a D (issue) trace: set the io up as IOP_D,
 * link the containing I (insert) io onto its d_im_head list, gather any
 * containing M (merge) ios, and feed the io to per-device seek
 * statistics.
 * NOTE(review): malloc() result is used unchecked; conditional lines
 * around the i_iop link are elided from this view.
 */
210 void handle_issue(struct io *iop)
213 	struct io_list *iolp = malloc(sizeof(*iolp));
215 	io_setup(iop, IOP_D);
218 	INIT_LIST_HEAD(&iop->u.d.d_im_head);
219 	i_iop = dip_find_in(dip_get_head(iop->dip, IOP_I), iop);
221 		io_link(&iolp->iop, i_iop);
222 		list_add_tail(&iolp->head, &iop->u.d.d_im_head);
225 	dip_add_ms(dip_get_head(iop->dip, IOP_M), iop);
226 	seeki_add(iop->dip->seek_handle, iop);
/*
 * handle_split() - process an X (split) trace: set the io up as IOP_X
 * and link it to the exactly-matching Q io on the same device.
 * Lines elided from this view; the no-match path is not visible.
 */
229 void handle_split(struct io *iop)
234 	io_setup(iop, IOP_X);
236 	q_iop = dip_find_exact(dip_get_head(iop->dip, IOP_Q), iop);
238 		io_link(&iop->u.x.x_q, q_iop);
/*
 * __x_add_c() - helper for handle_split_end(): for half @which (1 or 2)
 * of a split, decode the (dev, sector) pair from the big-endian
 * split-end payload @rp, then on that device:
 *   - tag the Q io ending at @sector as split-related (Q_X) and link it
 *     to the split io @x_iop;
 *   - link the C io covering @sector into the chosen y_c1/y_c2 slot of
 *     the split-end io @y_iop.
 * NOTE(review): the which==1 / which==2 branch structure and NULL-check
 * lines are elided from this view.
 */
242 void __x_add_c(struct io *y_iop, struct io *x_iop,
243 	       struct blk_io_trace_split_end *rp, int which)
248 	struct io **y_cp, *q_iop, *c_iop;
	/* First half of the split. */
251 		y_cp = &y_iop->u.y.y_c1;
252 		dev = be32_to_cpu(rp->dev1);
253 		sector = be64_to_cpu(rp->sector1);
	/* Second half of the split. */
256 		y_cp = &y_iop->u.y.y_c2;
257 		dev = be32_to_cpu(rp->dev2);
258 		sector = be64_to_cpu(rp->sector2);
261 	dip = __dip_find(dev);
264 	q_iop = dip_find_end(dip_get_head(dip, IOP_Q), sector);
266 		q_iop->u.q.qp_type = Q_X;
267 		io_link(&q_iop->u.q.qp.q_x, x_iop);
270 	c_iop = dip_find_in_sec(dip_get_head(dip, IOP_C), sector);
272 		io_link(y_cp, c_iop);
/*
 * handle_split_end() - process a split-end trace: set the io up as
 * IOP_Y, link it to the matching X (split) io, attach the completion
 * ios for both halves via __x_add_c(), then drop the completion
 * references (rem_c).
 * NOTE(review): NULL checks and braces are elided from this view.
 */
275 void handle_split_end(struct io *iop)
278 	struct blk_io_trace_split_end *rp = iop->pdu;
281 	io_setup(iop, IOP_Y);
283 	x_iop = dip_find_exact(dip_get_head(iop->dip, IOP_X), iop);
	/* Resolve both halves of the split against their devices. */
285 		__x_add_c(iop, x_iop, rp, 1);
286 		__x_add_c(iop, x_iop, rp, 2);
288 	rem_c(iop->u.y.y_c1);
289 	rem_c(iop->u.y.y_c2);
/*
 * handle_remap() - process an A (remap) trace: decode the big-endian
 * original (device, sector) from the payload, find the Q io on the
 * *original* device covering that sector, set this io up as IOP_A, and
 * link it to that Q io.
 * With LVM_REMAP_WORKAROUND the io's sector is rewritten to the decoded
 * original sector before linking (LVM reports remaps differently --
 * see workaround discussion in the full source).
 */
298 void handle_remap(struct io *iop)
301 	struct blk_io_trace_remap *rp = iop->pdu;
302 	__u32 dev = be32_to_cpu(rp->device);
303 	__u64 sector = be64_to_cpu(rp->sector);
305 	q_iop = dip_find_in_sec(dip_get_head_dev(dev, IOP_Q), sector);
307 	io_setup(iop, IOP_A);
309 #if defined(LVM_REMAP_WORKAROUND)
311 		sector = iop->t.sector;
312 		iop->t.sector = be64_to_cpu(rp->sector);
315 		io_link(&iop->u.a.a_q, q_iop);
/*
 * extract_i() - detach and release an insert io's queue-trace list:
 * walk i_qs_head safely (entries are removed while iterating), unlink
 * each io_list node, and drop the reference on its Q io via __io_put()
 * (reference-only drop -- not a full io_put, per the requeue comment
 * later in this file).
 * NOTE(review): freeing of the io_list nodes themselves is elided from
 * this view.
 */
321 void extract_i(struct io *i_iop)
323 	struct io_list *iolp;
324 	struct list_head *p, *q;
326 	ASSERT(i_iop != NULL && i_iop->type == IOP_I);
327 	list_for_each_safe(p, q, &i_iop->u.i.i_qs_head) {
328 		iolp = list_entry(p, struct io_list, head);
329 		LIST_DEL(&iolp->head);
331 		ASSERT(iolp->iop->type == IOP_Q);
332 		(void)__io_put(iolp->iop);
340 * (1) Need to remove D & its I & M's
341 * (2) Need to leave I's Q and M's Q's -- *no* io_put (__io_put instead)
/*
 * handle_requeue() - process a requeue trace: locate the outstanding D
 * (issue) io starting at the requeued sector on this device, then tear
 * down its merge/insert list: for each attached M drop the reference on
 * its Q (__io_put -- the Qs must survive, per the comment above), for
 * each attached I run extract_i(), and zero the user count so the io
 * can be reclaimed.
 * NOTE(review): braces, the d_iop NULL check, and node frees are elided
 * from this view.
 */
343 void handle_requeue(struct io *iop)
346 	struct io_list *iolp;
347 	struct list_head *p, *q;
349 	d_iop = dip_find_start(dip_get_head_dev(iop->t.device, IOP_D),
352 	list_for_each_safe(p, q, &d_iop->u.d.d_im_head) {
353 		iolp = list_entry(p, struct io_list, head);
354 		LIST_DEL(&iolp->head);
356 		if (iolp->iop->type == IOP_M)
357 			(void)__io_put(iolp->iop->u.m.m_q);
359 			extract_i(iolp->iop);
361 		iolp->iop->users = 0;
/*
 * __add_trace() - dispatch one parsed trace to its per-action handler.
 * In verbose mode, prints a progress line (traces seen, io allocs/frees)
 * every 10000 traces, with a newline every 1000000.
 * The low 16 bits of t.action select the blktrace action code
 * (__BLK_TA_*); each case delegates to the matching handle_*() above.
 * NOTE(review): no default case is visible in this elided view --
 * unknown actions are presumably ignored; confirm against full source.
 */
374 void __add_trace(struct io *iop)
378 	if (verbose && (n_traces % 10000) == 0) {
379 		printf("%10lu t, %10lu m %1lu f\r",
380 			n_traces, n_io_allocs, n_io_frees);
382 		if ((n_traces % 1000000) == 0) printf("\n");
	/* Mask off the BLK_TC_* category bits; keep only the action code. */
385 	switch (iop->t.action & 0xffff) {
386 	case __BLK_TA_QUEUE:		handle_queue(iop); break;
387 	case __BLK_TA_BACKMERGE:	handle_merge(iop); break;
388 	case __BLK_TA_FRONTMERGE:	handle_merge(iop); break;
389 	case __BLK_TA_ISSUE:		handle_issue(iop); break;
390 	case __BLK_TA_COMPLETE:		handle_complete(iop); break;
391 	case __BLK_TA_INSERT:		handle_insert(iop); break;
392 	case __BLK_TA_SPLIT:		handle_split(iop); break;
394 	case __BLK_TA_SPLIT_END:	handle_split_end(iop); break;
396 	case __BLK_TA_REMAP:		handle_remap(iop); break;
397 	case __BLK_TA_REQUEUE:		handle_requeue(iop); break;
401 void add_trace(struct io *iop)
403 if (iop->t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
404 char *slash = strchr(iop->pdu, '/');
408 add_process(iop->t.pid, iop->pdu);
411 else if (iop->t.action & BLK_TC_ACT(BLK_TC_PC))