[PATCH] BTT patch: (2/3) per-IO stream output
[blktrace.git] / btt / trace_complete.c
/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include "globals.h"

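/*
 * Completion (C) traces that have been seen but not yet fully
 * processed and released.
 */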
LIST_HEAD(pending_cs);

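/*
 * Collect pending completions on *other* devices that have a matching
 * issue (D) trace and are ready with respect to c_iop. Used when c_iop
 * has no issue trace on its own device, so its bytes have to be covered
 * by completions elsewhere.
 */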
static void gen_c_list(struct io *c_iop, struct list_head *c_head)
{
        struct io *iop;
        struct list_head *p;

        __list_for_each(p, &pending_cs) {
                iop = list_entry(p, struct io, c_pending);
                if (iop->t.device == c_iop->t.device)
                        continue;
                if (dip_find_sec(iop->dip, IOP_D, BIT_START(iop)) == NULL)
                        continue;

                __link(iop, c_iop);
                if (ready_complete(iop, c_iop))
                        list_add_tail(&iop->f_head, c_head);
                __unlink(iop, c_iop);
        }
}

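/*
 * Process one completion: account its blocks, then either charge D2C
 * time to the matching issue (D) trace and recurse via run_issue(), or,
 * when no issue trace exists on this device, recurse into the related
 * pending completions gathered by gen_c_list() (bumping dump_level so
 * the per-IO stream shows the nesting). Finally the iop is written to
 * the per-IO output, removed from pending_cs, and queued for release.
 */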
static void run_comp(struct io *c_iop, struct io *top, struct list_head *rmhd)
{
        struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));

        update_blks(c_iop);
        if (d_iop) {
                __u64 d2c = tdelta(d_iop, c_iop);

                update_d2c(d_iop, d2c);
                latency_d2c(d_iop->dip, c_iop->t.time, d2c);
                iostat_complete(d_iop, c_iop);

                __link(d_iop, c_iop);
                run_issue(d_iop, top, rmhd);
                __unlink(d_iop, c_iop);
        }
        else {
                LIST_HEAD(head);
                struct io *iop;
                struct list_head *p, *q;

                gen_c_list(c_iop, &head);
                list_for_each_safe(p, q, &head) {
                        iop = list_entry(p, struct io, f_head);
                        LIST_DEL(&iop->f_head);

                        dump_level++;
                        __link(iop, c_iop);
                        run_comp(iop, top, rmhd);
                        __unlink(iop, c_iop);
                        dump_level--;
                }
        }
        dump_iop(per_io_ofp, c_iop, NULL, 0);
        LIST_DEL(&c_iop->c_pending);
        list_add_tail(&c_iop->f_head, rmhd);
}

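/*
 * With no issue trace on this device, a completion is ready once the
 * ready completions pending on other devices account for all of its
 * remaining bytes.
 */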
static int ready_comp(struct io *c_iop,
                                __attribute__((__unused__)) struct io *top)
{
        LIST_HEAD(head);
        struct io *iop;
        struct list_head *p, *q;
        __u64 bl = c_iop->bytes_left;

        gen_c_list(c_iop, &head);
        list_for_each_safe(p, q, &head) {
                iop = list_entry(p, struct io, f_head);
                LIST_DEL(&iop->f_head);

                __link(iop, c_iop);
                if (ready_complete(iop, c_iop))
                        bl -= iop->bytes_left;
                __unlink(iop, c_iop);
        }

        return bl == 0;
}

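/*
 * Entry point for a completion (C) trace: set it up, queue it on
 * pending_cs, and process it immediately if everything it depends on
 * has been seen; otherwise defer it to the retry list.
 */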
void trace_complete(struct io *c_iop)
{
        if (!io_setup(c_iop, IOP_C)) {
                io_release(c_iop);
                return;
        }

        list_add_tail(&c_iop->c_pending, &pending_cs);
        if (ready_complete(c_iop, c_iop)) {
                dump_level = 0;
                run_complete(c_iop);
        }
        else
                add_retry(c_iop);
}

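/*
 * Retry a previously deferred completion; returns 1 if it could now be
 * processed.
 */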
int retry_complete(struct io *c_iop)
{
        if (!ready_complete(c_iop, c_iop))
                return 0;

        del_retry(c_iop);
        run_complete(c_iop);
        return 1;
}

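/*
 * A completion is ready when its matching issue (D) trace exists and is
 * itself ready, or, lacking one, when related completions on other
 * devices cover all of its bytes.
 */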
int ready_complete(struct io *c_iop, struct io *top)
{
        struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));

        if (d_iop) {
                ASSERT(d_iop->t.bytes == c_iop->bytes_left);
                return ready_issue(d_iop, top);
        }
        else
                return ready_comp(c_iop, top);
}

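/*
 * Top-level completion handling: update completion-time regions for the
 * global, per-device, and (when known) per-process statistics, walk the
 * completion chain via run_comp(), terminate this IO's per-IO stream
 * entry with a blank line, and release everything on the remove list.
 */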
void run_complete(struct io *c_iop)
{
        LIST_HEAD(rmhd);

        update_cregion(&all_regions, c_iop->t.time);
        update_cregion(&c_iop->dip->regions, c_iop->t.time);
        if (c_iop->pip)
                update_cregion(&c_iop->pip->regions, c_iop->t.time);

        run_comp(c_iop, c_iop, &rmhd);
        if (per_io_ofp)
                fprintf(per_io_ofp, "\n");
        release_iops(&rmhd);
}