/*
 * [PATCH] BTT patch: (3/3) time bounded trace analysis
 * [blktrace.git] / btt / trace_complete.c
 */
/*
 * blktrace output analysis: generate a timeline & gather statistics
 *
 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include "globals.h"

/*
 * Completion (C) traces that have been seen but not yet fully processed;
 * entries are linked through io.c_pending (added in trace_complete(),
 * removed in run_comp()).
 */
LIST_HEAD(pending_cs);
24
25 static void gen_c_list(struct io *c_iop, struct list_head *c_head)
26 {
27         struct io *iop;
28         struct list_head *p;
29
30         __list_for_each(p, &pending_cs) {
31                 iop = list_entry(p, struct io, c_pending);
32                 if (iop->t.device == c_iop->t.device)
33                         continue;
34                 if (dip_find_sec(iop->dip, IOP_D, BIT_START(iop)) == NULL)
35                         continue;
36
37                 __link(iop, c_iop);
38                 if (ready_complete(iop, c_iop))
39                         list_add_tail(&iop->f_head, c_head);
40                 __unlink(iop, c_iop);
41         }
42 }
43
/*
 * run_comp - process a completed IO and everything stacked beneath it.
 *
 * If this device has a matching D (issue) trace for @c_iop, record the
 * D2C statistics and hand off to run_issue() to walk the rest of the IO
 * stack.  Otherwise the complete presumably arrived via a remap from
 * another device: collect the ready pending completes that feed it and
 * recurse on each.  In all cases the complete is dumped, removed from
 * the pending/retry lists, and queued on @rmhd for release.
 */
static void run_comp(struct io *c_iop, struct io *top, struct list_head *rmhd)
{
	struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));

	update_blks(c_iop);
	if (d_iop) {
		/* Time from issue (D) to complete (C). */
		__u64 d2c = tdelta(d_iop, c_iop);

		update_d2c(d_iop, d2c);
		latency_d2c(d_iop->dip, c_iop->t.time, d2c);
		iostat_complete(d_iop, c_iop);

		/* Link transiently so the issue path can see its completer. */
		__link(d_iop, c_iop);
		run_issue(d_iop, top, rmhd);
		__unlink(d_iop, c_iop);
	}
	else {
		LIST_HEAD(head);
		struct io *iop;
		struct list_head *p, *q;

		/* No local D trace: pull in ready completes from other devices. */
		gen_c_list(c_iop, &head);
		list_for_each_safe(p, q, &head) {
			iop = list_entry(p, struct io, f_head);
			LIST_DEL(&iop->f_head);

			dump_level++;	/* nest per-IO dump output one level */
			__link(iop, c_iop);
			run_comp(iop, top, rmhd);
			__unlink(iop, c_iop);
			dump_level--;
		}
	}

	dump_iop(per_io_ofp, c_iop, NULL, 0);

	/* Done with this complete: drop from pending/retry, queue for release. */
	LIST_DEL(&c_iop->c_pending);
	del_retry(c_iop);
	list_add_tail(&c_iop->f_head, rmhd);
}
84
85 static int ready_comp(struct io *c_iop, 
86                                 __attribute__((__unused__)) struct io *top)
87 {
88         LIST_HEAD(head);
89         struct io *iop;
90         struct list_head *p, *q;
91         __u64 bl = c_iop->bytes_left;
92
93         gen_c_list(c_iop, &head);
94         list_for_each_safe(p, q, &head) {
95                 iop = list_entry(p, struct io, f_head);
96                 LIST_DEL(&iop->f_head);
97
98                 __link(iop, c_iop);
99                 if (ready_complete(iop, c_iop))
100                         bl -= iop->bytes_left;
101                 __unlink(iop, c_iop);
102         }
103
104         return bl == 0;
105 }
106
107 void trace_complete(struct io *c_iop)
108 {
109         if (!io_setup(c_iop, IOP_C)) {
110                 io_release(c_iop);
111                 return;
112         }
113
114         list_add_tail(&c_iop->c_pending, &pending_cs);
115         if (ready_complete(c_iop, c_iop)) {
116                 dump_level = 0;
117                 run_complete(c_iop);
118         }
119         else 
120                 add_retry(c_iop);
121 }
122
/*
 * retry_complete - re-attempt a complete previously parked for retry.
 *
 * Returns 1 when the complete was ready and has been processed,
 * 0 when it must stay on the retry list.
 */
int retry_complete(struct io *c_iop)
{
	if (ready_complete(c_iop, c_iop)) {
		run_complete(c_iop);
		return 1;
	}

	return 0;
}
131
132 int ready_complete(struct io *c_iop, struct io *top)
133 {
134         struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));
135
136         if (d_iop) {
137                 ASSERT(d_iop->t.bytes == c_iop->bytes_left);
138                 return ready_issue(d_iop, top);
139         }
140         else 
141                 return ready_comp(c_iop, top);
142 }
143
144 void run_complete(struct io *c_iop)
145 {
146         LIST_HEAD(rmhd);
147
148         update_cregion(&all_regions, c_iop->t.time);
149         update_cregion(&c_iop->dip->regions, c_iop->t.time);
150         if (c_iop->pip)
151                 update_cregion(&c_iop->pip->regions, c_iop->t.time);
152
153         run_comp(c_iop, c_iop, &rmhd);
154         if (per_io_ofp) fprintf(per_io_ofp, "\n");
155         release_iops(&rmhd);
156 }