2 * blktrace output analysis: generate a timeline & gather statistics
4 * Copyright (C) 2006 Alan D. Brunelle <Alan.Brunelle@hp.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/*
 * Convenience macros over the embedded struct blk_io_trace ("t" member
 * of struct io, see below).
 * NOTE(review): the leading numbers on these lines appear to be
 * line-number artifacts from an extraction pass, not part of the
 * original header — confirm against the upstream file.
 */
/* Trace timestamp (ns split into SECONDS/NANO_SECONDS) as a double, in seconds. */
30 #define BIT_TIME(t) ((double)SECONDS(t) + ((double)NANO_SECONDS(t) / 1.0e9))
/* First sector of the I/O described by iop. */
32 #define BIT_START(iop) ((iop)->t.sector)
/* One past the last sector: byte count converted to 512-byte sectors. */
33 #define BIT_END(iop) ((iop)->t.sector + ((iop)->t.bytes >> 9))
/* Non-zero when the trace action carries the READ bit. */
34 #define IOP_READ(iop) ((iop)->t.action & BLK_TC_ACT(BLK_TC_READ))
/* 1 for reads, 0 for writes — usable as an index into the [2] stat arrays. */
35 #define IOP_RW(iop) (IOP_READ(iop) ? 1 : 0)
/* Nanoseconds to fractional seconds / milliseconds. */
37 #define TO_SEC(nanosec) ((double)(nanosec) / 1.0e9)
38 #define TO_MSEC(nanosec) (1000.0 * TO_SEC(nanosec))
/* Debug ping hook; NOTE(review): presumably inside a debug #if whose
 * guard lines are not visible here — confirm. */
41 #define DBG_PING() dbg_ping()
42 #define ASSERT(truth) do { \
50 #define LIST_DEL(hp) list_del(hp)
54 #define LIST_DEL(hp) do { \
55 if (((hp)->next != NULL) && \
56 ((hp)->next != LIST_POISON1)) \
65 	IOP_L = 3,	// Between-device linkage
72 #define N_IOP_TYPES (IOP_R + 1)
75 struct file_info *next;
87 struct list_head head;
93 __u64 min, max, total;
101 struct avg_info q2a; /* Q to (A or X) */
102 struct avg_info q2i; /* Q to (I or M) */
103 struct avg_info i2d; /* (I or M) to D */
106 struct avg_info blks; /* Blocks transferred */
110 struct list_head head; /* on: qranges OR cranges */
115 struct list_head qranges;
116 struct list_head cranges;
117 struct range_info *qr_cur, *cr_cur;
121 struct region_info regions;
122 struct avgs_info avgs;
130 unsigned int host, bus, target, lun, irq, cpu;
132 char device[32], node[32], pci[32], devno[32];
136 __u64 rqm[2], ios[2], sec[2], wait, svctm;
137 double last_qu_change, last_dev_change, tot_qusz, idle_time;
138 int cur_qusz, cur_dev;
143 double rqm_s[2], ios_s[2], sec_s[2];
144 double avgrq_sz, avgqu_sz, await, svctm, p_util;
148 struct list_head all_head, hash_head;
150 struct region_info regions;
153 FILE *d2c_ofp, *q2c_ofp;
154 struct avgs_info avgs;
155 struct stats stats, all_stats;
161 struct rb_node rb_node;
162 struct list_head f_head;
165 struct blk_io_trace t;
169 struct list_head down_head, up_head, c_pending, retry;
170 struct list_head down_list, up_list;
172 int run_ready, linked, self_remap, displayed;
177 extern char bt_timeline_version[], *devices, *exes, *input_name, *output_name;
178 extern char *seek_name, *iostat_name, *d2c_name, *q2c_name, *per_io_name;
179 extern double range_delta;
180 extern FILE *ranges_ofp, *avgs_ofp, *iostat_ofp, *per_io_ofp;;
181 extern int verbose, ifd, dump_level;
182 extern unsigned int n_devs;
183 extern unsigned long n_traces;
184 extern struct list_head all_devs, all_procs, retries;
185 extern struct avgs_info all_avgs;
187 extern struct region_info all_regions;
188 extern struct list_head free_ios;
189 extern __u64 iostat_interval, iostat_last_stamp;
190 extern time_t genesis, last_vtrace;
/*
 * Function prototypes, grouped by implementing source file (the
 * remnant comments "trace_complete.c" / "trace_requeue.c" show the
 * original grouping convention; other group headers were lost).
 * NOTE(review): the leading numbers are extraction line-number
 * artifacts — confirm against the upstream header.
 */
/* args.c (presumably) — command-line option handling */
193 void handle_args(int argc, char *argv[]);
/* device-name map: read a map file, look up an entry by device number */
196 int dev_map_read(char *fname);
197 struct devmap *dev_map_find(__u32 device);
/* per-device (struct d_info) tracking and traversal */
200 void init_dev_heads(void);
201 struct d_info *dip_add(__u32 device, struct io *iop);
202 void dip_rem(struct io *iop);
203 struct d_info *__dip_find(__u32 device);
204 void dip_foreach_list(struct io *iop, enum iop_type type, struct list_head *hd);
205 void dip_foreach(struct io *iop, enum iop_type type,
206 void (*fnc)(struct io *iop, struct io *this), int rm_after);
207 struct io *dip_find_sec(struct d_info *dip, enum iop_type type, __u64 sec);
208 void dip_foreach_out(void (*func)(struct d_info *, void *), void *arg);
/* red-black tree of struct io, keyed by sector */
211 int rb_insert(struct rb_root *root, struct io *iop);
212 struct io *rb_find_sec(struct rb_root *root, __u64 sec);
213 void rb_foreach(struct rb_node *n, struct io *iop,
214 void (*fnc)(struct io *iop, struct io *this),
215 struct list_head *head);
/* iostat-style statistics: feed per-stage events, dump on interval */
218 void iostat_init(void);
219 void iostat_insert(struct io *iop);
220 void iostat_merge(struct io *iop);
221 void iostat_issue(struct io *iop);
222 void iostat_unissue(struct io *iop);
223 void iostat_complete(struct io *d_iop, struct io *c_iop);
224 void iostat_check_time(__u64 stamp);
225 void iostat_dump_stats(__u64 stamp, int all);
/* per-device D2C / Q2C latency recording */
228 void latency_init(struct d_info *dip);
229 void latency_clean(void);
230 void latency_d2c(struct d_info *dip, __u64 tstamp, __u64 latency);
231 void latency_q2c(struct d_info *dip, __u64 tstamp, __u64 latency);
/* misc helpers: endian conversion, device filtering, buffered reads,
 * tracked output-file list */
234 struct blk_io_trace *convert_to_cpu(struct blk_io_trace *t);
235 int in_devices(struct blk_io_trace *t);
236 unsigned int do_read(int ifd, void *buf, int len);
237 void add_file(struct file_info **fipp, FILE *fp, char *oname);
238 void clean_files(struct file_info **fipp);
/* report output: averages, ranges, device-header formatting */
242 int output_avgs(FILE *ofp);
243 int output_ranges(FILE *ofp);
244 char *make_dev_hdr(char *pad, size_t len, struct d_info *dip);
/* per-process (struct p_info) tracking */
247 void add_process(__u32 pid, char *name);
248 struct p_info *find_process(__u32 pid, char *name);
249 void pip_update_q(struct io *iop);
250 void pip_foreach_out(void (*f)(struct p_info *, void *), void *arg);
/* seek statistics; handle is an opaque per-device object from seeki_init */
253 void *seeki_init(__u32 device);
254 void seek_clean(void);
255 void seeki_add(void *handle, struct io *iop);
256 double seeki_mean(void *handle);
257 long long seeki_nseeks(void *handle);
258 long long seeki_median(void *handle);
259 int seeki_mode(void *handle, struct mode *mp);
/* core trace handling: dump one iop, release a list, ingest a trace */
262 void dump_iop(FILE *ofp, struct io *to_iop, struct io *from_iop, int indent);
263 void release_iops(struct list_head *del_head);
264 void add_trace(struct io *iop);
266 /* trace_complete.c */
267 void trace_complete(struct io *c_iop);
268 int retry_complete(struct io *c_iop);
269 int ready_complete(struct io *c_iop, struct io *top);
270 void run_complete(struct io *c_iop);
/* insert/merge (I and M) trace events */
273 void trace_insert(struct io *i_iop);
274 void trace_merge(struct io *m_iop);
275 int ready_im(struct io *im_iop, struct io *top);
276 void run_im(struct io *im_iop, struct io *top, struct list_head *del_head);
277 void run_unim(struct io *im_iop, struct list_head *del_head);
/* issue (D) trace events */
280 void trace_issue(struct io *d_iop);
281 int ready_issue(struct io *d_iop, struct io *top);
282 void run_issue(struct io *d_iop, struct io *top, struct list_head *del_head);
283 void run_unissue(struct io *d_iop, struct list_head *del_head);
/* queue (Q) trace events */
286 void trace_queue(struct io *q_iop);
287 int ready_queue(struct io *q_iop, struct io *top);
288 void run_queue(struct io *q_iop, struct io *top, struct list_head *del_head);
289 void run_unqueue(struct io *q_iop, struct list_head *del_head);
/* remap (A) trace events */
292 void trace_remap(struct io *a_iop);
293 int ready_remap(struct io *a_iop, struct io *top);
294 void run_remap(struct io *a_iop, struct io *top, struct list_head *del_head);
295 void run_unremap(struct io *a_iop, struct list_head *del_head);
297 /* trace_requeue.c */
298 void trace_requeue(struct io *r_iop);
299 int retry_requeue(struct io *r_iop);
300 int ready_requeue(struct io *r_iop, struct io *top);
301 void run_requeue(struct io *r_iop);