[PATCH] BTT patch: (2/3) per-IO stream output
diff --git a/btt/trace.c b/btt/trace.c
index abb148857ad11db2405354e051106e3036c52524..4c57b5df202b02e91e530cf8b64f6adc1dd19962 100644
--- a/btt/trace.c
+++ b/btt/trace.c
  */
 #include "globals.h"
 
-void im2d_func(struct io *d_iop, struct io *im_iop)
-{
-       update_i2d(im_iop, d_iop->t.time - im_iop->t.time);
-}
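+/*
+ * dump_level sets the base indentation used by dump_iop(); the retries
+ * list holds IOs whose completion/requeue handling is deferred until a
+ * later trace arrives (see do_retries()).
+ */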
+int dump_level;
+LIST_HEAD(retries);
 
-void q2c_func(struct io *c_iop, struct io *q_iop)
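+/* print a device as "major,minor" */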
+static inline void dump_dev(FILE *ofp, __u32 dev)
 {
-       __u64 q2c = c_iop->t.time - q_iop->t.time;
-
-       update_q2c(q_iop, q2c);
-       latency_q2c(q_iop->dip, q_iop->t.time, q2c);
+       fprintf(ofp, "%3d,%-3d ", MAJOR(dev), MINOR(dev));
 }
 
-static inline void handle_im(struct io *im_iop)
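+/* print an IO as "sector+number-of-sectors" */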
+static inline void dump_desc(FILE *ofp, struct io *iop)
 {
-       struct io *q_iop;
-
-       q_iop = dip_find_sec(im_iop->dip, IOP_Q, BIT_START(im_iop));
-       if (q_iop)
-               update_q2i(q_iop, im_iop->t.time - q_iop->t.time);
+       fprintf(ofp, "%10llu+%-4u ", (unsigned long long)iop->t.sector,
+               t_sec(&iop->t));
 }
 
-void handle_queue(struct io *q_iop)
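+/*
+ * dump_iop -- emit one line per IO: time, device, action character and
+ * sector+size, optionally followed by the IO it was derived from. Each
+ * IO is displayed at most once.
+ */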
+void dump_iop(FILE *ofp, struct io *to_iop, struct io *from_iop, int indent)
 {
-       io_setup(q_iop, IOP_Q, 1);
-       update_lq(&last_q, &all_avgs.q2q, q_iop->t.time);
-       update_qregion(&all_regions, q_iop->t.time);
-       dip_update_q(q_iop->dip, q_iop);
-       pip_update_q(q_iop);
-}
+       int i, c;
 
-void handle_remap(struct io *a_iop)
-{
-       struct io *q_iop;
-       struct blk_io_trace_remap *rp = a_iop->pdu;
-       struct d_info *dip = __dip_find(be32_to_cpu(rp->device));
-
-       io_setup(a_iop, IOP_A, 0);
-       if (dip) {
-               q_iop = dip_find_sec(dip, IOP_Q, be64_to_cpu(rp->sector));
-               if (q_iop)
-                       update_q2a(q_iop, a_iop->t.time - q_iop->t.time);
-       }
-       io_release(a_iop);
-}
-
-void handle_insert(struct io *i_iop)
-{
-       io_setup(i_iop, IOP_I, 1);
-       iostat_insert(i_iop);
-       handle_im(i_iop);
-}
+       if (!ofp) return;
+       if (to_iop->displayed) return;
 
-void handle_merge(struct io *m_iop)
-{
-       io_setup(m_iop, IOP_M, 1);
-       iostat_merge(m_iop);
-       handle_im(m_iop);
-}
+       fprintf(ofp, "%5d.%09lu ", (int)SECONDS(to_iop->t.time),
+               (unsigned long)NANO_SECONDS(to_iop->t.time));
 
-void handle_issue(struct io *d_iop)
-{
-       io_setup(d_iop, IOP_D, 1);
-       d_iop->dip->n_ds++;
+       for (i = 0; i < ((dump_level * 4) + indent); i++)
+               fprintf(ofp, " ");
 
-       dip_foreach(d_iop, IOP_I, im2d_func, 0);
-       dip_foreach(d_iop, IOP_M, im2d_func, 0);
+       dump_dev(ofp, to_iop->t.device);
 
-       if (seek_name)
-               seeki_add(d_iop->dip->seek_handle, d_iop);
-       iostat_issue(d_iop);
-}
+       switch (to_iop->type) {
+       case IOP_Q: c = 'Q'; break;
+       case IOP_L: c = 'L'; break;
+       case IOP_A: c = 'A'; break;
+       case IOP_I: c = 'I'; break;
+       case IOP_M: c = 'M'; break;
+       case IOP_D: c = 'D'; break;
+       case IOP_C: c = 'C'; break;
+       default   : c = '?'; break;
+       }
 
-void handle_complete(struct io *c_iop)
-{
-       struct io *d_iop;
-
-       io_setup(c_iop, IOP_C, 0);
-       update_blks(c_iop);
-       update_cregion(&all_regions, c_iop->t.time);
-       update_cregion(&c_iop->dip->regions, c_iop->t.time);
-       if (c_iop->pip)
-               update_cregion(&c_iop->pip->regions, c_iop->t.time);
-
-       d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));
-       if (d_iop) {
-               __u64 d2c = c_iop->t.time - d_iop->t.time;
-               update_d2c(d_iop, d2c);
-               latency_d2c(d_iop->dip, c_iop->t.time, d2c);
-               iostat_complete(d_iop, c_iop);
-               dip_foreach(d_iop, IOP_I, NULL, 1);
-               dip_foreach(d_iop, IOP_M, NULL, 1);
-               io_release(d_iop);
+       fprintf(ofp, "%c ", c);
+       dump_desc(ofp, to_iop);
+       if (from_iop) {
+               fprintf(ofp, "<- ");
+               dump_dev(ofp, from_iop->t.device);
+               dump_desc(ofp, from_iop);
        }
+
+       fprintf(ofp, "\n");
 
-       dip_foreach(c_iop, IOP_Q, q2c_func, 1);
-       io_release(c_iop);
+       to_iop->displayed = 1;
 }
 
-void rq_im2d_func(struct io *d_iop, struct io *im_iop)
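+/*
+ * release_iops -- release every IO linked on the passed-in deletion list
+ */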
+void release_iops(struct list_head *del_head)
 {
-       unupdate_i2d(im_iop, d_iop->t.time - im_iop->t.time);
+       struct io *x_iop;
+       struct list_head *p, *q;
+
+       list_for_each_safe(p, q, del_head) {
+               x_iop = list_entry(p, struct io, f_head);
+               LIST_DEL(&x_iop->f_head);
+               io_release(x_iop);
+       }
 }
 
-/*
- * Careful surgery
- * (1) Need to remove D & its I & M's
- * (2) Need to leave I's Q and M's Q's
- * (3) XXX: Need to downward adjust stats, but we don't carry PREVIOUS
- *     XXX: min/maxes?! We'll just adjust what we can, and hope that 
- *     XXX: the min/maxes are "pretty close". (REQUEUEs are rare, right?)
- */
-void handle_requeue(struct io *r_iop)
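+/*
+ * do_retries -- reprocess deferred completes and requeues
+ */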
+static void do_retries(void)
 {
-       struct io *d_iop;
-
-       io_setup(r_iop, IOP_R, 0);
-       d_iop = dip_find_sec(r_iop->dip, IOP_D, BIT_START(r_iop));
-       if (d_iop) {
-               dip_foreach(d_iop, IOP_I, rq_im2d_func, 1);
-               dip_foreach(d_iop, IOP_M, rq_im2d_func, 1);
-               iostat_unissue(d_iop);
-               io_release(d_iop);
+       struct io *iop;
+       struct list_head *p, *q;
+
+       list_for_each_safe(p, q, &retries) {
+               iop = list_entry(p, struct io, retry);
+               /* iop may be released by the retry call below */
+               if (iop->type == IOP_C)
+                       retry_complete(iop);
+               else
+                       retry_requeue(iop);
        }
-       io_release(r_iop);
 }
 
-void __add_trace(struct io *iop)
+static void __add_trace(struct io *iop)
 {
        time_t now = time(NULL);
 
@@ -162,20 +115,27 @@ void __add_trace(struct io *iop)
        }
 
        switch (iop->t.action & 0xffff) {
-       case __BLK_TA_QUEUE:            handle_queue(iop); break;
-       case __BLK_TA_BACKMERGE:        handle_merge(iop); break;
-       case __BLK_TA_FRONTMERGE:       handle_merge(iop); break;
-       case __BLK_TA_ISSUE:            handle_issue(iop); break;
-       case __BLK_TA_COMPLETE:         handle_complete(iop); break;
-       case __BLK_TA_INSERT:           handle_insert(iop); break;
-       case __BLK_TA_REMAP:            handle_remap(iop); break;
-       case __BLK_TA_REQUEUE:          handle_requeue(iop); break;
-       default:                        io_release(iop); break;
+       case __BLK_TA_QUEUE:            trace_queue(iop); break;
+       case __BLK_TA_REMAP:            trace_remap(iop); break;
+       case __BLK_TA_INSERT:           trace_insert(iop); break;
+       case __BLK_TA_BACKMERGE:        trace_merge(iop); break;
+       case __BLK_TA_FRONTMERGE:       trace_merge(iop); break;
+       case __BLK_TA_REQUEUE:          trace_requeue(iop); break;
+       case __BLK_TA_ISSUE:            trace_issue(iop); break;
+       case __BLK_TA_COMPLETE:         trace_complete(iop); break;
+       default:
+               io_release(iop);
+               return;
        }
+
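+       /* any trace other than a requeue lets deferred IOs retry */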
+       if (((iop->t.action & 0xffff) != __BLK_TA_REQUEUE) &&
+                                               !list_empty(&retries))
+               do_retries();
 }
 
 void add_trace(struct io *iop)
 {
+       if (iop->t.time == 15717167961)
+               dbg_ping();     /* debugging hook at one specific trace time */
        if (iop->t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
                char *slash = strchr(iop->pdu, '/');