Added in running stats for btt
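Note on the time handling that runs through this patch: iop->t.time is the raw nanosecond timestamp carried in each trace record, and the SECONDS()/NANO_SECONDS()/BIT_TIME() helpers referenced below come from btt's globals.h. The following is a minimal, self-contained sketch of how those helpers are used by the new running-stats code; the macro bodies shown are reconstructed from their usage in the diff and are an assumption, not verbatim copies of the blktrace source.

/*
 * Sketch only: illustrates the timestamp helpers referenced in this patch
 * (SECONDS, NANO_SECONDS, BIT_TIME from btt/globals.h). The macro bodies
 * below are reconstructed from how the diff uses them -- an assumption,
 * not a verbatim copy of the blktrace source.
 */
#include <stdio.h>

#define SECONDS(x)      ((unsigned long long)(x) / 1000000000ULL)
#define NANO_SECONDS(x) ((unsigned long long)(x) % 1000000000ULL)
#define BIT_TIME(t)     ((double)SECONDS(t) + ((double)NANO_SECONDS(t) / 1.0e9))

int main(void)
{
	/* stands in for the __u64 iop->t.time carried in each trace record */
	unsigned long long stamp = 12345678901ULL;	/* 12.345678901 s */

	/* what the reworked __add_trace() records for the running-stats display */
	double last_t_seen = BIT_TIME(stamp);

	/* the "%5d.%09lu" seconds.nanoseconds style used by trace_message() */
	printf("%5d.%09lu  (as double: %.9f)\n",
	       (int)SECONDS(stamp), (unsigned long)NANO_SECONDS(stamp),
	       last_t_seen);
	return 0;
}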
diff --git a/btt/trace.c b/btt/trace.c
index f9733bfce73796e1138097b35230b67274ef7fcf..48f4f99f754524cda107a1c63d72dd5c587edd86 100644
--- a/btt/trace.c
+++ b/btt/trace.c
  */
 #include "globals.h"
 
-int dump_level;
-LIST_HEAD(retries);
-
-void __dump_iop(FILE *ofp, struct io *iop, int extra_nl)
-{
-       fprintf(ofp, "%5d.%09lu %3d,%-3d %c %10llu+%-4u\n",
-               (int)SECONDS(iop->t.time),
-               (unsigned long)NANO_SECONDS(iop->t.time),
-               MAJOR(iop->t.device), MINOR(iop->t.device), type2c(iop->type),
-               (unsigned long long)iop->t.sector, t_sec(&iop->t));
-       if (extra_nl) fprintf(ofp, "\n");
-}
-
-void __dump_iop2(FILE *ofp, struct io *a_iop, struct io *l_iop)
-{
-       fprintf(ofp, "%5d.%09lu %3d,%-3d %c %10llu+%-4u <- (%3d,%-3d) %10llu\n",
-               (int)SECONDS(a_iop->t.time),
-               (unsigned long)NANO_SECONDS(a_iop->t.time),
-               MAJOR(a_iop->t.device), MINOR(a_iop->t.device), 
-               type2c(a_iop->type), (unsigned long long)a_iop->t.sector, 
-               t_sec(&a_iop->t), MAJOR(l_iop->t.device), 
-               MINOR(l_iop->t.device), (unsigned long long)l_iop->t.sector);
-}
-
-void release_iops(struct list_head *rmhd)
-{
-       struct io *x_iop;
-       struct list_head *p, *q;
-
-       list_for_each_safe(p, q, rmhd) {
-               x_iop = list_entry(p, struct io, f_head);
-               LIST_DEL(&x_iop->f_head);
-               io_release(x_iop);
-       }
-}
-
-void do_retries(__u64 now)
-{
-       struct io *iop;
-       struct list_head *p, *q;
-
-       list_for_each_safe(p, q, &retries) {
-               iop = list_entry(p, struct io, retry);
-               ASSERT(iop->type == IOP_C);
-
-               // iop could be gone after call...
-               retry_complete(iop, now);
-       }
-}
-
-static inline int retry_check_time(__u64 t)
-{
-       return next_retry_check && (t > next_retry_check);
-}
-
 static void __add_trace(struct io *iop)
 {
        time_t now = time(NULL);
-       __u64 tstamp = iop->t.time;
-       int run_retry = retry_check_time(iop->t.time);
+
+       last_t_seen = BIT_TIME(iop->t.time);
 
        n_traces++;
        iostat_check_time(iop->t.time);
 
        if (verbose && ((now - last_vtrace) > 0)) {
-#if defined(DEBUG)
-               printf("%10lu t\tretries=|%10d|\ttree size=|%10d|\r", 
-                       n_traces, list_len(&retries), rb_tree_size);
-#else
-               printf("%10lu t\r", n_traces);
-#endif
+               printf("%10lu t (%6.2lf%%)\r", n_traces, pct_done());
                if ((n_traces % 1000000) == 0) printf("\n");
                fflush(stdout);
                last_vtrace = now;
@@ -100,46 +40,63 @@ static void __add_trace(struct io *iop)
        case __BLK_TA_QUEUE:            trace_queue(iop); break;
        case __BLK_TA_REMAP:            trace_remap(iop); break;
        case __BLK_TA_INSERT:           trace_insert(iop); break;
+       case __BLK_TA_GETRQ:            trace_getrq(iop); break;
        case __BLK_TA_BACKMERGE:        trace_merge(iop); break;
        case __BLK_TA_FRONTMERGE:       trace_merge(iop); break;
        case __BLK_TA_REQUEUE:          trace_requeue(iop); break;
        case __BLK_TA_ISSUE:            trace_issue(iop); break;
        case __BLK_TA_COMPLETE:         trace_complete(iop); break;
-       default:                        
-               io_release(iop); 
+       case __BLK_TA_PLUG:             trace_plug(iop); break;
+       case __BLK_TA_UNPLUG_IO:        trace_unplug_io(iop); break;
+       case __BLK_TA_UNPLUG_TIMER:     trace_unplug_timer(iop); break;
+       case __BLK_TA_SLEEPRQ:          trace_sleeprq(iop); break;
+       default:
+               io_release(iop);
                return;
        }
+}
 
-       if (run_retry && !list_empty(&retries)) {
-               do_retries(tstamp);
-               bump_retry(tstamp);
-       }
+static void trace_message(struct io *iop)
+{
+       char scratch[15];
+       char msg[iop->t.pdu_len + 1];
+
+       if (!io_setup(iop, IOP_M))
+               return;
+
+       memcpy(msg, iop->pdu, iop->t.pdu_len);
+       msg[iop->t.pdu_len] = '\0';
+
+       fprintf(msgs_ofp, "%s %5d.%09lu %s\n",
+               make_dev_hdr(scratch, 15, iop->dip, 1),
+               (int)SECONDS(iop->t.time),
+               (unsigned long)NANO_SECONDS(iop->t.time), msg);
 }
 
 void add_trace(struct io *iop)
 {
-
        if (iop->t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) {
-               if (iop->t.pid == 0) 
-                       add_process(0, "kernel");
-               else {
-                       char *slash = strchr(iop->pdu, '/');
-                       if (slash)
-                               *slash = '\0';
-
-                       add_process(iop->t.pid, iop->pdu);
-               }
+               if (iop->t.action == BLK_TN_PROCESS) {
+                       if (iop->t.pid == 0)
+                               process_alloc(0, "kernel");
+                       else {
+                               char *slash = strchr(iop->pdu, '/');
+                               if (slash)
+                                       *slash = '\0';
+
+                               process_alloc(iop->t.pid, iop->pdu);
+                       }
+               } else if (iop->t.action == BLK_TN_MESSAGE)
+                       trace_message(iop);
                io_release(iop);
-       }
-       else if (iop->t.action & BLK_TC_ACT(BLK_TC_PC))
+       } else if (iop->t.action & BLK_TC_ACT(BLK_TC_PC)) {
                io_release(iop);
-       else {
+       } else {
                if (time_bounded) {
                        if (BIT_TIME(iop->t.time) < t_astart) {
                                io_release(iop);
                                return;
-                       }
-                       else if (BIT_TIME(iop->t.time) > t_aend) {
+                       } else if (BIT_TIME(iop->t.time) > t_aend) {
                                io_release(iop);
                                done = 1;
                                return;