path: root/btt/trace_complete.c
author	Alan D. Brunelle <alan.brunelle@hp.com>	2007-02-06 20:46:16 +0100
committer	Jens Axboe <jens.axboe@oracle.com>	2007-02-06 20:46:16 +0100
commit	d76c5b81b99faca0959afcdd2e73330c61f69bfc (patch)
tree	cfb3df57c89960cd144fcf6437e617ea39f78aa4 /btt/trace_complete.c
parent	0ac4c20e46fc644b1ac9e3021e3ebaa88b9c536f (diff)
[PATCH]: btt - major fixes and speed improvements
From: Alan D. Brunelle <Alan.Brunelle@hp.com>

Lots of changes to how we handle traces - adds robustness & is quicker.

This large patch contains the following changes to the trace handling aspects of btt:

1. Use larger buffers for output options.
2. Use mmap to handle the input of trace data.
3. More precise btt statistics are output at the end.
4. Added in (under DEBUG) the display of unhandled traces. I was running into the problem where traces were not being connected, and the rb trees would get quite large. This would slow things down considerably. (See below for details on why traces weren't being handled.)
5. Sprinkled some ASSERTs (under DEBUG).
6. Added a new btt-specific trace type: "links" - since 'A' (remap) traces contain two separate pieces of information, I broke them up into a link and a remap trace. [Thus, it is easy to find either end of the remap.]
7. Added in the notion of retries of completes (and requeues). I'm finding some discrepancies in the time stamps; in order to make btt handle these better, I've added the notion of keeping the trace around for a bit, to see if it gets linked up later.
8. Separated trace streams into: simple IOs, and remapped IOs.
9. Fixed up D2C averages - Q2I + I2D + D2C should equal Q2C averages.

----------------------------------------------------------------------------

I do not understand why it is so, but I am seeing two 'C' (complete) traces for the same IO track at times. The sequence number is different (+1 for the second one), and the time stamps are different (100's of microseconds apart). I'm investigating this.

At least on an IA64, I am seeing time inconsistencies amongst CPUs on very heavy loads (48 disks, 4 CPUs, almost 300 million traces). I find the 'D' (issue) and 'C' (complete) traces coming out ahead of the associated 'I' (insert) and 'M' (merge) traces. It would be good to get this fixed in the kernel, but I figure it is also goodness to attempt to account for it in post-processing as well.

----------------------------------------------------------------------------

This work was done in order to handle some of these large data sets, and I've found that the performance is reasonable - here are some stats for very large files (the largest of which used to take well over 12 minutes; now it takes about 5 1/2 minutes - and a lot of that is just getting the 18GiB of data read in):

	Size     Real       User       System
	-----    --------   --------   -------
	 7GiB    123.445s    80.188s   11.392s
	10GiB    179.148s   137.456s   16.680s
	13GiB    237.561s   156.992s   21.968s
	16GiB    283.262s   187.468s   26.748s
	18GiB    336.345s   225.084s   31.200s

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
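As an illustration of item 2 above (mmap'd input of trace data), below is a minimal sketch of walking a per-CPU binary trace file via mmap(2) instead of buffered read(2). This is not the btt code itself; the walk_traces() helper is hypothetical, and it assumes struct blk_io_trace as declared in <linux/blktrace_api.h>, where each on-disk record is followed by pdu_len bytes of payload.

	/*
	 * Sketch only: map a trace file and step through its records.
	 * Records can then be handed to the per-trace handlers in place,
	 * without copying them into intermediate read buffers.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>
	#include <linux/blktrace_api.h>

	static void walk_traces(const char *path)
	{
		struct stat st;
		int fd = open(path, O_RDONLY);

		if (fd < 0 || fstat(fd, &st) < 0) {
			perror(path);
			exit(1);
		}
		if (st.st_size == 0)
			return;

		void *base = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (base == MAP_FAILED) {
			perror("mmap");
			exit(1);
		}

		char *p = base, *end = p + st.st_size;
		while (p + sizeof(struct blk_io_trace) <= end) {
			struct blk_io_trace *t = (struct blk_io_trace *)p;

			/* hand the record to the per-trace handlers here */
			printf("%5u: %15.9f d=%u,%u s=%llu\n",
			       (unsigned)t->sequence,
			       (double)t->time / 1.0e9,
			       t->device >> 20,		/* 20-bit minor encoding */
			       t->device & ((1 << 20) - 1),
			       (unsigned long long)t->sector);

			p += sizeof(*t) + t->pdu_len;	/* skip trailing PDU bytes */
		}

		munmap(base, st.st_size);
		close(fd);
	}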
Diffstat (limited to 'btt/trace_complete.c')
-rw-r--r--	btt/trace_complete.c	204
1 file changed, 104 insertions(+), 100 deletions(-)
diff --git a/btt/trace_complete.c b/btt/trace_complete.c
index 32c43b5..5ef23c1 100644
--- a/btt/trace_complete.c
+++ b/btt/trace_complete.c
@@ -22,135 +22,139 @@
LIST_HEAD(pending_cs);
-static void gen_c_list(struct io *c_iop, struct list_head *c_head)
+static inline void __run_complete(struct io *c_iop)
{
- struct io *iop;
- struct list_head *p;
-
- __list_for_each(p, &pending_cs) {
- iop = list_entry(p, struct io, c_pending);
- if (iop->t.device == c_iop->t.device)
- continue;
- if (dip_find_sec(iop->dip, IOP_D, BIT_START(iop)) == NULL)
- continue;
-
- __link(iop, c_iop);
- if (ready_complete(iop, c_iop))
- list_add_tail(&iop->f_head, c_head);
- __unlink(iop, c_iop);
- }
-}
-
-static void run_comp(struct io *c_iop, struct io *top, struct list_head *rmhd)
-{
- struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));
-
- update_blks(c_iop);
- if (d_iop) {
- __u64 d2c = tdelta(d_iop, c_iop);
+ LIST_HEAD(rmhd);
- update_d2c(d_iop, d2c);
- latency_d2c(d_iop->dip, c_iop->t.time, d2c);
- iostat_complete(d_iop, c_iop);
+ if (remapper_dev(c_iop->t.device))
+ bilink_for_each_down(run_remap, c_iop, &rmhd, 1);
+ else
+ bilink_for_each_down(run_issue, c_iop, &rmhd, 1);
- __link(d_iop, c_iop);
- run_issue(d_iop, top, rmhd);
- __unlink(d_iop, c_iop);
- }
- else {
- LIST_HEAD(head);
- struct io *iop;
- struct list_head *p, *q;
-
- gen_c_list(c_iop, &head);
- list_for_each_safe(p, q, &head) {
- iop = list_entry(p, struct io, f_head);
- LIST_DEL(&iop->f_head);
-
- dump_level++;
- __link(iop, c_iop);
- run_comp(iop, top, rmhd);
- __unlink(iop, c_iop);
- dump_level--;
- }
- }
-
- dump_iop(per_io_ofp, c_iop, NULL, 0);
+ dump_iop(c_iop, 1);
LIST_DEL(&c_iop->c_pending);
del_retry(c_iop);
- list_add_tail(&c_iop->f_head, rmhd);
+
+ list_add_tail(&c_iop->f_head, &rmhd);
+ release_iops(&rmhd);
}
-static int ready_comp(struct io *c_iop,
- __attribute__((__unused__)) struct io *top)
+static int ready_complete_remapper(struct io *c_iop)
{
LIST_HEAD(head);
- struct io *iop;
struct list_head *p, *q;
- __u64 bl = c_iop->bytes_left;
+ struct io *l_iop, *a_iop;
- gen_c_list(c_iop, &head);
+ dip_foreach_list(c_iop, IOP_L, &head);
list_for_each_safe(p, q, &head) {
- iop = list_entry(p, struct io, f_head);
- LIST_DEL(&iop->f_head);
-
- __link(iop, c_iop);
- if (ready_complete(iop, c_iop))
- bl -= iop->bytes_left;
- __unlink(iop, c_iop);
+ l_iop = list_entry(p, struct io, f_head);
+ LIST_DEL(&l_iop->f_head);
+
+ ASSERT(!list_empty(&l_iop->up_list));
+ a_iop = bilink_first_up(l_iop, NULL);
+ if (ready_remap(a_iop, c_iop)) {
+ dip_rem(l_iop);
+ bilink(a_iop, c_iop);
+ c_iop->bytes_left -= a_iop->t.bytes;
+ }
}
- return bl == 0;
+ return c_iop->bytes_left == 0;
}
-void trace_complete(struct io *c_iop)
+int ready_complete(struct io *c_iop)
{
- if (!io_setup(c_iop, IOP_C)) {
- io_release(c_iop);
- return;
+ __u64 d2c;
+ struct io *d_iop;
+
+ if (c_iop->bytes_left == 0)
+ return 1;
+
+ if (remapper_dev(c_iop->t.device))
+ return ready_complete_remapper(c_iop);
+
+ if (!list_empty(&c_iop->down_list))
+ return 1;
+
+ d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));
+ if (!d_iop)
+ return -1;
+
+ if (c_iop->t.bytes != d_iop->t.bytes) {
+ fprintf(stderr,
+ "\nFATAL: Probable time anomaly detected\n");
+ fprintf(stderr,
+ "D @ %15.9lf missing C, later C @ %15.9lf\n",
+ BIT_TIME(d_iop->t.time),
+ BIT_TIME(c_iop->t.time));
+ exit(1);
}
- list_add_tail(&c_iop->c_pending, &pending_cs);
- if (ready_complete(c_iop, c_iop)) {
- dump_level = 0;
- run_complete(c_iop);
- }
- else
- add_retry(c_iop);
-}
-
-int retry_complete(struct io *c_iop)
-{
- if (!ready_complete(c_iop, c_iop))
+ if (!ready_issue(d_iop, c_iop))
return 0;
- run_complete(c_iop);
+ c_iop->bytes_left = 0;
+
+ d2c = tdelta(d_iop, c_iop);
+ update_d2c(d_iop, d_iop->down_len, d2c);
+ latency_d2c(d_iop->dip, c_iop->t.time, d2c);
+ iostat_complete(d_iop, c_iop);
+
+ bilink(d_iop, c_iop);
+ dip_rem(d_iop);
return 1;
}
-int ready_complete(struct io *c_iop, struct io *top)
+void trace_complete(struct io *c_iop)
{
- struct io *d_iop = dip_find_sec(c_iop->dip, IOP_D, BIT_START(c_iop));
-
- if (d_iop) {
- ASSERT(d_iop->t.bytes == c_iop->bytes_left);
- return ready_issue(d_iop, top);
+ if (io_setup(c_iop, IOP_C)) {
+ update_blks(c_iop);
+ update_cregion(&all_regions, c_iop->t.time);
+ update_cregion(&c_iop->dip->regions, c_iop->t.time);
+ if (c_iop->pip)
+ update_cregion(&c_iop->pip->regions, c_iop->t.time);
+
+ list_add_tail(&c_iop->c_pending, &pending_cs);
+ switch (ready_complete(c_iop)) {
+ case 1:
+ __run_complete(c_iop);
+ break;
+ case 0:
+ add_retry(c_iop);
+ break;
+ case -1:
+ LIST_DEL(&c_iop->c_pending);
+ del_retry(c_iop);
+ io_release(c_iop);
+ break;
+ }
}
else
- return ready_comp(c_iop, top);
+ io_release(c_iop);
}
-void run_complete(struct io *c_iop)
+void retry_complete(struct io *c_iop, __u64 now)
{
- LIST_HEAD(rmhd);
-
- update_cregion(&all_regions, c_iop->t.time);
- update_cregion(&c_iop->dip->regions, c_iop->t.time);
- if (c_iop->pip)
- update_cregion(&c_iop->pip->regions, c_iop->t.time);
-
- run_comp(c_iop, c_iop, &rmhd);
- if (per_io_ofp) fprintf(per_io_ofp, "\n");
- release_iops(&rmhd);
+ double tc = BIT_TIME(c_iop->t.time);
+
+ switch (ready_complete(c_iop)) {
+ case 1:
+#if defined(DEBUG)
+ fprintf(stderr, "Retried %15.9lf success!\n", tc);
+#endif
+ __run_complete(c_iop);
+ break;
+ case 0:
+ if (now == 0 || ((BIT_TIME(now) - tc) < 1.0))
+ break;
+ if (!list_empty(&c_iop->down_list))
+ break;
+ /*FALLTHROUGH*/
+ case -1:
+ LIST_DEL(&c_iop->c_pending);
+ del_retry(c_iop);
+ io_release(c_iop);
+ break;
+ }
}