author     Alan D. Brunelle <alan.brunelle@hp.com>   2007-02-06 20:46:16 +0100
committer  Jens Axboe <jens.axboe@oracle.com>        2007-02-06 20:46:16 +0100
commit     d76c5b81b99faca0959afcdd2e73330c61f69bfc (patch)
tree       cfb3df57c89960cd144fcf6437e617ea39f78aa4 /btt/trace_queue.c
parent     0ac4c20e46fc644b1ac9e3021e3ebaa88b9c536f (diff)
download   blktrace-d76c5b81b99faca0959afcdd2e73330c61f69bfc.tar.gz
           blktrace-d76c5b81b99faca0959afcdd2e73330c61f69bfc.tar.bz2
[PATCH]: btt - major fixes and speed improvements
From: Alan D. Brunelle <Alan.Brunelle@hp.com>

Lots of changes to how we handle traces - adds robustness and speed.

This large patch contains the following changes to the trace-handling aspects of btt:

1. Use larger buffers for output options.
2. Use mmap to handle the input of trace data (a sketch of this approach follows the message).
3. More precise btt statistics are output at the end.
4. Added in (under DEBUG) the display of unhandled traces. I was running into a problem where traces were not being connected, and the rb trees would get quite large, slowing things down considerably. (See below for details on why traces weren't being handled.)
5. Sprinkled some ASSERTs (under DEBUG).
6. Added a new btt-specific trace type: "links". Since 'A' (remap) traces contain two separate pieces of information, I broke them up into a link and a remap trace. [Thus, it is easy to find either end of the remap.]
7. Added the notion of retries of completes (and requeues). I'm finding some discrepancies in the time stamps; to make btt handle these better, I've added the notion of keeping a trace around for a while to see if it gets linked up later.
8. Separated trace streams into simple IOs and remapped IOs.
9. Fixed up D2C averages - Q2I + I2D + D2C should equal Q2C averages.

----------------------------------------------------------------------------

I do not understand why it is so, but at times I am seeing two 'C' (complete) traces for the same IO. The sequence number is different (+1 for the second one), and the time stamps are different (hundreds of microseconds apart). I'm investigating this.

At least on an IA64, I am seeing time inconsistencies amongst CPUs under very heavy loads (48 disks, 4 CPUs, almost 300 million traces): I find 'D' (issue) and 'C' (complete) traces coming out ahead of the associated 'I' (insert) and 'M' (merge) traces. It would be good to get this fixed in the kernel, but I figure it is also worth attempting to account for it in post-processing.

----------------------------------------------------------------------------

This work was done in order to handle some of these large data sets, and I've found that the performance is reasonable. Here are some stats for very large files (the largest of which used to take well over 12 minutes; now it takes about 5 1/2 minutes, and a lot of that is just reading in the 18GiB of data):

 Size      Real      User    System
-----  --------  --------  --------
 7GiB  123.445s   80.188s  11.392s
10GiB  179.148s  137.456s  16.680s
13GiB  237.561s  156.992s  21.968s
16GiB  283.262s  187.468s  26.748s
18GiB  336.345s  225.084s  31.200s

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
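The mmap-based input in point 2 avoids copying tens of GiB of trace data through read(2) buffers. Below is a minimal sketch of that approach, assuming plain POSIX mmap(2); map_trace_file is an illustrative name, not btt's actual function:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *map_trace_file(const char *path, size_t *len)
{
	struct stat st;
	void *addr;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return NULL;
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		close(fd);
		return NULL;
	}

	/* Map the whole file read-only: the kernel pages trace records
	 * in on demand instead of btt read(2)-ing them one at a time. */
	addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);	/* the mapping outlives the descriptor */
	if (addr == MAP_FAILED)
		return NULL;

	*len = st.st_size;
	return addr;
}

The caller then walks the returned buffer as a flat array of trace records and munmap()s it when done.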
Diffstat (limited to 'btt/trace_queue.c')
-rw-r--r--  btt/trace_queue.c  83
1 file changed, 34 insertions(+), 49 deletions(-)
diff --git a/btt/trace_queue.c b/btt/trace_queue.c
index d32e159..676684d 100644
--- a/btt/trace_queue.c
+++ b/btt/trace_queue.c
@@ -20,73 +20,58 @@
  */
 #include "globals.h"
 
-void trace_queue(struct io *q_iop)
+static inline void __update_q2c(struct io *q_iop, struct io *c_iop)
 {
-	if (!io_setup(q_iop, IOP_Q)) {
-		io_release(q_iop);
-		return;
-	}
+	__u64 q2c = tdelta(q_iop, c_iop);
 
-	update_lq(&last_q, &all_avgs.q2q, q_iop->t.time);
-	update_qregion(&all_regions, q_iop->t.time);
-	dip_update_q(q_iop->dip, q_iop);
-	pip_update_q(q_iop);
+	update_q2c(q_iop, q2c);
+	latency_q2c(q_iop->dip, q_iop->t.time, q2c);
 }
 
-int ready_queue(struct io *q_iop, struct io *top)
+void run_queue(struct io *q_iop, struct io *c_iop, struct list_head *rmhd)
 {
-	struct io *a_iop = dip_find_sec(q_iop->dip, IOP_A, BIT_START(q_iop));
+	struct bilink *blp;
+	struct io *a_iop = bilink_first_down(q_iop, &blp);
 
 	if (a_iop) {
-		ASSERT(a_iop->bytes_left == q_iop->bytes_left);
-		return ready_remap(a_iop, top);
+		run_remap(a_iop, c_iop, rmhd);
+		biunlink(blp);
 	}
 
-	return q_iop->t.device == top->t.device &&
-	       BIT_START(top) <= BIT_START(q_iop) &&
-	       BIT_END(q_iop) <= BIT_END(top);
+	__update_q2c(q_iop, c_iop);
+	dump_iop(q_iop, 0);
+
+	list_add_tail(&q_iop->f_head, rmhd);
 }
 
-void run_queue(struct io *q_iop, struct io *top, struct list_head *del_head)
+int ready_queue(struct io *q_iop, struct io *c_iop)
 {
-	struct io *iop;
-	struct io *a_iop = dip_find_sec(q_iop->dip, IOP_A, BIT_START(q_iop));
-
-	if (a_iop) {
-		__link(a_iop, q_iop);
-		run_remap(a_iop, top, del_head);
-		__unlink(a_iop, q_iop);
-	}
+	struct io *a_iop;
 
-	for (iop = q_iop; iop != NULL; iop = list_first_up(iop)) {
-		if (iop->type == IOP_C && iop->t.device == q_iop->t.device) {
-			__u64 q2c = tdelta(q_iop, iop);
+	if (!list_empty(&q_iop->down_list))
+		return 1;
 
-			update_q2c(q_iop, q2c);
-			latency_q2c(q_iop->dip, q_iop->t.time, q2c);
+	a_iop = dip_find_sec(q_iop->dip, IOP_A, BIT_START(q_iop));
+	if (!a_iop)
+		return 1;
 
-			dump_iop(per_io_ofp, q_iop, NULL,
-				 (q_iop->t.device == top->t.device) ? -4 : 0);
+	if (!ready_remap(a_iop, c_iop))
+		return 0;
 
-			break;
-		}
-	}
-
-	iop = list_first_up(q_iop);
-	q_iop->bytes_left -= iop->bytes_left;
-	if (q_iop->bytes_left == 0)
-		list_add_tail(&q_iop->f_head, del_head);
+	ASSERT(q_iop->t.bytes == a_iop->t.bytes);
+	bilink(a_iop, q_iop);
+	dip_rem(a_iop);
+	return 1;
 }
 
-void run_unqueue(struct io *q_iop, struct list_head *del_head)
+void trace_queue(struct io *q_iop)
 {
-	struct io *a_iop = dip_find_sec(q_iop->dip, IOP_A, BIT_START(q_iop));
-
-	if (a_iop) {
-		__link(a_iop, q_iop);
-		run_unremap(a_iop, del_head);
-		__unlink(a_iop, q_iop);
+	if (io_setup(q_iop, IOP_Q)) {
+		update_lq(&last_q, &all_avgs.q2q, q_iop->t.time);
+		update_qregion(&all_regions, q_iop->t.time);
+		dip_update_q(q_iop->dip, q_iop);
+		pip_update_q(q_iop);
 	}
-
-	list_add_tail(&q_iop->f_head, del_head);
+	else
+		io_release(q_iop);
 }