summaryrefslogtreecommitdiff
path: root/btt/inlines.h
diff options
context:
space:
mode:
authorAlan David Brunelle <Alan.Brunelle@hp.com>2006-10-03 14:44:18 +0200
committerJens Axboe <jens.axboe@oracle.com>2006-10-03 14:44:18 +0200
commit6eb42155679cfa6fcd03d23199c5ba0a233b53e7 (patch)
tree88882783654bff6086c81f3c73bc2cefb00862cc /btt/inlines.h
parentd216e9ce50602b7a7f99e1196e42d52d00f1b4f5 (diff)
downloadblktrace-6eb42155679cfa6fcd03d23199c5ba0a233b53e7.tar.gz
blktrace-6eb42155679cfa6fcd03d23199c5ba0a233b53e7.tar.bz2
[PATCH] Convert to using on-the-fly RB trees, no post-traversal.
From: Alan D. Brunelle <Alan.Brunelle@hp.com>

- Converted to using RB trees as much as possible - significant speed up in general.
- Changed from constructing IO bushes to just doing things inline as we get the traces. Significant speed up and reduction in complexity. Lost ability to absolutely handle REQUEUE traces (may put out the wrong min/max information for certain stats).
- Added btt/dip_rb.c
- Removed btt/traverse.c btt/iofree.c btt/cylist.c
- Fixed message concerning stats & range data to include '.dat'
- Added in timing statistics (K traces per second handled)
- Changed verbose to just update once per second
- Added notions of "foreach" iterators for devices, processes, IO traces, ...
- Removed a lot of redundant code in output (using iterators instead)
- If not interested in seek information, don't calculate a lot of stuff - again, significant speed up.

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'btt/inlines.h')
-rw-r--r--  btt/inlines.h  185
1 file changed, 77 insertions(+), 108 deletions(-)
diff --git a/btt/inlines.h b/btt/inlines.h
index 6c7c640..c8945bf 100644
--- a/btt/inlines.h
+++ b/btt/inlines.h
@@ -18,40 +18,18 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-static inline void dbg_ping(void) { }
-
-static inline void *zmalloc(size_t len)
-{
- return memset(malloc(len), 0, len);
-}
-
-static inline int is_dev(struct io *iop, unsigned int mjr, unsigned int mnr)
-{
- return MAJOR(iop->t.device) == mjr && MINOR(iop->t.device) == mnr;
-}
-
-static inline int in_bit(struct io *in, struct io *out)
-{
- return (BIT_START(out) <= BIT_START(in)) &&
- (BIT_END(in) <= BIT_END(out));
-}
-
-static inline int is_bit(struct io *i1, struct io *i2)
-{
- return (BIT_START(i1) == BIT_START(i2)) && (BIT_END(i1) == BIT_END(i2));
-}
static inline struct range_info *new_cur(__u64 time)
{
- struct range_info *cur = zmalloc(sizeof(*cur));
+ struct range_info *cur = malloc(sizeof(struct range_info));
INIT_LIST_HEAD(&cur->head);
cur->start = time;
return cur;
}
-static inline void update_range(struct list_head *head_p,
- struct range_info **cur_p, __u64 time)
+static inline void update_range(struct list_head *head_p,
+ struct range_info **cur_p, __u64 time)
{
if (*cur_p == NULL)
*cur_p = new_cur(time);
@@ -96,6 +74,12 @@ static inline void avg_update(struct avg_info *ap, __u64 t)
}
}
+static inline void avg_unupdate(struct avg_info *ap, __u64 t)
+{
+ ap->n--;
+ ap->total -= t;
+}
+
static inline void update_lq(__u64 *last_q, struct avg_info *avg, __u64 time)
{
if (*last_q != ((__u64)-1))
@@ -103,111 +87,48 @@ static inline void update_lq(__u64 *last_q, struct avg_info *avg, __u64 time)
*last_q = time;
}
-static inline struct list_head *dip_get_head(struct d_info *dip,
- enum iop_type type)
-{
- return &dip->iop_heads[type];
-}
-
-static inline struct list_head *dip_get_head_dev(__u32 dev, enum iop_type type)
-{
- struct d_info *dip = __dip_find(dev);
-
- if (!dip)
- return NULL;
- return dip_get_head(__dip_find(dev), type);
-}
-
static inline void dip_update_q(struct d_info *dip, struct io *iop)
{
update_lq(&dip->last_q, &dip->avgs.q2q, iop->t.time);
update_qregion(&dip->regions, iop->t.time);
}
-static inline void dip_rem(struct io *iop)
-{
- LIST_DEL(&iop->dev_head);
-}
-
-static inline void *my_malloc(struct my_mem **head_p, size_t len)
+static inline struct io *io_alloc(void)
{
- struct my_mem *this = *head_p;
+ struct io *iop;
- if (this)
- *head_p = this->next;
+ if (!list_empty(&free_ios)) {
+ iop = list_entry(free_ios.next, struct io, f_head);
+ LIST_DEL(&iop->f_head);
+ }
else
- this = malloc(len);
+ iop = malloc(sizeof(struct io));
- return this;
+ return memset(iop, 0, sizeof(struct io));
}
-static inline void *my_zmalloc(struct my_mem **head_p, size_t len)
-{
- return memset(my_malloc(head_p, len), 0, len);
-}
-
-static inline void my_free(struct my_mem **head_p, void *p)
+static inline void io_free(struct io *iop)
{
- struct my_mem *this = p;
-
- this->next = *head_p;
- *head_p = this;
+ list_add_tail(&iop->f_head, &free_ios);
}
-static inline void io_setup(struct io *iop, enum iop_type type)
+static inline void io_setup(struct io *iop, enum iop_type type, int link)
{
iop->type = type;
- iop->dip = dip_add(iop->t.device, iop);
+ iop->dip = dip_add(iop->t.device, iop, link);
iop->pip = find_process(iop->t.pid, NULL);
-
- n_io_allocs++;
- list_add_tail(&iop->all_head, &all_ios);
-}
-
-static inline void io_free(struct io *iop)
-{
- ASSERT(iop->users == 0);
-
- LIST_DEL(&iop->all_head);
- dip_rem(iop);
- IO_FREE(iop);
- n_io_frees++;
+ iop->linked = link;
}
-static inline void io_get(struct io *iop)
+static inline void io_release(struct io *iop)
{
- iop->users++;
-}
-
-
-static inline int __io_put(struct io *iop)
-{
- return --iop->users;
-}
-
-static inline void io_put(struct io *iop)
-{
- if (__io_put(iop) == 0) {
- io_free_resources(iop);
- io_free(iop);
+ if (iop->linked) {
+ dip_rem(iop);
+ iop->linked = 0;
}
-}
-
-static inline void io_link(struct io **p_dst, struct io *iop)
-{
- ASSERT(iop != NULL);
- io_get(iop);
- *p_dst = iop;
-}
-
-static inline void io_unlink(struct io **p_dst)
-{
- ASSERT(*p_dst != NULL);
- io_put(*p_dst);
-
-#if defined(DEBUG)
- *p_dst = NULL;
-#endif
+ if (iop->pdu)
+ free(iop->pdu);
+ io_free(iop);
}
#define UPDATE_AVGS(_avg, _iop, _pip, _time) do { \
@@ -216,6 +137,12 @@ static inline void io_unlink(struct io **p_dst)
if (_pip) avg_update(&_pip->avgs. _avg , _time); \
} while (0)
+#define UNUPDATE_AVGS(_avg, _iop, _pip, _time) do { \
+ avg_unupdate(&all_avgs. _avg , _time); \
+ avg_unupdate(&_iop->dip->avgs. _avg , _time); \
+ if (_pip) avg_unupdate(&_pip->avgs. _avg , _time); \
+ } while (0)
+
static inline void update_q2c(struct io *iop, __u64 c_time)
{
UPDATE_AVGS(q2c, iop, iop->pip, c_time);
@@ -236,6 +163,11 @@ static inline void update_i2d(struct io *iop, __u64 d_time)
UPDATE_AVGS(i2d, iop, iop->pip, d_time);
}
+static inline void unupdate_i2d(struct io *iop, __u64 d_time)
+{
+ UNUPDATE_AVGS(i2d, iop, iop->pip, d_time);
+}
+
static inline void update_d2c(struct io *iop, __u64 c_time)
{
UPDATE_AVGS(d2c, iop, iop->pip, c_time);
@@ -245,7 +177,44 @@ static inline void update_blks(struct io *iop)
{
__u64 nblks = iop->t.bytes >> 9;
avg_update(&all_avgs.blks, nblks);
+ ASSERT(iop->dip != NULL);
avg_update(&iop->dip->avgs.blks, nblks);
if (iop->pip)
avg_update(&iop->pip->avgs.blks, nblks);
}
+
+static inline struct rb_root *__get_root(struct d_info *dip, enum iop_type type)
+{
+ struct rb_root *roots = dip->heads;
+ return &roots[type];
+}
+
+static inline void *dip_rb_mkhds(void)
+{
+ size_t len = N_IOP_TYPES * sizeof(struct rb_root);
+ return memset(malloc(len), 0, len);
+}
+
+static inline void dip_rb_ins(struct d_info *dip, struct io *iop)
+{
+ rb_insert(__get_root(dip, iop->type), iop);
+}
+
+static inline void dip_rb_rem(struct io *iop)
+{
+ rb_erase(&iop->rb_node, __get_root(iop->dip, iop->type));
+}
+
+static inline void dip_rb_fe(struct d_info *dip, enum iop_type type,
+ struct io *iop,
+ void (*fnc)(struct io *iop, struct io *this),
+ struct list_head *head)
+{
+ rb_foreach(__get_root(dip, type)->rb_node, iop, fnc, head);
+}
+
+static inline struct io *dip_rb_find_sec(struct d_info *dip,
+ enum iop_type type, __u64 sec)
+{
+ return rb_find_sec(__get_root(dip, type), sec);
+}