From 6fe4709e97f29162e075f029a2ce6a7c4a444ef1 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 29 Aug 2005 09:45:41 +0200
Subject: [PATCH] [PATCH] Always store trace data in big endian format

---
 CHANGELOG      |   1 +
 blkparse.c     |  22 ++-------
 blktrace.c     |  17 +------
 blktrace.h     | 128 +++++++++++++++++++++++--------------------
 blktrace_api.h |  84 ++++++++++++++++++++++++++++++++
 5 files changed, 152 insertions(+), 100 deletions(-)
 create mode 100644 blktrace_api.h

diff --git a/CHANGELOG b/CHANGELOG
index 8975f71..26f2bae 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -6,6 +6,7 @@
 	- Fix RELAYFS Kconfig selection (kernel patch)
 	- blktrace: Don't touch the stored trace, leave magic and version
 	  for blkparse to see as well.
+	- Always store trace data in big endian format
 20050827:
 	- Various cleanups and killing unused variables
 20050826:
diff --git a/blkparse.c b/blkparse.c
index 9f60dcf..876668c 100644
--- a/blkparse.c
+++ b/blkparse.c
@@ -291,21 +291,6 @@ static inline int trace_rb_insert(struct trace *t)
 	return 0;
 }
 
-static inline int verify_trace(struct blk_io_trace *t)
-{
-	if (!CHECK_MAGIC(t)) {
-		fprintf(stderr, "bad trace magic %x\n", t->magic);
-		return 1;
-	}
-	if ((t->magic & 0xff) != SUPPORTED_VERSION) {
-		fprintf(stderr, "unsupported trace version %x\n",
-			t->magic & 0xff);
-		return 1;
-	}
-
-	return 0;
-}
-
 static int sort_entries(void *traces, unsigned long offset, int cpu)
 {
 	struct blk_io_trace *bit;
@@ -315,13 +300,16 @@ static int sort_entries(void *traces, unsigned long offset, int cpu)
 
 	memset(&rb_root, 0, sizeof(rb_root));
 
-	do {
+	while (traces - start <= offset - sizeof(*bit)) {
 		bit = traces;
+
 		t = malloc(sizeof(*t));
 		t->bit = bit;
 		t->cpu = cpu;
 		memset(&t->rb_node, 0, sizeof(t->rb_node));
 
+		trace_to_cpu(bit);
+
 		if (verify_trace(bit))
 			break;
 
@@ -330,7 +318,7 @@
 
 		traces += sizeof(*bit) + bit->pdu_len;
 		nelems++;
-	} while (traces < start + offset + sizeof(*bit));
+	}
 
 	return nelems;
 }
diff --git a/blktrace.c b/blktrace.c
index 1064bd4..2a21c4c 100644
--- a/blktrace.c
+++ b/blktrace.c
@@ -71,21 +71,6 @@ static void stop_trace(void)
 	close(devfd);
 }
 
-static inline int verify_trace(struct blk_io_trace *t)
-{
-	if (!CHECK_MAGIC(t)) {
-		fprintf(stderr, "bad trace magic %x\n", t->magic);
-		return 1;
-	}
-	if ((t->magic & 0xff) != SUPPORTED_VERSION) {
-		fprintf(stderr, "unsupported trace version %x\n",
-			t->magic & 0xff);
-		return 1;
-	}
-
-	return 0;
-}
-
 static void extract_data(int cpu, char *ifn, int ifd, char *ofn, int ofd,
 			 int nb)
 {
@@ -176,6 +161,8 @@ static void *extract(void *arg)
 		if (verify_trace(&t))
 			exit(1);
 
+		trace_to_be(&t);
+
 		ret = write(ofd, &t, sizeof(t));
 		if (ret < 0) {
 			perror(op);
diff --git a/blktrace.h b/blktrace.h
index 2f35a45..388f219 100644
--- a/blktrace.h
+++ b/blktrace.h
@@ -1,79 +1,71 @@
-#if !defined(__BLKTRACE_H__)
-#define __BLKTRACE_H__ 1
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
 
+#include <byteswap.h>
 #include <asm/types.h>
+#include <asm/byteorder.h>
+
+#include "blktrace_api.h"
 
-enum {
-	BLK_TC_READ	= 1 << 0,	/* reads */
-	BLK_TC_WRITE	= 1 << 1,	/* writes */
-	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
-	BLK_TC_SYNC	= 1 << 3,	/* barrier */
-	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
-	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
-	BLK_TC_ISSUE	= 1 << 6,	/* issue */
-	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
-	BLK_TC_FS	= 1 << 8,	/* fs requests */
-	BLK_TC_PC	= 1 << 9,	/* pc requests */
-
-	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
-};
-
-#define BLK_TC_SHIFT		(16)
-#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
-
-/*
Basic trace actions - */ -enum { - __BLK_TA_QUEUE = 1, /* queued */ - __BLK_TA_BACKMERGE, /* back merged to existing rq */ - __BLK_TA_FRONTMERGE, /* front merge to existing rq */ - __BLK_TA_GETRQ, /* allocated new request */ - __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */ - __BLK_TA_REQUEUE, /* request requeued */ - __BLK_TA_ISSUE, /* sent to driver */ - __BLK_TA_COMPLETE, /* completed by driver */ -}; - -/* - * Trace actions in full. Additionally, read or write is masked - */ -#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_QUEUE)) -#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE)) -#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE)) - -#define BLK_IO_TRACE_MAGIC (0x65617400) #define CHECK_MAGIC(t) (((t)->magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) #define SUPPORTED_VERSION (0x02) -struct blk_io_trace { - __u32 magic; - __u32 sequence; - __u64 time; - __u64 sector; - __u32 bytes; - __u32 action; - __u32 pid; - __u16 error; - __u16 pdu_len; -}; +#if defined(__LITTLE_ENDIAN_BITFIELD) +#define be16_to_cpu(x) __bswap_16(x) +#define be32_to_cpu(x) __bswap_32(x) +#define be64_to_cpu(x) __bswap_64(x) +#define cpu_to_be16(x) __bswap_16(x) +#define cpu_to_be32(x) __bswap_32(x) +#define cpu_to_be64(x) __bswap_64(x) +#elif defined(__BIG_ENDIAN_BITFIELD) +#define be16_to_cpu(x) (x) +#define be32_to_cpu(x) (x) +#define be64_to_cpu(x) (x) +#define cpu_to_be16(x) (x) +#define cpu_to_be32(x) (x) +#define cpu_to_be64(x) (x) +#else +#error "Bad arch" +#endif + +static inline int verify_trace(struct blk_io_trace *t) +{ + if (!CHECK_MAGIC(t)) { + fprintf(stderr, "bad trace magic %x\n", t->magic); + return 1; + } + if ((t->magic & 0xff) != SUPPORTED_VERSION) { + fprintf(stderr, "unsupported trace version %x\n", + t->magic & 0xff); + return 1; + } -struct blk_user_trace_setup { - char name[32]; - __u16 act_mask; - __u32 buf_size; - __u32 buf_nr; -}; + return 0; +} +static inline void trace_to_be(struct blk_io_trace *t) +{ + t->magic = cpu_to_be32(t->magic); + t->sequence = cpu_to_be32(t->sequence); + t->time = cpu_to_be64(t->time); + t->sector = cpu_to_be64(t->sector); + t->bytes = cpu_to_be32(t->bytes); + t->action = cpu_to_be32(t->action); + t->pid = cpu_to_be32(t->pid); + t->error = cpu_to_be16(t->error); + t->pdu_len = cpu_to_be16(t->pdu_len); +} -#ifndef BLKSTARTTRACE -#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup) -#define BLKSTOPTRACE _IO(0x12,116) -#endif +static inline void trace_to_cpu(struct blk_io_trace *t) +{ + t->magic = be32_to_cpu(t->magic); + t->sequence = be32_to_cpu(t->sequence); + t->time = be64_to_cpu(t->time); + t->sector = be64_to_cpu(t->sector); + t->bytes = be32_to_cpu(t->bytes); + t->action = be32_to_cpu(t->action); + t->pid = be32_to_cpu(t->pid); + t->error = be16_to_cpu(t->error); + t->pdu_len = be16_to_cpu(t->pdu_len); +} #endif diff --git a/blktrace_api.h b/blktrace_api.h new file mode 100644 index 0000000..3861ab6 --- /dev/null +++ b/blktrace_api.h @@ -0,0 +1,84 @@ +#ifndef BLKTRACEAPI_H +#define BLKTRACEAPI_H + +#include + +/* + * Trace categories + */ +enum { + BLK_TC_READ = 1 << 0, /* reads */ + BLK_TC_WRITE = 1 << 1, /* writes */ + BLK_TC_BARRIER = 1 << 2, /* 
+	BLK_TC_SYNC	= 1 << 3,	/* barrier */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+
+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum {
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
+
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x02
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+	__u32 magic;		/* MAGIC << 8 | version */
+	__u32 sequence;		/* event number */
+	__u64 time;		/* in microseconds */
+	__u64 sector;		/* disk offset */
+	__u32 bytes;		/* transfer length */
+	__u32 action;		/* what happened */
+	__u32 pid;		/* who did it */
+	__u16 error;		/* completion error */
+	__u16 pdu_len;		/* length of data after this trace */
+};
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+	char name[32];		/* output */
+	__u16 act_mask;		/* input */
+	__u32 buf_size;		/* input */
+	__u32 buf_nr;		/* input */
+};
+
+#define BLKSTARTTRACE	_IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE	_IO(0x12,116)
+
+#endif
-- 
2.25.1
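
For reference, a minimal sketch of a consumer of the new on-disk format. After this change every record blktrace writes is a struct blk_io_trace stored big endian, followed by pdu_len bytes of payload, so trace files can be moved between machines of different endianness and are converted back with trace_to_cpu() on the reading side, the same way sort_entries() does above. The reader below only illustrates that flow using the helpers from blktrace.h; the command line handling and the payload skipping are assumptions made for the example, not part of the patch.

#include <stdio.h>

#include "blktrace.h"

int main(int argc, char *argv[])
{
	struct blk_io_trace t;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <per-cpu trace file>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fread(&t, sizeof(t), 1, f) == 1) {
		/* records are stored big endian, convert to host order */
		trace_to_cpu(&t);

		if (verify_trace(&t))
			break;

		printf("seq %u sector %llu bytes %u action %x pdu %u\n",
		       t.sequence, (unsigned long long) t.sector,
		       t.bytes, t.action, (unsigned) t.pdu_len);

		/* skip the pdu_len bytes of payload that follow the record */
		if (t.pdu_len && fseek(f, t.pdu_len, SEEK_CUR) != 0)
			break;
	}

	fclose(f);
	return 0;
}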