From: Jens Axboe
Date: Thu, 8 Sep 2005 06:37:33 +0000 (+0200)
Subject: [PATCH] Add patch for Linus' kernel
X-Git-Tag: blktrace-0.99~188
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=76c5a9c3ebaa11a8a9d408e9b0da878c7feaee6d;p=blktrace.git

[PATCH] Add patch for Linus' kernel

relayfs has been merged now, so it's easier to provide a mainline patch as well.
---

diff --git a/kernel/blk-trace-2.6.13-git-E0 b/kernel/blk-trace-2.6.13-git-E0
new file mode 100644
index 0000000..b199b04
--- /dev/null
+++ b/kernel/blk-trace-2.6.13-git-E0
@@ -0,0 +1,578 @@
+diff --git a/Documentation/fb/vesafb.txt b/Documentation/fb/vesafb.txt
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -419,6 +419,14 @@ config LBD
+ 	  your machine, or if you want to have a raid or loopback device
+ 	  bigger than 2TB. Otherwise say N.
+ 
++config BLK_DEV_IO_TRACE
++	bool "Support for tracing block io actions"
++	select RELAYFS_FS
++	help
++	  Say Y here, if you want to be able to trace the block layer actions
++	  on a given queue.
++
++
+ config CDROM_PKTCDVD
+ 	tristate "Packet writing on CD/DVD media"
+ 	depends on !UML
+diff --git a/drivers/block/Makefile b/drivers/block/Makefile
+--- a/drivers/block/Makefile
++++ b/drivers/block/Makefile
+@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD) += viodasd.o
+ obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
+ obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+ 
++obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
++
+diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
+--- a/drivers/block/elevator.c
++++ b/drivers/block/elevator.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/compiler.h>
++#include <linux/blktrace.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -371,6 +372,9 @@ struct request *elv_next_request(request
+ 	int ret;
+ 
+ 	while ((rq = __elv_next_request(q)) != NULL) {
++
++		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
++
+ 		/*
+ 		 * just mark as started even if we don't start it, a request
+ 		 * that has been delayed should not be passed by new incoming
+diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
+--- a/drivers/block/ioctl.c
++++ b/drivers/block/ioctl.c
+@@ -4,6 +4,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/smp_lock.h>
++#include <linux/blktrace.h>
+ #include <asm/uaccess.h>
+ 
+ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
+ 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+ 	case BLKGETSIZE64:
+ 		return put_u64(arg, bdev->bd_inode->i_size);
++	case BLKSTARTTRACE:
++		return blk_start_trace(bdev, (char __user *) arg);
++	case BLKSTOPTRACE:
++		return blk_stop_trace(bdev);
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
+--- a/drivers/block/ll_rw_blk.c
++++ b/drivers/block/ll_rw_blk.c
+@@ -29,6 +29,7 @@
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/writeback.h>
++#include <linux/blktrace.h>
+ 
+ /*
+  * for max sense size
+@@ -1624,6 +1625,11 @@ void blk_cleanup_queue(request_queue_t *
+ 	if (q->queue_tags)
+ 		__blk_queue_free_tags(q);
+ 
++	if (q->blk_trace) {
++		blk_cleanup_trace(q->blk_trace);
++		q->blk_trace = NULL;
++	}
++
+ 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
+ 
+ 	kmem_cache_free(requestq_cachep, q);
+@@ -1970,6 +1976,8 @@ rq_starved:
+ 
+ 	rq_init(q, rq);
+ 	rq->rl = rl;
++
++	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+ out:
+ 	return rq;
+ }
+@@ -1998,6 +2006,8 @@ static struct request *get_request_wait(
+ 		if (!rq) {
+ 			struct io_context *ioc;
+ 
++			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
++
+ 			__generic_unplug_device(q);
+ 			spin_unlock_irq(q->queue_lock);
+ 			io_schedule();
+@@ -2051,6 +2061,8 @@ EXPORT_SYMBOL(blk_get_request);
+  */
+ void blk_requeue_request(request_queue_t *q, struct request *rq)
+ {
++	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++
+ 	if (blk_rq_tagged(rq))
+ 		blk_queue_end_tag(q, rq);
+ 
+@@ -2714,6 +2726,8 @@ static int __make_request(request_queue_
+ 			if (!q->back_merge_fn(q, req, bio))
+ 				break;
+ 
++			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
++
+ 			req->biotail->bi_next = bio;
+ 			req->biotail = bio;
+ 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+@@ -2729,6 +2743,8 @@ static int __make_request(request_queue_
+ 			if (!q->front_merge_fn(q, req, bio))
+ 				break;
+ 
++			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
++
+ 			bio->bi_next = req->bio;
+ 			req->bio = bio;
+ 
+@@ -2794,6 +2810,8 @@ get_rq:
+ 	req->rq_disk = bio->bi_bdev->bd_disk;
+ 	req->start_time = jiffies;
+ 
++	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++
+ 	spin_lock_irq(q->queue_lock);
+ 	if (elv_queue_empty(q))
+ 		blk_plug_device(q);
+@@ -3030,6 +3048,10 @@ end_io:
+ 			blk_partition_remap(bio);
+ 
+ 		ret = q->make_request_fn(q, bio);
++
++		if (ret)
++			blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++
+ 	} while (ret);
+ }
+ 
+@@ -3148,6 +3170,8 @@ static int __end_that_request_first(stru
+ 	int total_bytes, bio_nbytes, error, next_idx = 0;
+ 	struct bio *bio;
+ 
++	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
++
+ 	/*
+ 	 * extend uptodate bool to allow < 0 value to be direct io error
+ 	 */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -22,6 +22,7 @@ typedef struct request_queue request_que
+ struct elevator_queue;
+ typedef struct elevator_queue elevator_t;
+ struct request_pm_state;
++struct blk_trace;
+ 
+ #define BLKDEV_MIN_RQ	4
+ #define BLKDEV_MAX_RQ	128	/* Default maximum */
+@@ -412,6 +413,8 @@ struct request_queue
+ 	 */
+ 	struct request		*flush_rq;
+ 	unsigned char		ordered;
++
++	struct blk_trace	*blk_trace;
+ };
+ 
+ enum {
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -195,6 +195,8 @@ extern int dir_notify_enable;
+ #define BLKBSZGET  _IOR(0x12,112,size_t)
+ #define BLKBSZSET  _IOW(0x12,113,size_t)
+ #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
++#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
++#define BLKSTOPTRACE _IO(0x12,116)
+ 
+ #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
+ #define FIBMAP	   _IO(0x00,1)	/* bmap access */
+--- /dev/null	2005-09-03 12:52:15.000000000 +0200
++++ linux-2.6/drivers/block/blktrace.c	2005-09-08 08:33:20.000000000 +0200
+@@ -0,0 +1,222 @@
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/blkdev.h>
++#include <linux/blktrace.h>
++#include <linux/percpu.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++
++static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
++
++void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
++		     int rw, u32 what, int error, int pdu_len, char *pdu_data)
++{
++	struct blk_io_trace t;
++	unsigned long flags;
++	int cpu;
++
++	if (rw & (1 << BIO_RW_BARRIER))
++		what |= BLK_TC_ACT(BLK_TC_BARRIER);
++	if (rw & (1 << BIO_RW_SYNC))
++		what |= BLK_TC_ACT(BLK_TC_SYNC);
++
++	if (rw & WRITE)
++		what |= BLK_TC_ACT(BLK_TC_WRITE);
++	else
++		what |= BLK_TC_ACT(BLK_TC_READ);
++
++	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
++		return;
++
++	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
++	t.sequence = atomic_add_return(1, &bt->sequence);
++
++	cpu = get_cpu();
++	t.cpu = cpu;
++	t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
++	put_cpu();
++
++	t.sector = sector;
++	t.bytes = bytes;
++	t.action = what;
++	t.error = error;
++	t.pdu_len = pdu_len;
++
++	t.pid = current->pid;
++	memcpy(t.comm, current->comm, sizeof(t.comm));
++
++	local_irq_save(flags);
++	__relay_write(bt->rchan, &t, sizeof(t));
++	if (pdu_len)
++		__relay_write(bt->rchan, pdu_data, pdu_len);
++	local_irq_restore(flags);
++}
++
++static struct dentry *blk_tree_root;
++static DECLARE_MUTEX(blk_tree_mutex);
++
++static inline void blk_remove_root(void)
++{
++	if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
++		blk_tree_root = NULL;
++}
++
++static void blk_remove_tree(struct dentry *dir)
++{
++	down(&blk_tree_mutex);
++	relayfs_remove_dir(dir);
++	blk_remove_root();
++	up(&blk_tree_mutex);
++}
++
++static struct dentry *blk_create_tree(const char *blk_name)
++{
++	struct dentry *dir = NULL;
++
++	down(&blk_tree_mutex);
++
++	if (!blk_tree_root) {
++		blk_tree_root = relayfs_create_dir("block", NULL);
++		if (!blk_tree_root)
++			goto err;
++	}
++
++	dir = relayfs_create_dir(blk_name, blk_tree_root);
++	if (!dir)
++		blk_remove_root();
++
++err:
++	up(&blk_tree_mutex);
++	return dir;
++}
++
++void blk_cleanup_trace(struct blk_trace *bt)
++{
++	relay_close(bt->rchan);
++	blk_remove_tree(bt->dir);
++	kfree(bt);
++}
++
++int blk_stop_trace(struct block_device *bdev)
++{
++	request_queue_t *q = bdev_get_queue(bdev);
++	struct blk_trace *bt = NULL;
++	int ret = -EINVAL;
++
++	if (!q)
++		return -ENXIO;
++
++	down(&bdev->bd_sem);
++
++	if (q->blk_trace) {
++		bt = q->blk_trace;
++		q->blk_trace = NULL;
++		ret = 0;
++	}
++
++	up(&bdev->bd_sem);
++
++	if (bt)
++		blk_cleanup_trace(bt);
++
++	return ret;
++}
++
++int blk_start_trace(struct block_device *bdev, char __user *arg)
++{
++	request_queue_t *q = bdev_get_queue(bdev);
++	struct blk_user_trace_setup buts;
++	struct blk_trace *bt = NULL;
++	struct dentry *dir = NULL;
++	char b[BDEVNAME_SIZE];
++	int ret;
++
++	if (!q)
++		return -ENXIO;
++
++	if (copy_from_user(&buts, arg, sizeof(buts)))
++		return -EFAULT;
++
++	if (!buts.buf_size || !buts.buf_nr)
++		return -EINVAL;
++
++	strcpy(buts.name, bdevname(bdev, b));
++
++	if (copy_to_user(arg, &buts, sizeof(buts)))
++		return -EFAULT;
++
++	down(&bdev->bd_sem);
++	ret = -EBUSY;
++	if (q->blk_trace)
++		goto err;
++
++	ret = -ENOMEM;
++	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
++	if (!bt)
++		goto err;
++
++	ret = -ENOENT;
++	dir = blk_create_tree(bdevname(bdev, b));
++	if (!dir)
++		goto err;
++
++	bt->dir = dir;
++	atomic_set(&bt->sequence, 0);
++
++	ret = -EIO;
++	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
++	if (!bt->rchan)
++		goto err;
++
++	bt->act_mask = buts.act_mask;
++	if (!bt->act_mask)
++		bt->act_mask = (u16) -1;
++
++	q->blk_trace = bt;
++	up(&bdev->bd_sem);
++	return 0;
++err:
++	up(&bdev->bd_sem);
++	if (dir)
++		blk_remove_tree(dir);
++	if (bt)
++		kfree(bt);
++	return ret;
++}
++
++static void blk_trace_check_cpu_time(void *data)
++{
++	unsigned long long a, b, *t;
++	struct timeval tv;
++	int cpu = get_cpu();
++
++	t = &per_cpu(blk_trace_cpu_offset, cpu);
++
++	a = sched_clock();
++	do_gettimeofday(&tv);
++	b = sched_clock();
++
++	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
++	*t -= (a + b) / 2;
++	put_cpu();
++}
++
++static int blk_trace_calibrate_offsets(void)
++{
++	unsigned long flags;
++
++	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
++	local_irq_save(flags);
++	blk_trace_check_cpu_time(NULL);
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++static __init int blk_trace_init(void)
++{
++	return blk_trace_calibrate_offsets();
++}
++
++module_init(blk_trace_init);
++
+--- /dev/null	2005-09-03 12:52:15.000000000 +0200
++++ linux-2.6/include/linux/blktrace.h	2005-09-08 08:33:20.000000000 +0200
+@@ -0,0 +1,150 @@
++#ifndef BLKTRACE_H
++#define BLKTRACE_H
++
++#include <linux/config.h>
++#include <linux/blkdev.h>
++#include <linux/relayfs_fs.h>
++
++/*
++ * Trace categories
++ */
++enum {
++	BLK_TC_READ	= 1 << 0,	/* reads */
++	BLK_TC_WRITE	= 1 << 1,	/* writes */
++	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
++	BLK_TC_SYNC	= 1 << 3,	/* sync */
++	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
++	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
++	BLK_TC_ISSUE	= 1 << 6,	/* issue */
++	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
++	BLK_TC_FS	= 1 << 8,	/* fs requests */
++	BLK_TC_PC	= 1 << 9,	/* pc requests */
++
++	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
++};
++
++#define BLK_TC_SHIFT		(16)
++#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
++
++/*
++ * Basic trace actions
++ */
++enum {
++	__BLK_TA_QUEUE = 1,	/* queued */
++	__BLK_TA_BACKMERGE,	/* back merged to existing rq */
++	__BLK_TA_FRONTMERGE,	/* front merge to existing rq */
++	__BLK_TA_GETRQ,		/* allocated new request */
++	__BLK_TA_SLEEPRQ,	/* sleeping on rq allocation */
++	__BLK_TA_REQUEUE,	/* request requeued */
++	__BLK_TA_ISSUE,		/* sent to driver */
++	__BLK_TA_COMPLETE,	/* completed by driver */
++};
++
++/*
++ * Trace actions in full. Additionally, read or write is masked
++ */
++#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
++#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
++#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
++
++#define BLK_IO_TRACE_MAGIC	0x65617400
++#define BLK_IO_TRACE_VERSION	0x04
++
++/*
++ * The trace itself
++ */
++struct blk_io_trace {
++	u32 magic;		/* MAGIC << 8 | version */
++	u32 sequence;		/* event number */
++	u64 time;		/* in nanoseconds */
++	u64 sector;		/* disk offset */
++	u32 bytes;		/* transfer length */
++	u32 action;		/* what happened */
++	u32 pid;		/* who did it */
++	u32 cpu;		/* on what cpu did it happen */
++	u16 error;		/* completion error */
++	u16 pdu_len;		/* length of data after this trace */
++	char comm[16];		/* task command name (TASK_COMM_LEN) */
++};
++
++struct blk_trace {
++	struct dentry *dir;
++	struct rchan *rchan;
++	atomic_t sequence;
++	u16 act_mask;
++};
++
++/*
++ * User setup structure passed with BLKSTARTTRACE
++ */
++struct blk_user_trace_setup {
++	char name[BDEVNAME_SIZE];	/* output */
++	u16 act_mask;			/* input */
++	u32 buf_size;			/* input */
++	u32 buf_nr;			/* input */
++};
++
++#if defined(CONFIG_BLK_DEV_IO_TRACE)
++extern int blk_start_trace(struct block_device *, char __user *);
++extern int blk_stop_trace(struct block_device *);
++extern void blk_cleanup_trace(struct blk_trace *);
++extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *);
++
++static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
++				    u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++	int rw = rq->flags & 0x07;
++
++	if (likely(!bt))
++		return;
++
++	if (blk_pc_request(rq)) {
++		what |= BLK_TC_ACT(BLK_TC_PC);
++		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
++	} else {
++		what |= BLK_TC_ACT(BLK_TC_FS);
++		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
++	}
++}
++
++static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
++				     u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++
++	if (likely(!bt))
++		return;
++
++	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
++}
++
++static inline void blk_add_trace_generic(struct request_queue *q,
++					 struct bio *bio, int rw, u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++
++	if (likely(!bt))
++		return;
++
++	if (bio)
++		blk_add_trace_bio(q, bio, what);
++	else
++		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
++}
++
++#else /* !CONFIG_BLK_DEV_IO_TRACE */
++#define blk_start_trace(bdev, arg)		(-EINVAL)
++#define blk_stop_trace(bdev)			(-EINVAL)
++#define blk_cleanup_trace(bt)			do { } while (0)
++#define blk_add_trace_rq(q, rq, what)		do { } while (0)
++#define blk_add_trace_bio(q, rq, what)		do { } while (0)
++#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
++
++#endif
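
The new ioctl pair is meant to be driven from a userspace tool such as the blktrace utility in this repository. Below is a minimal, hypothetical sketch of that usage, not part of the patch itself: the device path, buffer sizing, and the relayfs layout comment are illustrative assumptions, and the structure and ioctl numbers simply mirror the ABI defined in <linux/blktrace.h> above.

/*
 * Sketch: start tracing a block device via BLKSTARTTRACE, let it run,
 * then stop it with BLKSTOPTRACE. Device and sizes are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define BDEVNAME_SIZE	32	/* matches the kernel's BDEVNAME_SIZE */

/* mirrors struct blk_user_trace_setup from <linux/blktrace.h> above */
struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output: name the kernel chose */
	unsigned short act_mask;	/* input: categories to trace, 0 = all */
	unsigned int buf_size;		/* input: size of each relayfs sub-buffer */
	unsigned int buf_nr;		/* input: number of sub-buffers */
};

#define BLKSTARTTRACE	_IOWR(0x12, 115, struct blk_user_trace_setup)
#define BLKSTOPTRACE	_IO(0x12, 116)

int main(void)
{
	struct blk_user_trace_setup buts;
	int fd;

	fd = open("/dev/hda", O_RDONLY);	/* assumed device */
	if (fd == -1) {
		perror("open");
		return 1;
	}

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 128 * 1024;	/* 128KiB per sub-buffer */
	buts.buf_nr = 4;		/* four sub-buffers */

	if (ioctl(fd, BLKSTARTTRACE, &buts) == -1) {
		perror("BLKSTARTTRACE");
		close(fd);
		return 1;
	}

	/*
	 * Events now stream into the per-cpu files that relay_open()
	 * created, e.g. <relayfs mount>/block/<name>/trace0 and up,
	 * where <name> is what the kernel copied back into buts.name.
	 */
	printf("tracing %s for 10 seconds\n", buts.name);
	sleep(10);

	if (ioctl(fd, BLKSTOPTRACE) == -1)
		perror("BLKSTOPTRACE");

	close(fd);
	return 0;
}

Since act_mask holds the BLK_TC_* category bits, a tool could restrict tracing to, say, queue and completion events by setting it to BLK_TC_QUEUE | BLK_TC_COMPLETE; leaving it zero makes the kernel trace everything, per the (u16) -1 fallback in blk_start_trace().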