diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -419,6 +419,14 @@ config LBD
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB. Otherwise say N.
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	  Say Y here, if you want to be able to trace the block layer actions
 	tristate "Packet writing on CD/DVD media"
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
+#include <linux/blktrace.h>
 
 #include <asm/uaccess.h>
 
@@ -305,6 +306,8 @@ void elv_requeue_request(request_queue_t
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
 	 * barriers implicitly indicate back insertion
@@ -371,6 +374,9 @@ struct request *elv_next_request(request
 	while ((rq = __elv_next_request(q)) != NULL) {
+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
 		 * just mark as started even if we don't start it, a request
 		 * that has been delayed should not be passed by new incoming
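
The two hooks above tag requests as they enter the elevator (BLK_TA_INSERT) and as the driver pulls them off (BLK_TA_ISSUE). Each action word carries its trace class in the upper 16 bits, which is what the act_mask test in __blk_add_trace() filters on. A standalone sketch of that arithmetic, with the constants copied from the new blktrace.h below; the main() driver is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the new include/linux/blktrace.h below. */
#define BLK_TC_QUEUE	(1 << 4)
#define BLK_TC_ISSUE	(1 << 6)
#define BLK_TC_SHIFT	16
#define BLK_TC_ACT(act)	((act) << BLK_TC_SHIFT)

#define BLK_TA_INSERT	(12 | BLK_TC_ACT(BLK_TC_QUEUE))	/* __BLK_TA_INSERT */
#define BLK_TA_ISSUE	(7 | BLK_TC_ACT(BLK_TC_ISSUE))	/* __BLK_TA_ISSUE */

/* Mirrors the filter test in __blk_add_trace(). */
static int event_passes(uint16_t act_mask, uint32_t what)
{
	return (((uint32_t)act_mask << BLK_TC_SHIFT) & what) != 0;
}

int main(void)
{
	uint16_t mask = BLK_TC_ISSUE;	/* trace issue events only */

	printf("insert passes: %d\n", event_passes(mask, BLK_TA_INSERT));	/* 0 */
	printf("issue passes:  %d\n", event_passes(mask, BLK_TA_ISSUE));	/* 1 */
	return 0;
}
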
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 		return put_u64(arg, bdev->bd_inode->i_size);
+		return blk_start_trace(bdev, (char __user *) arg);
+		return blk_stop_trace(bdev);
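
These two cases are the whole userspace control surface: BLKSTARTTRACE hands in a blk_user_trace_setup and gets the relay directory name back, BLKSTOPTRACE tears the trace down. A hedged sketch of a tool driving them; the ioctl numbers and setup struct mirror this patch's definitions (redeclared here to avoid pulling kernel-internal headers into userspace, so the layout must match the kernel's), while the device path, buffer sizing, and sleep are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

#define BDEVNAME_SIZE	32	/* matches the kernel's definition */

struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output: filled in by the kernel */
	uint16_t act_mask;		/* input: 0 means "all actions" */
	uint32_t buf_size;		/* input: bytes per relay sub-buffer */
	uint32_t buf_nr;		/* input: number of sub-buffers */
};

#define BLKSTARTTRACE	_IOWR(0x12, 115, struct blk_user_trace_setup)
#define BLKSTOPTRACE	_IO(0x12, 116)

int main(int argc, char **argv)
{
	struct blk_user_trace_setup buts;
	const char *dev = argc > 1 ? argv[1] : "/dev/sda";	/* illustrative */
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;
	buts.buf_nr = 4;

	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0)
		return 1;

	/* events now stream to the per-cpu relay files under block/<name>/ */
	printf("tracing %s\n", buts.name);
	sleep(10);

	ioctl(fd, BLKSTOPTRACE);
	close(fd);
	return 0;
}
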
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/blktrace.h>
@@ -1422,8 +1423,10 @@ void blk_plug_device(request_queue_t *q)
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1487,14 +1490,21 @@ static void blk_backing_dev_unplug(struc
 	 * devices don't necessarily have an ->unplug_fn defined
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				      q->rq.count[READ] + q->rq.count[WRITE]);
 
 static void blk_unplug_work(void *data)
 	request_queue_t *q = data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1502,6 +1512,9 @@ static void blk_unplug_timeout(unsigned
 	request_queue_t *q = (request_queue_t *)data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
 	kblockd_schedule_work(&q->unplug_work);
@@ -1624,6 +1637,11 @@ void blk_cleanup_queue(request_queue_t *
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace) {
+		blk_cleanup_trace(q->blk_trace);
+		q->blk_trace = NULL;
+	}
 
 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
 	kmem_cache_free(requestq_cachep, q);
@@ -1970,6 +1988,8 @@ rq_starved:
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -1998,6 +2018,8 @@ static struct request *get_request_wait(
 		struct io_context *ioc;
 
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -2051,6 +2073,8 @@ EXPORT_SYMBOL(blk_get_request);
 void blk_requeue_request(request_queue_t *q, struct request *rq)
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -2676,6 +2700,8 @@ static int __make_request(request_queue_
 			if (!q->back_merge_fn(q, req, bio))
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2691,6 +2717,8 @@ static int __make_request(request_queue_
 			if (!q->front_merge_fn(q, req, bio))
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
@@ -2991,6 +3019,8 @@ end_io:
 		blk_partition_remap(bio);
 
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
 		ret = q->make_request_fn(q, bio);
@@ -3110,6 +3140,8 @@ static int __end_that_request_first(stru
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	 * extend uptodate bool to allow < 0 value to be direct io error
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -1050,6 +1051,9 @@ struct bio_pair *bio_split(struct bio *b
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+			      bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
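
Note that the integer PDU attached to split (and unplug) events is stored big-endian by blk_add_trace_pdu_int() via cpu_to_be64(), so a little-endian consumer must swap it back. A small illustrative helper, assuming pdu points at the 8-byte payload that follows the trace record:

#include <endian.h>	/* be64toh(), glibc */
#include <stdint.h>
#include <string.h>

/* Decodes the big-endian u64 payload that follows BLK_TA_SPLIT and
 * BLK_TA_UNPLUG_* records (their pdu_len is sizeof(u64)). */
static uint64_t blk_trace_pdu_int(const void *pdu)
{
	uint64_t v;

	memcpy(&v, pdu, sizeof(v));	/* the payload may be unaligned */
	return be64toh(v);
}

For a split event this recovers bi_sector + first_sectors, i.e. the first sector of the second half of the split bio.
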
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_que
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -412,6 +413,8 @@ struct request_queue
 	struct request		*flush_rq;
 	unsigned char		ordered;
+
+	struct blk_trace	*blk_trace;
diff --git a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -196,6 +196,8 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/mm/highmem.c b/mm/highmem.c
--- a/mm/highmem.c
+++ b/mm/highmem.c
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
@@ -479,6 +480,8 @@ void blk_queue_bounce(request_queue_t *q
 		pool = isa_page_pool;
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
--- /dev/null	2005-09-09 21:24:12.000000000 +0200
+++ linux-2.6/drivers/block/blktrace.c	2005-09-28 08:46:33.000000000 +0200
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+	struct blk_io_trace t;
+	unsigned long flags;
+	int cpu;
+
+	if (rw & (1 << BIO_RW_BARRIER))
+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
+	if (rw & (1 << BIO_RW_SYNC))
+		what |= BLK_TC_ACT(BLK_TC_SYNC);
+		what |= BLK_TC_ACT(BLK_TC_WRITE);
+		what |= BLK_TC_ACT(BLK_TC_READ);
+
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+
+	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t.device = bt->dev;
+	t.pdu_len = pdu_len;
+	t.pid = current->pid;
+	memcpy(t.comm, current->comm, sizeof(t.comm));
+
+	/*
+	 * need to serialize this part on the local processor to prevent
+	 * interrupts from messing with the sequence <-> time relation
+	 */
+	local_irq_save(flags);
+
+	t.sequence = atomic_add_return(1, &bt->sequence);
+
+	cpu = smp_processor_id();
+	t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+
+	__relay_write(bt->rchan, &t, sizeof(t));
+	__relay_write(bt->rchan, pdu_data, pdu_len);
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+static struct dentry *blk_tree_root;
+static DECLARE_MUTEX(blk_tree_mutex);
+
+static inline void blk_remove_root(void)
+{
+	if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+		blk_tree_root = NULL;
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+	down(&blk_tree_mutex);
+	relayfs_remove_dir(dir);
+	up(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+	struct dentry *dir = NULL;
+
+	down(&blk_tree_mutex);
+
+	if (!blk_tree_root) {
+		blk_tree_root = relayfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+
+	dir = relayfs_create_dir(blk_name, blk_tree_root);
+
+	up(&blk_tree_mutex);
+
+void blk_cleanup_trace(struct blk_trace *bt)
+{
+	relay_close(bt->rchan);
+	blk_remove_tree(bt->dir);
+int blk_stop_trace(struct block_device *bdev)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_trace *bt = NULL;
+
+	down(&bdev->bd_sem);
+
+	if (q->blk_trace) {
+		bt = q->blk_trace;
+		q->blk_trace = NULL;
+
+	blk_cleanup_trace(bt);
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_user_trace_setup buts;
+	struct blk_trace *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	down(&bdev->bd_sem);
+
+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+
+	dir = blk_create_tree(bdevname(bdev, b));
+
+	bt->dev = bdev->bd_dev;
+	atomic_set(&bt->sequence, 0);
+
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	blk_remove_tree(dir);
+static void blk_trace_check_cpu_time(void *data)
+{
+	unsigned long long a, b, *t;
+	struct timeval tv;
+	int cpu = get_cpu();
+
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+	do_gettimeofday(&tv);
+
+	*t = tv.tv_sec * 1000000000ULL + tv.tv_usec * 1000;
+static int blk_trace_calibrate_offsets(void)
+{
+	unsigned long flags;
+
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+
+static __init int blk_trace_init(void)
+{
+	return blk_trace_calibrate_offsets();
+}
+
+module_init(blk_trace_init);
--- /dev/null	2005-09-09 21:24:12.000000000 +0200
+++ linux-2.6/include/linux/blktrace.h	2005-09-28 08:46:33.000000000 +0200
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+
+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+	__BLK_TA_PLUG,			/* queue was plugged */
+	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
+	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
+	__BLK_TA_INSERT,		/* insert request */
+	__BLK_TA_SPLIT,			/* bio was split */
+	__BLK_TA_BOUNCE,		/* bio was bounced */
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
+#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
+
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x05
+
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u32 cpu;		/* on what cpu did it happen */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+	u32 device;		/* device number */
+	char comm[16];		/* task command name (TASK_COMM_LEN) */
+};
+
+struct blk_trace {
+	struct dentry *dir;
+	struct rchan *rchan;
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void blk_cleanup_trace(struct blk_trace *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32,
+			    int, int, void *);
+
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+				    u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+				sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				rw, what, rq->errors, 0, NULL);
+	}
+
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+				     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (bio)
+		blk_add_trace_bio(q, bio, what);
+	else
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+					 struct bio *bio, unsigned int pdu)
+{
+	struct blk_trace *bt = q->blk_trace;
+	u64 rpdu = cpu_to_be64(pdu);
+
+	if (bio)
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+				what, !bio_flagged(bio, BIO_UPTODATE),
+				sizeof(rpdu), &rpdu);
+	else
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)		(-EINVAL)
+#define blk_stop_trace(bdev)			(-EINVAL)
+#define blk_cleanup_trace(bt)			do { } while (0)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, rq, what)		do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
+#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
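
Putting the pieces together: each event is a fixed-size blk_io_trace record, optionally followed by pdu_len bytes of payload, streamed through the per-cpu relay files ("trace0", "trace1", ...) created by relay_open() above. A hedged sketch of a consumer under those assumptions; the struct mirrors the header, and the file path and output format are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct blk_io_trace above; no padding on common ABIs, so the
 * record is 64 bytes and can be read in one go. */
struct blk_io_trace {
	uint32_t magic;		/* MAGIC << 8 | version */
	uint32_t sequence;	/* event number */
	uint64_t time;		/* in nanoseconds */
	uint64_t sector;	/* disk offset */
	uint32_t bytes;		/* transfer length */
	uint32_t action;	/* what happened */
	uint32_t pid;		/* who did it */
	uint32_t cpu;		/* on what cpu did it happen */
	uint16_t error;		/* completion error */
	uint16_t pdu_len;	/* length of data after this trace */
	uint32_t device;	/* device number */
	char comm[16];		/* task command name */
};

#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_TC_SHIFT		16

int main(int argc, char **argv)
{
	struct blk_io_trace t;
	FILE *f = fopen(argc > 1 ? argv[1] : "trace0", "rb");	/* illustrative path */

	if (!f)
		return 1;

	while (fread(&t, sizeof(t), 1, f) == 1) {
		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
			break;	/* lost sync with the stream */

		t.comm[sizeof(t.comm) - 1] = '\0';	/* be defensive */
		printf("%6u cpu%-2u %12llu ns %10llu+%-4u act 0x%04x class 0x%04x %s\n",
		       t.sequence, t.cpu,
		       (unsigned long long)t.time,
		       (unsigned long long)t.sector, t.bytes >> 9,
		       t.action & 0xffff, t.action >> BLK_TC_SHIFT, t.comm);

		/* skip the payload, e.g. the be64 sector of a split event */
		if (t.pdu_len && fseek(f, t.pdu_len, SEEK_CUR) != 0)
			break;
	}

	fclose(f);
	return 0;
}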