diff --git a/block/Kconfig b/block/Kconfig
index 377f6dd..27eaed9 100644
@@ -11,4 +11,15 @@ config LBD
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB. Otherwise say N.
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	  Say Y here if you want to be able to trace the block layer actions
+	  on a given queue. Tracing allows you to see any traffic happening
+	  on a block device queue. For more information (and the user space
+	  support tools needed), fetch the blktrace app from:
+	  git://brick.kernel.dk/data/git/blktrace.git
 source block/Kconfig.iosched
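For orientation, here is a minimal user-space sketch of how a tool might drive the new interface via the BLKSTARTTRACE/BLKSTOPTRACE ioctls added later in this patch. It is illustrative only: the field values are arbitrary, the helper name is made up, and the struct blk_user_trace_setup definition has to be made visible to user space (the blktrace app carries its own copy of these declarations).

	/* hypothetical user-space helper, not part of this patch */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>			/* BLKSTARTTRACE, BLKSTOPTRACE */
	#include <linux/blktrace_api.h>		/* struct blk_user_trace_setup */

	static int start_block_trace(const char *dev)
	{
		struct blk_user_trace_setup buts;
		int fd = open(dev, O_RDONLY);

		if (fd < 0)
			return -1;

		memset(&buts, 0, sizeof(buts));
		buts.buf_size = 512 * 1024;	/* bytes per relayfs sub-buffer */
		buts.buf_nr = 4;		/* number of sub-buffers */
		buts.act_mask = 0;		/* 0 -> kernel traces everything */

		/* the kernel fills in buts.name, the per-device relayfs directory */
		if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
			close(fd);
			return -1;
		}
		printf("tracing %s under relayfs dir block/%s\n", dev, buts.name);
		return fd;	/* later: ioctl(fd, BLKSTOPTRACE) tears it down */
	}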
diff --git a/block/Makefile b/block/Makefile
index 7e4f93e..c05de0e 100644
@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosch
 obj-$(CONFIG_IOSCHED_AS)	+= as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
diff --git a/block/blktrace.c b/block/blktrace.c
index 0000000..5ffd46f
+++ b/block/blktrace.c
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+	struct blk_io_trace t;
+	unsigned long flags;
+	if (rw & (1 << BIO_RW_BARRIER))
+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
+	if (rw & (1 << BIO_RW_SYNC))
+		what |= BLK_TC_ACT(BLK_TC_SYNC);
+		what |= BLK_TC_ACT(BLK_TC_WRITE);
+		what |= BLK_TC_ACT(BLK_TC_READ);
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+	if (sector < bt->start_lba || sector > bt->end_lba)
+	if (bt->pid && pid != bt->pid)
+	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t.pdu_len = pdu_len;
+	memcpy(t.comm, current->comm, sizeof(t.comm));
+	 * need to serialize this part completely to prevent multiple CPUs
+	 * from misordering events
+	spin_lock_irqsave(&bt->lock, flags);
+	t.sequence = ++bt->sequence;
+	cpu = smp_processor_id();
+	t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+	__relay_write(bt->rchan, &t, sizeof(t));
+	__relay_write(bt->rchan, pdu_data, pdu_len);
+	spin_unlock_irqrestore(&bt->lock, flags);
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+static struct dentry *blk_tree_root;
+static struct mutex blk_tree_mutex;
+static inline void blk_remove_root(void)
+	if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+		blk_tree_root = NULL;
+static void blk_remove_tree(struct dentry *dir)
+	mutex_lock(&blk_tree_mutex);
+	relayfs_remove_dir(dir);
+	mutex_unlock(&blk_tree_mutex);
+static struct dentry *blk_create_tree(const char *blk_name)
+	struct dentry *dir = NULL;
+	mutex_lock(&blk_tree_mutex);
+	if (!blk_tree_root) {
+		blk_tree_root = relayfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+	dir = relayfs_create_dir(blk_name, blk_tree_root);
+	mutex_unlock(&blk_tree_mutex);
+void blk_cleanup_trace(struct blk_trace *bt)
+	relay_close(bt->rchan);
+	relayfs_remove_file(bt->dropped_file);
+	blk_remove_tree(bt->dir);
+int blk_stop_trace(struct block_device *bdev)
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_trace *bt = NULL;
+	down(&bdev->bd_sem);
+	if (q->blk_trace) {
+		q->blk_trace = NULL;
+		blk_cleanup_trace(bt);
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+	filp->private_data = inode->u.generic_ip;
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+				size_t count, loff_t *ppos)
+	struct blk_trace *bt = filp->private_data;
+	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+static struct file_operations blk_dropped_fops = {
+	.owner =	THIS_MODULE,
+	.open =		blk_dropped_open,
+	.read =		blk_dropped_read,
+static int blk_subbuf_start_callback(struct rchan_buf *buf,
+				     size_t prev_padding)
+	if (relay_buf_full(buf)) {
+		struct blk_trace *bt = buf->chan->private_data;
+		atomic_inc(&bt->dropped);
+static struct rchan_callbacks blk_relay_callbacks = {
+	.subbuf_start = blk_subbuf_start_callback,
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_user_trace_setup buts;
+	struct blk_trace *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+	if (!buts.buf_size || !buts.buf_nr)
+	strcpy(buts.name, bdevname(bdev, b));
+	 * some device names contain slashes - convert them to underscores
+	 * so the name can be used as a relayfs directory
+	for (i = 0; i < strlen(buts.name); i++)
+		if (buts.name[i] == '/')
+			buts.name[i] = '_';
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+	down(&bdev->bd_sem);
+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+	dir = blk_create_tree(buts.name);
+	bt->dev = bdev->bd_dev;
+	spin_lock_init(&bt->lock);
+	atomic_set(&bt->dropped, 0);
+	bt->dropped_file = relayfs_create_file("dropped", dir, 0, &blk_dropped_fops, bt);
+	if (!bt->dropped_file)
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+	bt->rchan->private_data = bt;
+	bt->act_mask = buts.act_mask;
+		bt->act_mask = (u16) -1;
+	bt->start_lba = buts.start_lba;
+	bt->end_lba = buts.end_lba;
+		bt->end_lba = -1ULL;
+	bt->pid = buts.pid;
+	if (bt && bt->dropped_file)
+		relayfs_remove_file(bt->dropped_file);
+		blk_remove_tree(dir);
+static void blk_check_time(unsigned long long *t)
+	unsigned long long a, b;
+	do_gettimeofday(&tv);
+	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+static void blk_trace_check_cpu_time(void *data)
+	unsigned long long *t;
+	int cpu = get_cpu();
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+static void blk_trace_calibrate_offsets(void)
+	unsigned long flags;
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+static __init int blk_trace_init(void)
+	mutex_init(&blk_tree_mutex);
+	blk_trace_calibrate_offsets();
+	 * now make sure HT siblings have the same time offset
+	for_each_online_cpu(cpu) {
+		unsigned long long *cpu_off, *sibling_off;
+		for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
+			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
+			*sibling_off = *cpu_off;
+module_init(blk_trace_init);
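The calibration above exists so that t.time, computed as sched_clock() minus the CPU's stored offset, stays comparable across CPUs, and hyper-threaded siblings are forced to the same offset since they presumably run from the same underlying clock. A rough user-space analogue of the technique (a sketch only, not the kernel's exact arithmetic; clock_gettime() stands in for sched_clock()/do_gettimeofday()):

	#include <stdint.h>
	#include <time.h>

	static uint64_t ts_ns(struct timespec ts)
	{
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/*
	 * Map a local monotonic clock onto a shared real-time base.  Sampling
	 * the monotonic clock before and after the real-time read and taking
	 * the midpoint halves the error introduced by the read itself.
	 */
	static uint64_t clock_offset(void)
	{
		struct timespec m1, rt, m2;

		clock_gettime(CLOCK_MONOTONIC, &m1);
		clock_gettime(CLOCK_REALTIME, &rt);
		clock_gettime(CLOCK_MONOTONIC, &m2);

		return (ts_ns(m1) + ts_ns(m2)) / 2 - ts_ns(rt);
	}

	/* a comparable timestamp is then monotonic_now - clock_offset() */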
diff --git a/block/elevator.c b/block/elevator.c
index c9f424d..793c686 100644
--- a/block/elevator.c
+++ b/block/elevator.c
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
@@ -333,6 +334,8 @@ void __elv_add_request(request_queue_t *
 	struct list_head *pos;
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 	rq->flags |= REQ_ORDERED_COLOR;
@@ -491,6 +494,7 @@ struct request *elv_next_request(request
 	 * not be passed by new incoming requests
 	rq->flags |= REQ_STARTED;
+	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 	if (!q->boundary_rq || q->boundary_rq == rq) {
diff --git a/block/ioctl.c b/block/ioctl.c
index e110949..63e67a2 100644
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -189,6 +190,10 @@ static int blkdev_locked_ioctl(struct fi
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKSTARTTRACE:
+		return blk_start_trace(bdev, (char __user *) arg);
+		return blk_stop_trace(bdev);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0a..bfcde0f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/blktrace_api.h>
@@ -1555,8 +1556,10 @@ void blk_plug_device(request_queue_t *q)
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 EXPORT_SYMBOL(blk_plug_device);
@@ -1620,14 +1623,21 @@ static void blk_backing_dev_unplug(struc
 	 * devices don't necessarily have an ->unplug_fn defined
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				      q->rq.count[READ] + q->rq.count[WRITE]);
 static void blk_unplug_work(void *data)
 	request_queue_t *q = data;
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1635,6 +1645,9 @@ static void blk_unplug_timeout(unsigned
 	request_queue_t *q = (request_queue_t *)data;
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
 	kblockd_schedule_work(&q->unplug_work);
@@ -1757,6 +1770,11 @@ void blk_cleanup_queue(request_queue_t *
 		__blk_queue_free_tags(q);
+	if (q->blk_trace) {
+		blk_cleanup_trace(q->blk_trace);
+		q->blk_trace = NULL;
 	kmem_cache_free(requestq_cachep, q);
@@ -2108,6 +2126,8 @@ rq_starved:
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -2136,6 +2156,8 @@ static struct request *get_request_wait(
 		struct io_context *ioc;
+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -2189,6 +2211,8 @@ EXPORT_SYMBOL(blk_get_request);
 void blk_requeue_request(request_queue_t *q, struct request *rq)
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -2820,6 +2844,8 @@ static int __make_request(request_queue_
 			if (!q->back_merge_fn(q, req, bio))
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
 			req->biotail->bi_next = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2835,6 +2861,8 @@ static int __make_request(request_queue_
 			if (!q->front_merge_fn(q, req, bio))
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
 			bio->bi_next = req->bio;
@@ -2952,6 +2980,7 @@ void generic_make_request(struct bio *bi
 	int ret, nr_sectors = bio_sectors(bio);
 	/* Test device or partition size, when known. */
@@ -2978,6 +3007,8 @@ void generic_make_request(struct bio *bi
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 		char b[BDEVNAME_SIZE];
@@ -3010,6 +3041,15 @@ end_io:
 			blk_partition_remap(bio);
+		if (maxsector != -1)
+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+		maxsector = bio->bi_sector;
+		old_dev = bio->bi_bdev->bd_dev;
 		ret = q->make_request_fn(q, bio);
@@ -3129,6 +3169,8 @@ static int __end_that_request_first(stru
 	int total_bytes, bio_nbytes, error, next_idx = 0;
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
 	 * extend uptodate bool to allow < 0 value to be direct io error
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 12d7b9b..880892a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
 #include <linux/compat.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
@@ -2330,6 +2331,7 @@ static inline void complete_command( ctl
 	cmd->rq->completion_data = cmd;
 	cmd->rq->errors = status;
+	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
 	blk_complete_request(cmd->rq);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c16359..8c979c2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/blktrace_api.h>
 static const char *_name = DM_NAME;
@@ -303,6 +304,8 @@ static void dec_pending(struct dm_io *io
 		/* nudge anyone waiting on suspend queue */
 		wake_up(&io->md->wait);
+		blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
 		bio_endio(io->bio, io->bio->bi_size, io->error);
@@ -361,6 +364,7 @@ static void __map_bio(struct dm_target *
 		      struct target_io *tio)
@@ -376,10 +380,17 @@ static void __map_bio(struct dm_target *
 	atomic_inc(&tio->io->io_count);
+	sector = clone->bi_sector;
 	r = ti->type->map(ti, clone, &tio->info);
 		/* the bio has been remapped so dispatch it */
+		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+				    tio->io->bio->bi_bdev->bd_dev, sector,
 		generic_make_request(clone);
 		/* error the io and bail out */
diff --git a/fs/bio.c b/fs/bio.c
index bbc442b..8a1b0b6 100644
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 #define BIO_POOL_SIZE 256
@@ -1094,6 +1095,9 @@ struct bio_pair *bio_split(struct bio *b
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+			      bi->bi_sector + first_sectors);
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5dd0207..010e02b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
 #include <linux/i2c-dev.h>
 #include <linux/wireless.h>
 #include <linux/atalk.h>
+#include <linux/blktrace_api.h>
 #include <net/sock.h>		/* siocdevprivate_ioctl */
 #include <net/bluetooth/bluetooth.h>
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 02a585f..195c3b9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_que
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -416,6 +417,8 @@ struct request_queue
 	unsigned int		sg_reserved_size;
+	struct blk_trace	*blk_trace;
 	 * reserved for flush operations
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 0000000..d6b4317
+++ b/include/linux/blktrace_api.h
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+	BLK_TC_END	= 1 << 15,	/* only 16 bits, reminder */
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)	((act) << BLK_TC_SHIFT)
+ * Basic trace actions
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merged to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+	__BLK_TA_PLUG,			/* queue was plugged */
+	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
+	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
+	__BLK_TA_INSERT,		/* insert request */
+	__BLK_TA_SPLIT,			/* bio was split */
+	__BLK_TA_BOUNCE,		/* bio was bounced */
+	__BLK_TA_REMAP,			/* bio was remapped */
+ * Trace actions in full. Additionally, read or write is masked in.
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
+#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
+#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
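Each 32-bit action word therefore packs the basic action into the low 16 bits and the trace-category bits into the high 16, which is what lets __blk_add_trace() accept or drop an event with a single mask test against the user-supplied act_mask. A small illustration, assuming the definitions above are in scope (the helper name is made up for the example):

	/* the same test __blk_add_trace() applies before logging an event */
	static inline int blk_trace_event_passes(u16 act_mask, u32 what)
	{
		return (((u32)act_mask << BLK_TC_SHIFT) & what) != 0;
	}

	/*
	 * Example: a queued write carries
	 *	what = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_WRITE)
	 * A mask of only BLK_TC_COMPLETE drops it (no category bits overlap),
	 * while a mask containing BLK_TC_WRITE or BLK_TC_QUEUE lets it through;
	 * the low 16 bits still identify the action as __BLK_TA_QUEUE.
	 */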
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x05
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u32 cpu;		/* on what cpu did it happen */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+	u32 device;		/* device number */
+	char comm[16];		/* task command name (TASK_COMM_LEN) */
+struct blk_io_trace_remap {
+	struct dentry *dir;
+	struct rchan *rchan;
+	struct dentry *dropped_file;
+	unsigned long sequence;
+ * User setup structure passed with BLKSTARTTRACE
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void blk_cleanup_trace(struct blk_trace *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+	struct blk_trace *bt = q->blk_trace;
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+	struct blk_trace *bt = q->blk_trace;
+		blk_add_trace_bio(q, bio, what);
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+					 struct bio *bio, unsigned int pdu)
+	struct blk_trace *bt = q->blk_trace;
+	u64 rpdu = cpu_to_be64(pdu);
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+				       dev_t dev, sector_t from, sector_t to)
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+	r.device = cpu_to_be32(dev);
+	r.sector = cpu_to_be64(to);
+	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)		(-EINVAL)
+#define blk_stop_trace(bdev)			(-EINVAL)
+#define blk_cleanup_trace(bt)			do { } while (0)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, rq, what)		do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
+#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
+#define blk_add_trace_remap(q, bio, dev, f, t)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
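On the consumer side, each relay sub-buffer holds back-to-back struct blk_io_trace records, each optionally followed by pdu_len bytes of payload (a remap record, the SCSI command for PC requests, or a big-endian u64 for the unplug and split events). A sketch of walking one such buffer in user space, assuming the structure above has been mirrored with <stdint.h> types; the real consumer is the blktrace utility:

	#include <stddef.h>

	static void walk_trace_buffer(const char *buf, size_t len)
	{
		size_t off = 0;

		while (off + sizeof(struct blk_io_trace) <= len) {
			const struct blk_io_trace *t =
				(const struct blk_io_trace *)(buf + off);

			/* high 24 bits hold the magic, low 8 the version */
			if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
				break;

			/*
			 * t->action: low 16 bits say what happened, high 16
			 * are the category bits; t->sector/t->bytes locate
			 * the I/O and t->time orders events across CPUs.
			 */

			off += sizeof(*t) + t->pdu_len;	/* skip optional payload */
		}
	}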
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 8fad50f..5bed09a 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -97,6 +97,8 @@ COMPATIBLE_IOCTL(BLKRRPART)
 COMPATIBLE_IOCTL(BLKFLSBUF)
 COMPATIBLE_IOCTL(BLKSECTSET)
 COMPATIBLE_IOCTL(BLKSSZGET)
+COMPATIBLE_IOCTL(BLKSTARTTRACE)
+COMPATIBLE_IOCTL(BLKSTOPTRACE)
 ULONG_IOCTL(BLKRASET)
 ULONG_IOCTL(BLKFRASET)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b77f260..5575284 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -196,6 +196,8 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP     _IO(0x00,1)	/* bmap access */
diff --git a/mm/highmem.c b/mm/highmem.c
index ce2e7e8..d0ea1ee 100644
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 static mempool_t *page_pool, *isa_page_pool;
@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q
 		pool = isa_page_pool;
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);