diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -419,6 +419,14 @@ config LBD
	  your machine, or if you want to have a raid or loopback device
	  bigger than 2TB. Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	select RELAYFS_FS
+	help
+	  Say Y here if you want to be able to trace the block layer actions
+	  on a given queue.
+
 config CDROM_PKTCDVD
 	tristate "Packet writing on CD/DVD media"
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD) += viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
+#include <linux/blktrace.h>
 
 #include <asm/uaccess.h>
 
@@ -371,6 +372,9 @@ struct request *elv_next_request(request
 	while ((rq = __elv_next_request(q)) != NULL) {
+
+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
 		/*
 		 * just mark as started even if we don't start it, a request
 		 * that has been delayed should not be passed by new incoming
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 	case BLKGETSIZE64:
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKSTARTTRACE:
+		return blk_start_trace(bdev, (char __user *) arg);
+	case BLKSTOPTRACE:
+		return blk_stop_trace(bdev);
 	}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/blktrace.h>
 
@@ -1624,6 +1625,11 @@ void blk_cleanup_queue(request_queue_t *
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace) {
+		blk_cleanup_trace(q->blk_trace);
+		q->blk_trace = NULL;
+	}
+
 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
 
 	kmem_cache_free(requestq_cachep, q);
@@ -1970,6 +1976,8 @@ rq_starved:
 	rq_init(q, rq);
 	rq->rl = rl;
 
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+
@@ -1998,6 +2006,8 @@ static struct request *get_request_wait(
 		if (!rq) {
 			struct io_context *ioc;
 
+			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
 			__generic_unplug_device(q);
 			spin_unlock_irq(q->queue_lock);
@@ -2051,6 +2061,8 @@ EXPORT_SYMBOL(blk_get_request);
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
@@ -2714,6 +2726,8 @@ static int __make_request(request_queue_
 			if (!q->back_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2729,6 +2743,8 @@ static int __make_request(request_queue_
 			if (!q->front_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
 			req->bio = bio;
@@ -2794,6 +2810,8 @@ get_rq:
 	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
 
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
 	spin_lock_irq(q->queue_lock);
 	if (elv_queue_empty(q))
@@ -3030,6 +3048,10 @@ end_io:
 		blk_partition_remap(bio);
 
 		ret = q->make_request_fn(q, bio);
+
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
@@ -3148,6 +3170,8 @@ static int __end_that_request_first(stru
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 	struct bio *bio;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	/*
 	 * extend uptodate bool to allow < 0 value to be direct io error
 	 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_que
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -412,6 +413,8 @@ struct request_queue
 	struct request		*flush_rq;
 	unsigned char		ordered;
 
+	struct blk_trace	*blk_trace;
+
diff --git a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -195,6 +195,8 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
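
As an aside, this is roughly how a userspace tool would drive the two new
ioctls. A minimal sketch, not part of the patch: the userspace-visible header
path, the buffer sizing and the error handling are assumptions here; only
BLKSTARTTRACE, BLKSTOPTRACE and struct blk_user_trace_setup come from the
patch itself.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/blktrace.h>	/* assumed to be visible to userspace */

    /* Start tracing 'dev'; returns the open fd, or -1 on failure. */
    static int start_trace(const char *dev)
    {
    	struct blk_user_trace_setup buts;
    	int fd = open(dev, O_RDONLY);

    	if (fd < 0)
    		return -1;

    	memset(&buts, 0, sizeof(buts));
    	buts.buf_size = 256 * 1024;	/* relay sub-buffer size (assumed value) */
    	buts.buf_nr = 4;		/* sub-buffers per cpu (assumed value) */
    	buts.act_mask = 0;		/* 0 means the kernel traces everything */

    	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
    		close(fd);
    		return -1;
    	}

    	/* the kernel copies buts back with ->name filled in */
    	printf("tracing into relayfs dir block/%s\n", buts.name);
    	return fd;
    }

Stopping the trace is just ioctl(fd, BLKSTOPTRACE) on the same device.
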
--- /dev/null	2005-09-03 12:52:15.000000000 +0200
+++ linux-2.6/drivers/block/blktrace.c	2005-09-08 08:33:20.000000000 +0200
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace record and writes it into the per-cpu relayfs buffer.
+ */
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, char *pdu_data)
+{
+	struct blk_io_trace t;
+	unsigned long flags;
+	int cpu;
+
+	if (rw & (1 << BIO_RW_BARRIER))
+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
+	if (rw & (1 << BIO_RW_SYNC))
+		what |= BLK_TC_ACT(BLK_TC_SYNC);
+
+	if (rw & WRITE)
+		what |= BLK_TC_ACT(BLK_TC_WRITE);
+	else
+		what |= BLK_TC_ACT(BLK_TC_READ);
+
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+		return;
+
+	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t.sequence = atomic_add_return(1, &bt->sequence);
+
+	cpu = get_cpu();
+	t.cpu = cpu;
+	t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+	put_cpu();
+
+	t.sector = sector;
+	t.bytes = bytes;
+	t.action = what;
+	t.error = error;
+	t.pdu_len = pdu_len;
+	t.pid = current->pid;
+	memcpy(t.comm, current->comm, sizeof(t.comm));
+
+	local_irq_save(flags);
+	__relay_write(bt->rchan, &t, sizeof(t));
+	if (pdu_len)
+		__relay_write(bt->rchan, pdu_data, pdu_len);
+	local_irq_restore(flags);
+}
+
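
Note how the filter works: the user-supplied act_mask holds category bits
only, so it is shifted into the category half of what before the AND. A
worked example using constants from this patch:

    /*
     * act_mask = BLK_TC_COMPLETE (user only wants completions)
     * event    = BLK_TA_QUEUE = __BLK_TA_QUEUE | (BLK_TC_QUEUE << 16)
     *
     *   (BLK_TC_COMPLETE << 16) & BLK_TA_QUEUE
     * = (0x80 << 16) & ((0x10 << 16) | 0x01)
     * = 0x800000 & 0x100001
     * = 0                          ->  event is dropped
     *
     * A BLK_TA_COMPLETE event carries (BLK_TC_COMPLETE << 16) and passes.
     */
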
+static struct dentry *blk_tree_root;
+static DECLARE_MUTEX(blk_tree_mutex);
+
+static inline void blk_remove_root(void)
+{
+	if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+		blk_tree_root = NULL;
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+	down(&blk_tree_mutex);
+	relayfs_remove_dir(dir);
+	blk_remove_root();
+	up(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+	struct dentry *dir = NULL;
+
+	down(&blk_tree_mutex);
+
+	if (!blk_tree_root) {
+		blk_tree_root = relayfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			goto err;
+	}
+
+	dir = relayfs_create_dir(blk_name, blk_tree_root);
+	if (!dir)
+		blk_remove_root();
+
+err:
+	up(&blk_tree_mutex);
+	return dir;
+}
+
+void blk_cleanup_trace(struct blk_trace *bt)
+{
+	relay_close(bt->rchan);
+	blk_remove_tree(bt->dir);
+	kfree(bt);
+}
+
+int blk_stop_trace(struct block_device *bdev)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_trace *bt = NULL;
+	int ret = -EINVAL;
+
+	if (!q)
+		return -ENXIO;
+
+	down(&bdev->bd_sem);
+
+	if (q->blk_trace) {
+		bt = q->blk_trace;
+		q->blk_trace = NULL;
+		ret = 0;
+	}
+
+	up(&bdev->bd_sem);
+
+	if (bt)
+		blk_cleanup_trace(bt);
+
+	return ret;
+}
+
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_user_trace_setup buts;
+	struct blk_trace *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+	int ret;
+
+	if (!q)
+		return -ENXIO;
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	down(&bdev->bd_sem);
+	ret = -EBUSY;
+	if (q->blk_trace)
+		goto err;
+
+	ret = -ENOMEM;
+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+	if (!bt)
+		goto err;
+
+	ret = -ENOENT;
+	dir = blk_create_tree(bdevname(bdev, b));
+	if (!dir)
+		goto err;
+
+	bt->dir = dir;
+	atomic_set(&bt->sequence, 0);
+
+	ret = -EIO;
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
+	if (!bt->rchan)
+		goto err;
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	q->blk_trace = bt;
+	up(&bdev->bd_sem);
+	return 0;
+err:
+	up(&bdev->bd_sem);
+	if (dir)
+		blk_remove_tree(dir);
+	kfree(bt);
+	return ret;
+}
+
+/*
+ * Record the per-cpu difference between sched_clock() and wall time,
+ * so traces taken on different cpus share a common time base.
+ */
+static void blk_trace_check_cpu_time(void *data)
+{
+	unsigned long long a, b, *t;
+	struct timeval tv;
+	int cpu = get_cpu();
+
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+	a = sched_clock();
+	do_gettimeofday(&tv);
+	b = sched_clock();
+
+	*t = (unsigned long long) tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+	*t -= (a + b) / 2;
+
+	put_cpu();
+}
+
+static int blk_trace_calibrate_offsets(void)
+{
+	unsigned long flags;
+
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static __init int blk_trace_init(void)
+{
+	return blk_trace_calibrate_offsets();
+}
+
+module_init(blk_trace_init);
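
On the consumer side, relay_open("trace", dir, ...) shows up in relayfs as
one file per cpu under block/<devname>/ (trace0, trace1, ...). A minimal
reader sketch, not part of the patch; the /mnt/relay mount point and the
drain-to-stdout behaviour are assumptions:

    #include <stdio.h>

    /* Drain one per-cpu trace file, e.g. /mnt/relay/block/hda/trace0. */
    static void drain(const char *path)
    {
    	char buf[64 * 1024];
    	size_t n;
    	FILE *f = fopen(path, "r");

    	if (!f)
    		return;

    	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
    		fwrite(buf, 1, n, stdout);	/* hand off to the record parser */

    	fclose(f);
    }
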
--- /dev/null	2005-09-03 12:52:15.000000000 +0200
+++ linux-2.6/include/linux/blktrace.h	2005-09-08 08:33:20.000000000 +0200
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+
+/*
+ * Trace categories
+ */
+enum {
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+
+	BLK_TC_END	= 1 << 15,	/* only 16 bits, reminder */
+};
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum {
+	__BLK_TA_QUEUE = 1,	/* queued */
+	__BLK_TA_BACKMERGE,	/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,	/* front merged to existing rq */
+	__BLK_TA_GETRQ,		/* allocated new request */
+	__BLK_TA_SLEEPRQ,	/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,	/* request requeued */
+	__BLK_TA_ISSUE,		/* sent to driver */
+	__BLK_TA_COMPLETE,	/* completed by driver */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+
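
So the low 16 bits of an event's action identify what happened and the high
16 bits say which categories it belongs to. A trivial consumer-side decode
sketch; the helper names are made up for illustration:

    #include <stdint.h>

    #define BLK_TC_SHIFT	16

    /* Illustrative helpers, not part of the patch. */
    static inline uint16_t trace_action(uint32_t action)
    {
    	return action & 0xffff;		/* __BLK_TA_* value */
    }

    static inline uint16_t trace_categories(uint32_t action)
    {
    	return action >> BLK_TC_SHIFT;	/* BLK_TC_* mask */
    }
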
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x04
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u32 cpu;		/* on what cpu did it happen */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+	char comm[16];		/* task command name (TASK_COMM_LEN) */
+};
+
+struct blk_trace {
+	struct dentry *dir;
+	struct rchan *rchan;
+	atomic_t sequence;
+	u16 act_mask;
+};
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+};
+
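
The relay stream is then just a sequence of blk_io_trace records, each
followed by pdu_len bytes of payload. A hedged parser sketch over a buffer
already read from relayfs; mirroring the struct with fixed-width types in
userspace is an assumption of this sketch, not something the patch exports:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLK_IO_TRACE_MAGIC	0x65617400

    /* Userspace mirror of struct blk_io_trace (fixed-width types). */
    struct blk_io_trace {
    	uint32_t magic, sequence;
    	uint64_t time, sector;
    	uint32_t bytes, action, pid, cpu;
    	uint16_t error, pdu_len;
    	char comm[16];
    };

    /* Walk the records in a buffer read from a relayfs trace file. */
    static void parse(const char *buf, size_t len)
    {
    	size_t off = 0;

    	while (off + sizeof(struct blk_io_trace) <= len) {
    		struct blk_io_trace t;

    		memcpy(&t, buf + off, sizeof(t));
    		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
    			break;	/* corrupt stream, or not a trace */

    		printf("%5u: %s sector %llu, %u bytes, action 0x%x\n",
    		       t.sequence, t.comm,
    		       (unsigned long long) t.sector, t.bytes, t.action);

    		off += sizeof(t) + t.pdu_len;	/* skip payload, if any */
    	}
    }
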
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void blk_cleanup_trace(struct blk_trace *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *);
+
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+				    u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+	}
+}
+
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+				     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		blk_add_trace_bio(q, bio, what);
+	else
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)		(-EINVAL)
+#define blk_stop_trace(bdev)			(-EINVAL)
+#define blk_cleanup_trace(bt)			do { } while (0)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, bio, what)		do { } while (0)
+#define blk_add_trace_generic(q, bio, rw, what)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif