[PATCH] blk_io_trace_remap structure needs padding on 32-bit
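
On 32-bit x86, gcc aligns a u64 structure member on a 4-byte boundary, while
64-bit architectures align it on 8 bytes. struct blk_io_trace_remap (a u32
followed by a u64, see include/linux/blktrace.h below) therefore ends up
with a different size and layout depending on kernel bitness, so the remap
pdu cannot be decoded reliably when user space and kernel disagree.
Explicit padding pins the layout. A minimal sketch of the problem; the
__pad member name is illustrative, not taken from this tree:

	struct remap_unpadded {
		u32 device;
		u64 sector;	/* offset 4 on i386, offset 8 on x86_64 */
	};

	struct remap_padded {
		u32 device;
		u32 __pad;	/* illustrative: forces sector to offset 8 everywhere */
		u64 sector;	/* same offset and total size on both */
	};
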
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 51b0af1..77ddfe9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -419,6 +419,18 @@ config LBD
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB. Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	select RELAYFS_FS
+	help
+	  Say Y here, if you want to be able to trace the block layer actions
+	  on a given queue. Tracing allows you to see any traffic happening
+	  on a block device queue. For more information (and the user space
+	  support tools needed), fetch the blktrace app from:
+
+	  git://brick.kernel.dk/data/git/blktrace.git
+
+
 config CDROM_PKTCDVD
 	tristate "Packet writing on CD/DVD media"
 	depends on !UML
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1cf09a1..ddf986a 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
 
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
+
diff --git a/drivers/block/blktrace.c b/drivers/block/blktrace.c
new file mode 100644
index 0000000..904db2f
--- /dev/null
+++ b/drivers/block/blktrace.c
@@ -0,0 +1,246 @@
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+	struct blk_io_trace t;
+	unsigned long flags;
+	pid_t pid;
+	int cpu;
+
+	if (rw & (1 << BIO_RW_BARRIER))
+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
+	if (rw & (1 << BIO_RW_SYNC))
+		what |= BLK_TC_ACT(BLK_TC_SYNC);
+
+	if (rw & WRITE)
+		what |= BLK_TC_ACT(BLK_TC_WRITE);
+	else
+		what |= BLK_TC_ACT(BLK_TC_READ);
+
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+		return;
+	if (sector < bt->start_lba || sector > bt->end_lba)
+		return;
+
+	pid = current->pid;
+	if (bt->pid && pid != bt->pid)
+		return;
+
+	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+
+	t.device = bt->dev;
+	t.sector = sector;
+	t.bytes = bytes;
+	t.action = what;
+	t.error = error;
+	t.pdu_len = pdu_len;
+
+	t.pid = pid;
+	memcpy(t.comm, current->comm, sizeof(t.comm));
+
+	/*
+	 * need to serialize this part on the local processor to prevent
+	 * interrupts from messing with the sequence <-> time relation
+	 */
+	local_irq_save(flags);
+
+	t.sequence = atomic_add_return(1, &bt->sequence);
+
+	cpu = smp_processor_id();
+	t.cpu = cpu;
+	t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+
+	__relay_write(bt->rchan, &t, sizeof(t));
+	if (pdu_len)
+		__relay_write(bt->rchan, pdu_data, pdu_len);
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+
+static struct dentry *blk_tree_root;
+static DECLARE_MUTEX(blk_tree_mutex);
+
+static inline void blk_remove_root(void)
+{
+	if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+		blk_tree_root = NULL;
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+	down(&blk_tree_mutex);
+	relayfs_remove_dir(dir);
+	blk_remove_root();
+	up(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+	struct dentry *dir = NULL;
+
+	down(&blk_tree_mutex);
+
+	if (!blk_tree_root) {
+		blk_tree_root = relayfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			goto err;
+	}
+
+	dir = relayfs_create_dir(blk_name, blk_tree_root);
+	if (!dir)
+		blk_remove_root();
+
+err:
+	up(&blk_tree_mutex);
+	return dir;
+}
+
+void blk_cleanup_trace(struct blk_trace *bt)
+{
+	relay_close(bt->rchan);
+	blk_remove_tree(bt->dir);
+	kfree(bt);
+}
+
+int blk_stop_trace(struct block_device *bdev)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_trace *bt = NULL;
+	int ret = -EINVAL;
+
+	if (!q)
+		return -ENXIO;
+
+	down(&bdev->bd_sem);
+
+	if (q->blk_trace) {
+		bt = q->blk_trace;
+		q->blk_trace = NULL;
+		ret = 0;
+	}
+
+	up(&bdev->bd_sem);
+
+	if (bt)
+		blk_cleanup_trace(bt);
+
+	return ret;
+}
+
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+{
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct blk_user_trace_setup buts;
+	struct blk_trace *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+	int ret;
+
+	if (!q)
+		return -ENXIO;
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	down(&bdev->bd_sem);
+	ret = -EBUSY;
+	if (q->blk_trace)
+		goto err;
+
+	ret = -ENOMEM;
+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+	if (!bt)
+		goto err;
+
+	ret = -ENOENT;
+	dir = blk_create_tree(bdevname(bdev, b));
+	if (!dir)
+		goto err;
+
+	bt->dir = dir;
+	bt->dev = bdev->bd_dev;
+	atomic_set(&bt->sequence, 0);
+
+	ret = -EIO;
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
+	if (!bt->rchan)
+		goto err;
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	bt->start_lba = buts.start_lba;
+	bt->end_lba = buts.end_lba;
+	if (!bt->end_lba)
+		bt->end_lba = -1ULL;
+
+	bt->pid = buts.pid;
+
+	q->blk_trace = bt;
+	up(&bdev->bd_sem);
+	return 0;
+err:
+	up(&bdev->bd_sem);
+	if (dir)
+		blk_remove_tree(dir);
+	if (bt)
+		kfree(bt);
+	return ret;
+}
+
+static void blk_trace_check_cpu_time(void *data)
+{
+	unsigned long long a, b, *t;
+	struct timeval tv;
+	int cpu = get_cpu();
+
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+	a = sched_clock();
+	do_gettimeofday(&tv);
+	b = sched_clock();
+
+	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+	*t -= (a + b) / 2;
+	put_cpu();
+}
+
+static int blk_trace_calibrate_offsets(void)
+{
+	unsigned long flags;
+
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static __init int blk_trace_init(void)
+{
+	return blk_trace_calibrate_offsets();
+}
+
+module_init(blk_trace_init);
+
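
For reference, user space arms tracing through the BLKSTARTTRACE ioctl
wired up in drivers/block/ioctl.c below, passing the blk_user_trace_setup
structure from include/linux/blktrace.h. A minimal sketch of a consumer,
assuming the definitions added by this patch are visible to user space (a
real tool such as the blktrace app would carry its own copies):

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* BLKSTARTTRACE, per this patch */
	#include <linux/blktrace.h>	/* struct blk_user_trace_setup */

	int start_trace(const char *dev)
	{
		struct blk_user_trace_setup buts;
		int fd = open(dev, O_RDONLY);

		if (fd < 0)
			return -1;

		memset(&buts, 0, sizeof(buts));
		buts.buf_size = 64 * 1024;	/* relay sub-buffer size, must be non-zero */
		buts.buf_nr = 4;		/* number of sub-buffers, must be non-zero */
		/* act_mask, start_lba/end_lba and pid left 0: trace everything */

		if (ioctl(fd, BLKSTARTTRACE, &buts) < 0)
			return -1;

		/* the kernel fills in buts.name; events are then readable from
		 * the relayfs files created under block/<name>/ (one per cpu) */
		printf("tracing %s\n", buts.name);
		return fd;
	}

Tracing stops again via ioctl(fd, BLKSTOPTRACE), which tears down the
relay channel through blk_cleanup_trace().
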
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 36f1057..b481403 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace.h>
 
 #include <asm/uaccess.h>
 
@@ -317,6 +318,8 @@ void elv_requeue_request(request_queue_t
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
 	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * barriers implicitly indicate back insertion
@@ -450,6 +453,8 @@ struct request *elv_next_request(request
 			rq->flags |= REQ_STARTED;
 		}
 
+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
 		if (!q->boundary_rq || q->boundary_rq == rq) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = NULL;
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
index 6e27847..a73c11a 100644
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
@@ -4,6 +4,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 	case BLKGETSIZE64:
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKSTARTTRACE:
+		return blk_start_trace(bdev, (char __user *) arg);
+	case BLKSTOPTRACE:
+		return blk_stop_trace(bdev);
 	}
 	return -ENOIOCTLCMD;
 }
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 0af7351..b40a90d 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -29,6 +29,7 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/blktrace.h>
 
 /*
  * for max sense size
@@ -1424,8 +1425,10 @@ void blk_plug_device(request_queue_t *q)
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+	}
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1489,14 +1492,21 @@ static void blk_backing_dev_unplug(struc
 	/*
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
-	if (q->unplug_fn)
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				      q->rq.count[READ] + q->rq.count[WRITE]);
+
 		q->unplug_fn(q);
+	}
 }
 
 static void blk_unplug_work(void *data)
 {
 	request_queue_t *q = data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
+
 	q->unplug_fn(q);
 }
 
@@ -1504,6 +1514,9 @@ static void blk_unplug_timeout(unsigned
 {
 	request_queue_t *q = (request_queue_t *)data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+			      q->rq.count[READ] + q->rq.count[WRITE]);
+
 	kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1626,6 +1639,11 @@ void blk_cleanup_queue(request_queue_t *
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace) {
+		blk_cleanup_trace(q->blk_trace);
+		q->blk_trace = NULL;
+	}
+
 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
 
 	kmem_cache_free(requestq_cachep, q);
@@ -1977,6 +1995,8 @@ rq_starved:
 
 	rq_init(q, rq);
 	rq->rl = rl;
+
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
 }
@@ -2005,6 +2025,8 @@ static struct request *get_request_wait(
 		if (!rq) {
 			struct io_context *ioc;
 
+			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
 			__generic_unplug_device(q);
 			spin_unlock_irq(q->queue_lock);
 			io_schedule();
@@ -2058,6 +2080,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
@@ -2686,6 +2710,8 @@ static int __make_request(request_queue_
 			if (!q->back_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2701,6 +2727,8 @@ static int __make_request(request_queue_
 			if (!q->front_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
 			req->bio = bio;
 
@@ -2850,6 +2878,7 @@ void generic_make_request(struct bio *bi
 	request_queue_t *q;
 	sector_t maxsector;
 	int ret, nr_sectors = bio_sectors(bio);
+	dev_t old_dev;
 
 	might_sleep();
 	/* Test device or partition size, when known. */
@@ -2876,6 +2905,8 @@ void generic_make_request(struct bio *bi
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
+	maxsector = -1;
+	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
 
@@ -2908,6 +2939,15 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
+		if (maxsector != -1)
+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+					    maxsector);
+
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+		maxsector = bio->bi_sector;
+		old_dev = bio->bi_bdev->bd_dev;
+
		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3027,6 +3067,8 @@ static int __end_that_request_first(stru
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 	struct bio *bio;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	/*
	 * extend uptodate bool to allow < 0 value to be direct io error
 	 */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 930b9fc..4573599 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -17,6 +17,7 @@
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/blktrace.h>
 
 static const char *_name = DM_NAME;
 
@@ -302,6 +303,8 @@ static inline void dec_pending(struct dm
 		/* nudge anyone waiting on suspend queue */
 		wake_up(&io->md->wait);
 
+		blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
+
 		bio_endio(io->bio, io->bio->bi_size, io->error);
 		free_io(io->md, io);
 	}
@@ -360,6 +363,7 @@ static void __map_bio(struct dm_target *
 		      struct target_io *tio)
 {
 	int r;
+	sector_t sector;
 
 	/*
 	 * Sanity checks.
@@ -375,10 +379,17 @@ static void __map_bio(struct dm_target *
 	 * this io.
 	 */
 	atomic_inc(&tio->io->io_count);
+	sector = clone->bi_sector;
 	r = ti->type->map(ti, clone, &tio->info);
-	if (r > 0)
+	if (r > 0) {
 		/* the bio has been remapped so dispatch it */
+
+		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+				    tio->io->bio->bi_bdev->bd_dev, sector,
+				    clone->bi_sector);
+
 		generic_make_request(clone);
+	}
 
 	else if (r < 0) {
 		/* error the io and bail out */
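
Both blk_add_trace_remap() call sites, generic_make_request() above and
__map_bio() here, emit a BLK_TA_REMAP event whose pdu is the
blk_io_trace_remap structure from include/linux/blktrace.h below: the
other end of the remap as a big-endian device number and sector (this pdu
is the structure the padding fix in the title is about). A decoding
sketch, assuming a little-endian host; the helper names are hypothetical:

	#include <stdio.h>
	#include <arpa/inet.h>	/* ntohl() */

	static unsigned long long be64_to_host(unsigned long long x)
	{
		/* byte-swap a big-endian u64 loaded on a little-endian host */
		return ((unsigned long long) ntohl(x & 0xffffffffULL) << 32) |
		       ntohl(x >> 32);
	}

	static void decode_remap(unsigned int be_device,
				 unsigned long long be_sector)
	{
		unsigned int dev = ntohl(be_device);

		/* 2.6 dev_t packing: 12-bit major, 20-bit minor */
		printf("remap: dev %u:%u sector %llu\n", dev >> 20,
		       dev & 0xfffff, be64_to_host(be_sector));
	}
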
diff --git a/fs/bio.c b/fs/bio.c
index 460554b..cd95166 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -1050,6 +1051,9 @@ struct bio_pair *bio_split(struct bio *b
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+			      bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 025a7f0..749f370 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_que
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -420,6 +421,8 @@ struct request_queue
 	 */
 	struct request		*flush_rq;
 	unsigned char		ordered;
+
+	struct blk_trace	*blk_trace;
 };
 
 enum {
diff --git a/include/linux/blktrace.h b/include/linux/blktrace.h
new file mode 100644
index 0000000..0c36714
--- /dev/null
+++ b/include/linux/blktrace.h
@@ -0,0 +1,212 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+
+/*
+ * Trace categories
+ */
+enum blktrace_cat {
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+
+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum blktrace_act {
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+	__BLK_TA_PLUG,			/* queue was plugged */
+	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
+	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
+	__BLK_TA_INSERT,		/* insert request */
+	__BLK_TA_SPLIT,			/* bio was split */
+	__BLK_TA_BOUNCE,		/* bio was bounced */
+	__BLK_TA_REMAP,			/* bio was remapped */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
+#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
+#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
+
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x05
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u32 cpu;		/* on what cpu did it happen */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+	u32 device;		/* device number */
+	char comm[16];		/* task command name (TASK_COMM_LEN) */
+};
+
+/*
+ * The remap event
+ */
+struct blk_io_trace_remap {
+	u32 device;
+	u64 sector;
+};
+
+struct blk_trace {
+	struct dentry *dir;
+	struct rchan *rchan;
+	atomic_t sequence;
+	u32 dev;
+	u16 act_mask;
+	u64 start_lba;
+	u64 end_lba;
+	u32 pid;
+};
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+	u64 start_lba;
+	u64 end_lba;
+	u32 pid;
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void blk_cleanup_trace(struct blk_trace *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+				    u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+	}
+}
+
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+				     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		blk_add_trace_bio(q, bio, what);
+	else
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+					 struct bio *bio, unsigned int pdu)
+{
+	struct blk_trace *bt = q->blk_trace;
+	u64 rpdu = cpu_to_be64(pdu);
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+	else
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+}
+
+static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+				       dev_t dev, sector_t from, sector_t to)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device = cpu_to_be32(dev);
+	r.sector = cpu_to_be64(to);
+
+	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)		(-EINVAL)
+#define blk_stop_trace(bdev)			(-EINVAL)
+#define blk_cleanup_trace(bt)			do { } while (0)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, rq, what)		do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
+#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
+#define blk_add_trace_remap(q, bio, dev, f, t)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif
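
A consumer reads struct blk_io_trace records back from the relay channel
and splits the action field with the same BLK_TC_SHIFT convention used
above; records are written in native byte order by __blk_add_trace(), and
time is in nanoseconds (sched_clock() corrected to wall-clock time by the
calibration in blktrace.c). A sketch, assuming the header's definitions
are available:

	#include <stdio.h>

	static int classify(const struct blk_io_trace *t)
	{
		unsigned int cat, act;

		/* magic is BLK_IO_TRACE_MAGIC | version; low byte is the version */
		if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
			return -1;	/* not a trace record, stream is skewed */

		cat = t->action >> BLK_TC_SHIFT;		/* BLK_TC_* category bits */
		act = t->action & ((1 << BLK_TC_SHIFT) - 1);	/* __BLK_TA_* code */

		printf("%c %llu + %u [%s] act %u time %llu ns\n",
		       (cat & BLK_TC_WRITE) ? 'W' : 'R',
		       (unsigned long long) t->sector, t->bytes, t->comm,
		       act, (unsigned long long) t->time);

		return t->pdu_len;	/* payload bytes that follow the record */
	}
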
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6d62267..78adc99 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -196,6 +196,8 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/mm/highmem.c b/mm/highmem.c
index ce2e7e8..800aa91 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q
 		pool = isa_page_pool;
 	}
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
 	 * slow path
 	 */