[PATCH] kernel: update to recent fixes
author     Jens Axboe <axboe@suse.de>
           Thu, 2 Feb 2006 17:39:40 +0000 (18:39 +0100)
committer  Jens Axboe <axboe@suse.de>
           Thu, 2 Feb 2006 17:39:40 +0000 (18:39 +0100)
kernel/blk-trace-2.6.16-rc1-git-U0 [deleted file]
kernel/blk-trace-2.6.16-rc1-git-U1 [new file with mode: 0644]

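This update replaces the U0 revision of the blktrace kernel patch with U1. The
copy of block/blktrace.c carried in U1 is 382 lines, up from 362 in U0: it
documents the locking rules around relay_reserve(), brackets the per-cpu
sequence/timestamp fill with get_cpu()/put_cpu(), switches the command-name
copy from memcpy() to strncpy(), and adds kernel-doc comments to the
blk_add_trace_*() helpers.

For reference, a minimal userspace sketch of driving the BLKSTARTTRACE and
BLKSTOPTRACE ioctls that this patch series adds. The ioctl numbers and the
structure fields are copied from the patch below; BDEVNAME_SIZE is assumed to
be 32, the struct padding is assumed to match the kernel's, and the device
path and buffer sizes are purely illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>

    /* mirrors struct blk_user_trace_setup from linux/blktrace_api.h */
    struct blk_user_trace_setup {
            char name[32];          /* BDEVNAME_SIZE; output: relayfs dir */
            uint16_t act_mask;      /* input: 0 selects all actions */
            uint32_t buf_size;      /* input: size of each relay sub-buffer */
            uint32_t buf_nr;        /* input: number of sub-buffers */
            uint64_t start_lba;     /* optional sector range filter */
            uint64_t end_lba;
            uint32_t pid;           /* optional pid filter, 0 = all */
    };

    #define BLKSTARTTRACE _IOWR(0x12, 115, struct blk_user_trace_setup)
    #define BLKSTOPTRACE  _IO(0x12, 116)

    int main(void)
    {
            struct blk_user_trace_setup buts;
            int fd = open("/dev/sda", O_RDONLY);    /* illustrative device */

            if (fd < 0)
                    return 1;

            memset(&buts, 0, sizeof(buts));
            buts.buf_size = 64 * 1024;      /* both must be non-zero, */
            buts.buf_nr = 4;                /* or the kernel returns -EINVAL */

            if (ioctl(fd, BLKSTARTTRACE, &buts) < 0)
                    return 1;

            /*
             * The kernel writes the relayfs directory name back into
             * buts.name (slashes converted to underscores); per-cpu event
             * files appear under block/<name>/ in the relayfs mount, with
             * the drop count readable from block/<name>/dropped.
             */
            printf("tracing as block/%s\n", buts.name);

            sleep(5);
            ioctl(fd, BLKSTOPTRACE);
            close(fd);
            return 0;
    }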
diff --git a/kernel/blk-trace-2.6.16-rc1-git-U0 b/kernel/blk-trace-2.6.16-rc1-git-U0
deleted file mode 100644 (file)
index 677852b..0000000
+++ /dev/null
@@ -1,997 +0,0 @@
-diff --git a/block/Kconfig b/block/Kconfig
-index 377f6dd..27eaed9 100644
---- a/block/Kconfig
-+++ b/block/Kconfig
-@@ -11,4 +11,15 @@ config LBD
-         your machine, or if you want to have a raid or loopback device
-         bigger than 2TB.  Otherwise say N.
-+config BLK_DEV_IO_TRACE
-+      bool "Support for tracing block io actions"
-+      select RELAYFS_FS
-+      help
-+        Say Y here if you want to be able to trace the block layer actions
-+        on a given queue. Tracing allows you to see any traffic happening
-+        on a block device queue. For more information (and the user space
-+        support tools needed), fetch the blktrace app from:
-+
-+        git://brick.kernel.dk/data/git/blktrace.git
-+
- source block/Kconfig.iosched
-diff --git a/block/Makefile b/block/Makefile
-index 7e4f93e..c05de0e 100644
---- a/block/Makefile
-+++ b/block/Makefile
-@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosch
- obj-$(CONFIG_IOSCHED_AS)      += as-iosched.o
- obj-$(CONFIG_IOSCHED_DEADLINE)        += deadline-iosched.o
- obj-$(CONFIG_IOSCHED_CFQ)     += cfq-iosched.o
-+
-+obj-$(CONFIG_BLK_DEV_IO_TRACE)        += blktrace.o
-diff --git a/block/blktrace.c b/block/blktrace.c
-new file mode 100644
-index 0000000..21b381d
---- /dev/null
-+++ b/block/blktrace.c
-@@ -0,0 +1,362 @@
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/blkdev.h>
-+#include <linux/blktrace_api.h>
-+#include <linux/percpu.h>
-+#include <linux/init.h>
-+#include <linux/mutex.h>
-+#include <asm/uaccess.h>
-+
-+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
-+
-+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-+                   int rw, u32 what, int error, int pdu_len, void *pdu_data)
-+{
-+      struct blk_io_trace *t;
-+      unsigned long flags;
-+      unsigned long *sequence;
-+      pid_t pid;
-+      int cpu;
-+
-+      if (rw & (1 << BIO_RW_BARRIER))
-+              what |= BLK_TC_ACT(BLK_TC_BARRIER);
-+      if (rw & (1 << BIO_RW_SYNC))
-+              what |= BLK_TC_ACT(BLK_TC_SYNC);
-+
-+      if (rw & WRITE)
-+              what |= BLK_TC_ACT(BLK_TC_WRITE);
-+      else
-+              what |= BLK_TC_ACT(BLK_TC_READ);
-+
-+      if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
-+              return;
-+      if (sector < bt->start_lba || sector > bt->end_lba)
-+              return;
-+
-+      pid = current->pid;
-+      if (bt->pid && pid != bt->pid)
-+              return;
-+
-+      local_irq_save(flags);
-+
-+      t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
-+      if (unlikely(!t)) {
-+              local_irq_restore(flags);
-+              return;
-+      }
-+
-+      cpu = smp_processor_id();
-+      sequence = per_cpu_ptr(bt->sequence, cpu);
-+      t->sequence = ++(*sequence);
-+      t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
-+      t->cpu = cpu;
-+
-+      local_irq_restore(flags);
-+
-+      t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-+      t->device = bt->dev;
-+      t->sector = sector;
-+      t->bytes = bytes;
-+      t->action = what;
-+      t->error = error;
-+      t->pdu_len = pdu_len;
-+
-+      t->pid = pid;
-+      memcpy(t->comm, current->comm, sizeof(t->comm));
-+
-+      if (pdu_len)
-+              memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
-+}
-+
-+EXPORT_SYMBOL_GPL(__blk_add_trace);
-+
-+static struct dentry *blk_tree_root;
-+static struct mutex blk_tree_mutex;
-+
-+static inline void blk_remove_root(void)
-+{
-+      if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
-+              blk_tree_root = NULL;
-+}
-+
-+static void blk_remove_tree(struct dentry *dir)
-+{
-+      mutex_lock(&blk_tree_mutex);
-+      relayfs_remove_dir(dir);
-+      blk_remove_root();
-+      mutex_unlock(&blk_tree_mutex);
-+}
-+
-+static struct dentry *blk_create_tree(const char *blk_name)
-+{
-+      struct dentry *dir = NULL;
-+
-+      mutex_lock(&blk_tree_mutex);
-+
-+      if (!blk_tree_root) {
-+              blk_tree_root = relayfs_create_dir("block", NULL);
-+              if (!blk_tree_root)
-+                      goto err;
-+      }
-+
-+      dir = relayfs_create_dir(blk_name, blk_tree_root);
-+      if (!dir)
-+              blk_remove_root();
-+
-+err:
-+      mutex_unlock(&blk_tree_mutex);
-+      return dir;
-+}
-+
-+void blk_cleanup_trace(struct blk_trace *bt)
-+{
-+      relay_close(bt->rchan);
-+      relayfs_remove_file(bt->dropped_file);
-+      blk_remove_tree(bt->dir);
-+      kfree(bt);
-+}
-+
-+int blk_stop_trace(struct block_device *bdev)
-+{
-+      request_queue_t *q = bdev_get_queue(bdev);
-+      struct blk_trace *bt = NULL;
-+      int ret = -EINVAL;
-+
-+      if (!q)
-+              return -ENXIO;
-+
-+      down(&bdev->bd_sem);
-+
-+      if (q->blk_trace) {
-+              bt = q->blk_trace;
-+              q->blk_trace = NULL;
-+              ret = 0;
-+      }
-+
-+      up(&bdev->bd_sem);
-+
-+      if (bt)
-+              blk_cleanup_trace(bt);
-+
-+      return ret;
-+}
-+
-+static int blk_dropped_open(struct inode *inode, struct file *filp)
-+{
-+      filp->private_data = inode->u.generic_ip;
-+      
-+      return 0;
-+}
-+
-+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
-+                              size_t count, loff_t *ppos)
-+{
-+      struct blk_trace *bt = filp->private_data;
-+      char buf[16];
-+
-+      snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
-+      
-+      return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
-+}
-+
-+static struct file_operations blk_dropped_fops = {
-+      .owner =        THIS_MODULE,
-+      .open =         blk_dropped_open,
-+      .read =         blk_dropped_read,
-+};
-+
-+static int blk_subbuf_start_callback (struct rchan_buf *buf,
-+                                    void *subbuf,
-+                                    void *prev_subbuf,
-+                                    size_t prev_padding)
-+{
-+      if (relay_buf_full(buf)) {
-+              struct blk_trace *bt = buf->chan->private_data;
-+
-+              atomic_inc(&bt->dropped);
-+              return 0;
-+      }
-+
-+      return 1;
-+}
-+
-+static struct rchan_callbacks blk_relay_callbacks = {
-+      .subbuf_start = blk_subbuf_start_callback,
-+};
-+
-+int blk_start_trace(struct block_device *bdev, char __user *arg)
-+{
-+      request_queue_t *q = bdev_get_queue(bdev);
-+      struct blk_user_trace_setup buts;
-+      struct blk_trace *bt = NULL;
-+      struct dentry *dir = NULL;
-+      char b[BDEVNAME_SIZE];
-+      int ret, i;
-+
-+      if (!q)
-+              return -ENXIO;
-+
-+      if (copy_from_user(&buts, arg, sizeof(buts)))
-+              return -EFAULT;
-+
-+      if (!buts.buf_size || !buts.buf_nr)
-+              return -EINVAL;
-+
-+      strcpy(buts.name, bdevname(bdev, b));
-+
-+      /*
-+       * some device names have larger paths - convert the slashes
-+       * to underscores for this to work as expected
-+       */
-+      for (i = 0; i < strlen(buts.name); i++)
-+              if (buts.name[i] == '/')
-+                      buts.name[i] = '_';
-+
-+      if (copy_to_user(arg, &buts, sizeof(buts)))
-+              return -EFAULT;
-+
-+      down(&bdev->bd_sem);
-+      ret = -EBUSY;
-+      if (q->blk_trace)
-+              goto err;
-+
-+      ret = -ENOMEM;
-+      bt = kzalloc(sizeof(*bt), GFP_KERNEL);
-+      if (!bt)
-+              goto err;
-+
-+      bt->sequence = alloc_percpu(unsigned long);
-+      if (!bt->sequence)
-+              goto err;
-+
-+      ret = -ENOENT;
-+      dir = blk_create_tree(buts.name);
-+      if (!dir)
-+              goto err;
-+
-+      bt->dir = dir;
-+      bt->dev = bdev->bd_dev;
-+      atomic_set(&bt->dropped, 0);
-+
-+      ret = -EIO;
-+      bt->dropped_file = relayfs_create_file("dropped", dir, 0, &blk_dropped_fops, bt);
-+      if (!bt->dropped_file)
-+              goto err;
-+
-+      bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
-+      if (!bt->rchan)
-+              goto err;
-+      bt->rchan->private_data = bt;
-+
-+      bt->act_mask = buts.act_mask;
-+      if (!bt->act_mask)
-+              bt->act_mask = (u16) -1;
-+
-+      bt->start_lba = buts.start_lba;
-+      bt->end_lba = buts.end_lba;
-+      if (!bt->end_lba)
-+              bt->end_lba = -1ULL;
-+
-+      bt->pid = buts.pid;
-+
-+      q->blk_trace = bt;
-+      up(&bdev->bd_sem);
-+      return 0;
-+err:
-+      up(&bdev->bd_sem);
-+      if (bt && bt->dropped_file)
-+              relayfs_remove_file(bt->dropped_file);
-+      if (dir)
-+              blk_remove_tree(dir);
-+      if (bt) {
-+              if (bt->sequence)
-+                      free_percpu(bt->sequence);
-+              kfree(bt);
-+      }
-+      return ret;
-+}
-+
-+/*
-+ * Average offset over two calls to sched_clock() with a gettimeofday()
-+ * in the middle
-+ */
-+static void blk_check_time(unsigned long long *t)
-+{
-+      unsigned long long a, b;
-+      struct timeval tv;
-+
-+      a = sched_clock();
-+      do_gettimeofday(&tv);
-+      b = sched_clock();
-+
-+      *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
-+      *t -= (a + b) / 2;
-+}
-+
-+static void blk_trace_check_cpu_time(void *data)
-+{
-+      unsigned long long *t;
-+      int cpu = get_cpu();
-+
-+      t = &per_cpu(blk_trace_cpu_offset, cpu);
-+
-+      /*
-+       * Just call it twice, hopefully the second call will be cache hot
-+       * and a little more precise
-+       */
-+      blk_check_time(t);
-+      blk_check_time(t);
-+
-+      put_cpu();
-+}
-+
-+/*
-+ * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
-+ * timings
-+ */
-+static void blk_trace_calibrate_offsets(void)
-+{
-+      unsigned long flags;
-+
-+      smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
-+      local_irq_save(flags);
-+      blk_trace_check_cpu_time(NULL);
-+      local_irq_restore(flags);
-+}
-+
-+static void blk_trace_set_ht_offsets(void)
-+{
-+#if defined(CONFIG_SCHED_SMT)
-+      int cpu, i;
-+
-+      /*
-+       * now make sure HT siblings have the same time offset
-+       */
-+      preempt_disable();
-+      for_each_online_cpu(cpu) {
-+              unsigned long long *cpu_off, *sibling_off;
-+
-+              for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
-+                      if (i == cpu)
-+                              continue;
-+
-+                      cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
-+                      sibling_off = &per_cpu(blk_trace_cpu_offset, i);
-+                      *sibling_off = *cpu_off;
-+              }
-+      }
-+      preempt_enable();
-+#endif
-+}
-+
-+static __init int blk_trace_init(void)
-+{
-+      mutex_init(&blk_tree_mutex);
-+      blk_trace_calibrate_offsets();
-+      blk_trace_set_ht_offsets();
-+
-+      return 0;
-+}
-+
-+module_init(blk_trace_init);
-+
-diff --git a/block/elevator.c b/block/elevator.c
-index c9f424d..793c686 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -33,6 +33,7 @@
- #include <linux/init.h>
- #include <linux/compiler.h>
- #include <linux/delay.h>
-+#include <linux/blktrace_api.h>
- #include <asm/uaccess.h>
-@@ -333,6 +334,8 @@ void __elv_add_request(request_queue_t *
-       struct list_head *pos;
-       unsigned ordseq;
-+      blk_add_trace_rq(q, rq, BLK_TA_INSERT);
-+
-       if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
-@@ -491,6 +494,7 @@ struct request *elv_next_request(request
-                        * not be passed by new incoming requests
-                        */
-                       rq->flags |= REQ_STARTED;
-+                      blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
-               }
-               if (!q->boundary_rq || q->boundary_rq == rq) {
-diff --git a/block/ioctl.c b/block/ioctl.c
-index e110949..63e67a2 100644
---- a/block/ioctl.c
-+++ b/block/ioctl.c
-@@ -5,6 +5,7 @@
- #include <linux/backing-dev.h>
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
-+#include <linux/blktrace_api.h>
- #include <asm/uaccess.h>
- static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
-@@ -189,6 +190,10 @@ static int blkdev_locked_ioctl(struct fi
-               return put_ulong(arg, bdev->bd_inode->i_size >> 9);
-       case BLKGETSIZE64:
-               return put_u64(arg, bdev->bd_inode->i_size);
-+      case BLKSTARTTRACE:
-+              return blk_start_trace(bdev, (char __user *) arg);
-+      case BLKSTOPTRACE:
-+              return blk_stop_trace(bdev);
-       }
-       return -ENOIOCTLCMD;
- }
-diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
-index 8e27d0a..bfcde0f 100644
---- a/block/ll_rw_blk.c
-+++ b/block/ll_rw_blk.c
-@@ -28,6 +28,7 @@
- #include <linux/writeback.h>
- #include <linux/interrupt.h>
- #include <linux/cpu.h>
-+#include <linux/blktrace_api.h>
- /*
-  * for max sense size
-@@ -1555,8 +1556,10 @@ void blk_plug_device(request_queue_t *q)
-       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
-               return;
--      if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
-+      if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-               mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-+              blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
-+      }
- }
- EXPORT_SYMBOL(blk_plug_device);
-@@ -1620,14 +1623,21 @@ static void blk_backing_dev_unplug(struc
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
--      if (q->unplug_fn)
-+      if (q->unplug_fn) {
-+              blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-+                                      q->rq.count[READ] + q->rq.count[WRITE]);
-+
-               q->unplug_fn(q);
-+      }
- }
- static void blk_unplug_work(void *data)
- {
-       request_queue_t *q = data;
-+      blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-+                              q->rq.count[READ] + q->rq.count[WRITE]);
-+
-       q->unplug_fn(q);
- }
-@@ -1635,6 +1645,9 @@ static void blk_unplug_timeout(unsigned 
- {
-       request_queue_t *q = (request_queue_t *)data;
-+      blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
-+                              q->rq.count[READ] + q->rq.count[WRITE]);
-+
-       kblockd_schedule_work(&q->unplug_work);
- }
-@@ -1757,6 +1770,11 @@ void blk_cleanup_queue(request_queue_t *
-       if (q->queue_tags)
-               __blk_queue_free_tags(q);
-+      if (q->blk_trace) {
-+              blk_cleanup_trace(q->blk_trace);
-+              q->blk_trace = NULL;
-+      }
-+
-       kmem_cache_free(requestq_cachep, q);
- }
-@@ -2108,6 +2126,8 @@ rq_starved:
-       
-       rq_init(q, rq);
-       rq->rl = rl;
-+
-+      blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
- out:
-       return rq;
- }
-@@ -2136,6 +2156,8 @@ static struct request *get_request_wait(
-               if (!rq) {
-                       struct io_context *ioc;
-+                      blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-+
-                       __generic_unplug_device(q);
-                       spin_unlock_irq(q->queue_lock);
-                       io_schedule();
-@@ -2189,6 +2211,8 @@ EXPORT_SYMBOL(blk_get_request);
-  */
- void blk_requeue_request(request_queue_t *q, struct request *rq)
- {
-+      blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
-+
-       if (blk_rq_tagged(rq))
-               blk_queue_end_tag(q, rq);
-@@ -2820,6 +2844,8 @@ static int __make_request(request_queue_
-                       if (!q->back_merge_fn(q, req, bio))
-                               break;
-+                      blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-+
-                       req->biotail->bi_next = bio;
-                       req->biotail = bio;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-@@ -2835,6 +2861,8 @@ static int __make_request(request_queue_
-                       if (!q->front_merge_fn(q, req, bio))
-                               break;
-+                      blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-+
-                       bio->bi_next = req->bio;
-                       req->bio = bio;
-@@ -2952,6 +2980,7 @@ void generic_make_request(struct bio *bi
-       request_queue_t *q;
-       sector_t maxsector;
-       int ret, nr_sectors = bio_sectors(bio);
-+      dev_t old_dev;
-       might_sleep();
-       /* Test device or partition size, when known. */
-@@ -2978,6 +3007,8 @@ void generic_make_request(struct bio *bi
-        * NOTE: we don't repeat the blk_size check for each new device.
-        * Stacking drivers are expected to know what they are doing.
-        */
-+      maxsector = -1;
-+      old_dev = 0;
-       do {
-               char b[BDEVNAME_SIZE];
-@@ -3010,6 +3041,15 @@ end_io:
-                */
-               blk_partition_remap(bio);
-+              if (maxsector != -1)
-+                      blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
-+                                          maxsector);
-+
-+              blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
-+
-+              maxsector = bio->bi_sector;
-+              old_dev = bio->bi_bdev->bd_dev;
-+
-               ret = q->make_request_fn(q, bio);
-       } while (ret);
- }
-@@ -3129,6 +3169,8 @@ static int __end_that_request_first(stru
-       int total_bytes, bio_nbytes, error, next_idx = 0;
-       struct bio *bio;
-+      blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
-+
-       /*
-        * extend uptodate bool to allow < 0 value to be direct io error
-        */
-diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index 12d7b9b..880892a 100644
---- a/drivers/block/cciss.c
-+++ b/drivers/block/cciss.c
-@@ -38,6 +38,7 @@
- #include <linux/hdreg.h>
- #include <linux/spinlock.h>
- #include <linux/compat.h>
-+#include <linux/blktrace_api.h>
- #include <asm/uaccess.h>
- #include <asm/io.h>
-@@ -2330,6 +2331,7 @@ static inline void complete_command( ctl
-       cmd->rq->completion_data = cmd;
-       cmd->rq->errors = status;
-+      blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
-       blk_complete_request(cmd->rq);
- }
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 8c16359..8c979c2 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -17,6 +17,7 @@
- #include <linux/mempool.h>
- #include <linux/slab.h>
- #include <linux/idr.h>
-+#include <linux/blktrace_api.h>
- static const char *_name = DM_NAME;
-@@ -303,6 +304,8 @@ static void dec_pending(struct dm_io *io
-                       /* nudge anyone waiting on suspend queue */
-                       wake_up(&io->md->wait);
-+              blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
-+
-               bio_endio(io->bio, io->bio->bi_size, io->error);
-               free_io(io->md, io);
-       }
-@@ -361,6 +364,7 @@ static void __map_bio(struct dm_target *
-                     struct target_io *tio)
- {
-       int r;
-+      sector_t sector;
-       /*
-        * Sanity checks.
-@@ -376,10 +380,17 @@ static void __map_bio(struct dm_target *
-        * this io.
-        */
-       atomic_inc(&tio->io->io_count);
-+      sector = clone->bi_sector;
-       r = ti->type->map(ti, clone, &tio->info);
--      if (r > 0)
-+      if (r > 0) {
-               /* the bio has been remapped so dispatch it */
-+
-+              blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, 
-+                                  tio->io->bio->bi_bdev->bd_dev, sector, 
-+                                  clone->bi_sector);
-+
-               generic_make_request(clone);
-+      }
-       else if (r < 0) {
-               /* error the io and bail out */
-diff --git a/fs/bio.c b/fs/bio.c
-index bbc442b..8a1b0b6 100644
---- a/fs/bio.c
-+++ b/fs/bio.c
-@@ -25,6 +25,7 @@
- #include <linux/module.h>
- #include <linux/mempool.h>
- #include <linux/workqueue.h>
-+#include <linux/blktrace_api.h>
- #include <scsi/sg.h>          /* for struct sg_iovec */
- #define BIO_POOL_SIZE 256
-@@ -1094,6 +1095,9 @@ struct bio_pair *bio_split(struct bio *b
-       if (!bp)
-               return bp;
-+      blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
-+                              bi->bi_sector + first_sectors);
-+
-       BUG_ON(bi->bi_vcnt != 1);
-       BUG_ON(bi->bi_idx != 0);
-       atomic_set(&bp->cnt, 3);
-diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
-index 5dd0207..010e02b 100644
---- a/fs/compat_ioctl.c
-+++ b/fs/compat_ioctl.c
-@@ -72,6 +72,7 @@
- #include <linux/i2c-dev.h>
- #include <linux/wireless.h>
- #include <linux/atalk.h>
-+#include <linux/blktrace_api.h>
- #include <net/sock.h>          /* siocdevprivate_ioctl */
- #include <net/bluetooth/bluetooth.h>
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 02a585f..195c3b9 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -22,6 +22,7 @@ typedef struct request_queue request_que
- struct elevator_queue;
- typedef struct elevator_queue elevator_t;
- struct request_pm_state;
-+struct blk_trace;
- #define BLKDEV_MIN_RQ 4
- #define BLKDEV_MAX_RQ 128     /* Default maximum */
-@@ -416,6 +417,8 @@ struct request_queue
-       unsigned int            sg_reserved_size;
-       int                     node;
-+      struct blk_trace        *blk_trace;
-+
-       /*
-        * reserved for flush operations
-        */
-diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
-new file mode 100644
-index 0000000..026b995
---- /dev/null
-+++ b/include/linux/blktrace_api.h
-@@ -0,0 +1,215 @@
-+#ifndef BLKTRACE_H
-+#define BLKTRACE_H
-+
-+#include <linux/config.h>
-+#include <linux/blkdev.h>
-+#include <linux/relayfs_fs.h>
-+
-+/*
-+ * Trace categories
-+ */
-+enum blktrace_cat {
-+      BLK_TC_READ     = 1 << 0,       /* reads */
-+      BLK_TC_WRITE    = 1 << 1,       /* writes */
-+      BLK_TC_BARRIER  = 1 << 2,       /* barrier */
-+      BLK_TC_SYNC     = 1 << 3,       /* sync */
-+      BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
-+      BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
-+      BLK_TC_ISSUE    = 1 << 6,       /* issue */
-+      BLK_TC_COMPLETE = 1 << 7,       /* completions */
-+      BLK_TC_FS       = 1 << 8,       /* fs requests */
-+      BLK_TC_PC       = 1 << 9,       /* pc requests */
-+
-+      BLK_TC_END      = 1 << 15,      /* only 16-bits, reminder */
-+};
-+
-+#define BLK_TC_SHIFT          (16)
-+#define BLK_TC_ACT(act)               ((act) << BLK_TC_SHIFT)
-+
-+/*
-+ * Basic trace actions
-+ */
-+enum blktrace_act {
-+      __BLK_TA_QUEUE = 1,             /* queued */
-+      __BLK_TA_BACKMERGE,             /* back merged to existing rq */
-+      __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
-+      __BLK_TA_GETRQ,                 /* allocated new request */
-+      __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
-+      __BLK_TA_REQUEUE,               /* request requeued */
-+      __BLK_TA_ISSUE,                 /* sent to driver */
-+      __BLK_TA_COMPLETE,              /* completed by driver */
-+      __BLK_TA_PLUG,                  /* queue was plugged */
-+      __BLK_TA_UNPLUG_IO,             /* queue was unplugged by io */
-+      __BLK_TA_UNPLUG_TIMER,          /* queue was unplugged by timer */
-+      __BLK_TA_INSERT,                /* insert request */
-+      __BLK_TA_SPLIT,                 /* bio was split */
-+      __BLK_TA_BOUNCE,                /* bio was bounced */
-+      __BLK_TA_REMAP,                 /* bio was remapped */
-+};
-+
-+/*
-+ * Trace actions in full. Additionally, read or write is masked
-+ */
-+#define BLK_TA_QUEUE          (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_BACKMERGE      (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_FRONTMERGE     (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define       BLK_TA_GETRQ            (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define       BLK_TA_SLEEPRQ          (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define       BLK_TA_REQUEUE          (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
-+#define BLK_TA_ISSUE          (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
-+#define BLK_TA_COMPLETE               (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
-+#define BLK_TA_PLUG           (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_UNPLUG_IO      (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_UNPLUG_TIMER   (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_INSERT         (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_SPLIT          (__BLK_TA_SPLIT)
-+#define BLK_TA_BOUNCE         (__BLK_TA_BOUNCE)
-+#define BLK_TA_REMAP          (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
-+
-+#define BLK_IO_TRACE_MAGIC    0x65617400
-+#define BLK_IO_TRACE_VERSION  0x06
-+
-+/*
-+ * The trace itself
-+ */
-+struct blk_io_trace {
-+      u32 magic;              /* MAGIC << 8 | version */
-+      u32 sequence;           /* event number */
-+      u64 time;               /* in nanoseconds */
-+      u64 sector;             /* disk offset */
-+      u32 bytes;              /* transfer length */
-+      u32 action;             /* what happened */
-+      u32 pid;                /* who did it */
-+      u32 cpu;                /* on what cpu did it happen */
-+      u16 error;              /* completion error */
-+      u16 pdu_len;            /* length of data after this trace */
-+      u32 device;             /* device number */
-+      char comm[16];          /* task command name (TASK_COMM_LEN) */
-+};
-+
-+/*
-+ * The remap event
-+ */
-+struct blk_io_trace_remap {
-+      u32 device;
-+      u32 __pad;
-+      u64 sector;
-+};
-+
-+struct blk_trace {
-+      struct dentry *dir;
-+      struct rchan *rchan;
-+      struct dentry *dropped_file;
-+      atomic_t dropped;
-+      unsigned long *sequence;
-+      u32 dev;
-+      u16 act_mask;
-+      u64 start_lba;
-+      u64 end_lba;
-+      u32 pid;
-+};
-+
-+/*
-+ * User setup structure passed with BLKSTARTTRACE
-+ */
-+struct blk_user_trace_setup {
-+      char name[BDEVNAME_SIZE];       /* output */
-+      u16 act_mask;                   /* input */
-+      u32 buf_size;                   /* input */
-+      u32 buf_nr;                     /* input */
-+      u64 start_lba;
-+      u64 end_lba;
-+      u32 pid;
-+};
-+
-+#if defined(CONFIG_BLK_DEV_IO_TRACE)
-+extern int blk_start_trace(struct block_device *, char __user *);
-+extern int blk_stop_trace(struct block_device *);
-+extern void blk_cleanup_trace(struct blk_trace *);
-+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
-+
-+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-+                                  u32 what)
-+{
-+      struct blk_trace *bt = q->blk_trace;
-+      int rw = rq->flags & 0x07;
-+
-+      if (likely(!bt))
-+              return;
-+
-+      if (blk_pc_request(rq)) {
-+              what |= BLK_TC_ACT(BLK_TC_PC);
-+              __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
-+      } else  {
-+              what |= BLK_TC_ACT(BLK_TC_FS);
-+              __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
-+      }
-+}
-+
-+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-+                                   u32 what)
-+{
-+      struct blk_trace *bt = q->blk_trace;
-+
-+      if (likely(!bt))
-+              return;
-+
-+      __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-+}
-+
-+static inline void blk_add_trace_generic(struct request_queue *q,
-+                                       struct bio *bio, int rw, u32 what)
-+{
-+      struct blk_trace *bt = q->blk_trace;
-+
-+      if (likely(!bt))
-+              return;
-+
-+      if (bio)
-+              blk_add_trace_bio(q, bio, what);
-+      else
-+              __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-+}
-+
-+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
-+                                       struct bio *bio, unsigned int pdu)
-+{
-+      struct blk_trace *bt = q->blk_trace;
-+      u64 rpdu = cpu_to_be64(pdu);
-+
-+      if (likely(!bt))
-+              return;
-+
-+      if (bio)
-+              __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
-+      else
-+              __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-+}
-+
-+static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
-+                                     dev_t dev, sector_t from, sector_t to)
-+{
-+      struct blk_trace *bt = q->blk_trace;
-+      struct blk_io_trace_remap r;
-+
-+      if (likely(!bt))
-+              return;
-+
-+      r.device = cpu_to_be32(dev);
-+      r.sector = cpu_to_be64(to);
-+
-+      __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-+}
-+
-+#else /* !CONFIG_BLK_DEV_IO_TRACE */
-+#define blk_start_trace(bdev, arg)            (-EINVAL)
-+#define blk_stop_trace(bdev)                  (-EINVAL)
-+#define blk_cleanup_trace(bt)                 do { } while (0)
-+#define blk_add_trace_rq(q, rq, what)         do { } while (0)
-+#define blk_add_trace_bio(q, rq, what)                do { } while (0)
-+#define blk_add_trace_generic(q, rq, rw, what)        do { } while (0)
-+#define blk_add_trace_pdu_int(q, what, bio, pdu)      do { } while (0)
-+#define blk_add_trace_remap(q, bio, dev, f, t)        do {} while (0)
-+#endif /* CONFIG_BLK_DEV_IO_TRACE */
-+
-+#endif
-diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
-index 8fad50f..5bed09a 100644
---- a/include/linux/compat_ioctl.h
-+++ b/include/linux/compat_ioctl.h
-@@ -97,6 +97,8 @@ COMPATIBLE_IOCTL(BLKRRPART)
- COMPATIBLE_IOCTL(BLKFLSBUF)
- COMPATIBLE_IOCTL(BLKSECTSET)
- COMPATIBLE_IOCTL(BLKSSZGET)
-+COMPATIBLE_IOCTL(BLKSTARTTRACE)
-+COMPATIBLE_IOCTL(BLKSTOPTRACE)
- ULONG_IOCTL(BLKRASET)
- ULONG_IOCTL(BLKFRASET)
- /* RAID */
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index b77f260..5575284 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -196,6 +196,8 @@ extern int dir_notify_enable;
- #define BLKBSZGET  _IOR(0x12,112,size_t)
- #define BLKBSZSET  _IOW(0x12,113,size_t)
- #define BLKGETSIZE64 _IOR(0x12,114,size_t)    /* return device size in bytes (u64 *arg) */
-+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
-+#define BLKSTOPTRACE _IO(0x12,116)
- #define BMAP_IOCTL 1          /* obsolete - kept for compatibility */
- #define FIBMAP           _IO(0x00,1)  /* bmap access */
-diff --git a/mm/highmem.c b/mm/highmem.c
-index ce2e7e8..d0ea1ee 100644
---- a/mm/highmem.c
-+++ b/mm/highmem.c
-@@ -26,6 +26,7 @@
- #include <linux/init.h>
- #include <linux/hash.h>
- #include <linux/highmem.h>
-+#include <linux/blktrace_api.h>
- #include <asm/tlbflush.h>
- static mempool_t *page_pool, *isa_page_pool;
-@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q
-               pool = isa_page_pool;
-       }
-+      blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
-+
-       /*
-        * slow path
-        */
diff --git a/kernel/blk-trace-2.6.16-rc1-git-U1 b/kernel/blk-trace-2.6.16-rc1-git-U1
new file mode 100644 (file)
index 0000000..9acf2e7
--- /dev/null
@@ -0,0 +1,1074 @@
+diff --git a/block/Kconfig b/block/Kconfig
+index 27eaed9..377f6dd 100644
+--- a/block/Kconfig
++++ b/block/Kconfig
+@@ -11,15 +11,4 @@ config LBD
+         your machine, or if you want to have a raid or loopback device
+         bigger than 2TB.  Otherwise say N.
+-config BLK_DEV_IO_TRACE
+-      bool "Support for tracing block io actions"
+-      select RELAYFS_FS
+-      help
+-        Say Y here if you want to be able to trace the block layer actions
+-        on a given queue. Tracing allows you to see any traffic happening
+-        on a block device queue. For more information (and the user space
+-        support tools needed), fetch the blktrace app from:
+-
+-        git://brick.kernel.dk/data/git/blktrace.git
+-
+ source block/Kconfig.iosched
+diff --git a/block/Makefile b/block/Makefile
+index c05de0e..7e4f93e 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -8,5 +8,3 @@ obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosch
+ obj-$(CONFIG_IOSCHED_AS)      += as-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE)        += deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ)     += cfq-iosched.o
+-
+-obj-$(CONFIG_BLK_DEV_IO_TRACE)        += blktrace.o
+diff --git a/block/blktrace.c b/block/blktrace.c
+deleted file mode 100644
+index 6bfcdb8..0000000
+--- a/block/blktrace.c
++++ /dev/null
+@@ -1,382 +0,0 @@
+-#include <linux/config.h>
+-#include <linux/kernel.h>
+-#include <linux/blkdev.h>
+-#include <linux/blktrace_api.h>
+-#include <linux/percpu.h>
+-#include <linux/init.h>
+-#include <linux/mutex.h>
+-#include <asm/uaccess.h>
+-
+-static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+-
+-/*
+- * The worker for the various blk_add_trace*() types. Fills out a
+- * blk_io_trace structure and places it in a per-cpu subbuffer.
+- */
+-void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+-                   int rw, u32 what, int error, int pdu_len, void *pdu_data)
+-{
+-      struct blk_io_trace *t;
+-      unsigned long flags;
+-      unsigned long *sequence;
+-      pid_t pid;
+-      int cpu;
+-
+-      if (rw & (1 << BIO_RW_BARRIER))
+-              what |= BLK_TC_ACT(BLK_TC_BARRIER);
+-      if (rw & (1 << BIO_RW_SYNC))
+-              what |= BLK_TC_ACT(BLK_TC_SYNC);
+-
+-      if (rw & WRITE)
+-              what |= BLK_TC_ACT(BLK_TC_WRITE);
+-      else
+-              what |= BLK_TC_ACT(BLK_TC_READ);
+-
+-      if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+-              return;
+-      if (sector < bt->start_lba || sector > bt->end_lba)
+-              return;
+-
+-      pid = current->pid;
+-      if (bt->pid && pid != bt->pid)
+-              return;
+-
+-      /*
+-       * A word about the locking here - we disable interrupts to reserve
+-       * some space in the relayfs per-cpu buffer, to prevent an irq
+-       * from coming in and stepping on our toes. Once reserved, it's
+-       * enough to get preemption disabled to prevent read of this data
+-       * before we are through filling it. get_cpu()/put_cpu() does this
+-       * for us
+-       */
+-      local_irq_save(flags);
+-
+-      t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+-      if (unlikely(!t)) {
+-              local_irq_restore(flags);
+-              return;
+-      }
+-
+-      cpu = get_cpu();
+-
+-      sequence = per_cpu_ptr(bt->sequence, cpu);
+-      t->sequence = ++(*sequence);
+-      t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+-      t->cpu = cpu;
+-
+-      local_irq_restore(flags);
+-
+-      t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+-      t->device = bt->dev;
+-      t->sector = sector;
+-      t->bytes = bytes;
+-      t->action = what;
+-      t->error = error;
+-      t->pdu_len = pdu_len;
+-
+-      t->pid = pid;
+-      strncpy(t->comm, current->comm, sizeof(t->comm));
+-
+-      if (pdu_len)
+-              memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+-
+-      put_cpu();
+-}
+-
+-EXPORT_SYMBOL_GPL(__blk_add_trace);
+-
+-static struct dentry *blk_tree_root;
+-static struct mutex blk_tree_mutex;
+-
+-static inline void blk_remove_root(void)
+-{
+-      if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+-              blk_tree_root = NULL;
+-}
+-
+-static void blk_remove_tree(struct dentry *dir)
+-{
+-      mutex_lock(&blk_tree_mutex);
+-      relayfs_remove_dir(dir);
+-      blk_remove_root();
+-      mutex_unlock(&blk_tree_mutex);
+-}
+-
+-static struct dentry *blk_create_tree(const char *blk_name)
+-{
+-      struct dentry *dir = NULL;
+-
+-      mutex_lock(&blk_tree_mutex);
+-
+-      if (!blk_tree_root) {
+-              blk_tree_root = relayfs_create_dir("block", NULL);
+-              if (!blk_tree_root)
+-                      goto err;
+-      }
+-
+-      dir = relayfs_create_dir(blk_name, blk_tree_root);
+-      if (!dir)
+-              blk_remove_root();
+-
+-err:
+-      mutex_unlock(&blk_tree_mutex);
+-      return dir;
+-}
+-
+-void blk_cleanup_trace(struct blk_trace *bt)
+-{
+-      relay_close(bt->rchan);
+-      relayfs_remove_file(bt->dropped_file);
+-      blk_remove_tree(bt->dir);
+-      kfree(bt);
+-}
+-
+-int blk_stop_trace(struct block_device *bdev)
+-{
+-      request_queue_t *q = bdev_get_queue(bdev);
+-      struct blk_trace *bt = NULL;
+-      int ret = -EINVAL;
+-
+-      if (!q)
+-              return -ENXIO;
+-
+-      down(&bdev->bd_sem);
+-
+-      if (q->blk_trace) {
+-              bt = q->blk_trace;
+-              q->blk_trace = NULL;
+-              ret = 0;
+-      }
+-
+-      up(&bdev->bd_sem);
+-
+-      if (bt)
+-              blk_cleanup_trace(bt);
+-
+-      return ret;
+-}
+-
+-static int blk_dropped_open(struct inode *inode, struct file *filp)
+-{
+-      filp->private_data = inode->u.generic_ip;
+-      
+-      return 0;
+-}
+-
+-static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+-                              size_t count, loff_t *ppos)
+-{
+-      struct blk_trace *bt = filp->private_data;
+-      char buf[16];
+-
+-      snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+-      
+-      return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+-}
+-
+-static struct file_operations blk_dropped_fops = {
+-      .owner =        THIS_MODULE,
+-      .open =         blk_dropped_open,
+-      .read =         blk_dropped_read,
+-};
+-
+-/*
+- * Keep track of how many times we encountered a full subbuffer, to aid
+- * the user space app in telling how many lost events there were.
+- */
+-static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+-                                   void *prev_subbuf, size_t prev_padding)
+-{
+-      struct blk_trace *bt;
+-
+-      if (!relay_buf_full(buf))
+-              return 1;
+-
+-      bt = buf->chan->private_data;
+-      atomic_inc(&bt->dropped);
+-      return 0;
+-}
+-
+-static struct rchan_callbacks blk_relay_callbacks = {
+-      .subbuf_start = blk_subbuf_start_callback,
+-};
+-
+-/*
+- * Setup everything required to start tracing
+- */
+-int blk_start_trace(struct block_device *bdev, char __user *arg)
+-{
+-      request_queue_t *q = bdev_get_queue(bdev);
+-      struct blk_user_trace_setup buts;
+-      struct blk_trace *bt = NULL;
+-      struct dentry *dir = NULL;
+-      char b[BDEVNAME_SIZE];
+-      int ret, i;
+-
+-      if (!q)
+-              return -ENXIO;
+-
+-      if (copy_from_user(&buts, arg, sizeof(buts)))
+-              return -EFAULT;
+-
+-      if (!buts.buf_size || !buts.buf_nr)
+-              return -EINVAL;
+-
+-      strcpy(buts.name, bdevname(bdev, b));
+-
+-      /*
+-       * some device names have larger paths - convert the slashes
+-       * to underscores for this to work as expected
+-       */
+-      for (i = 0; i < strlen(buts.name); i++)
+-              if (buts.name[i] == '/')
+-                      buts.name[i] = '_';
+-
+-      if (copy_to_user(arg, &buts, sizeof(buts)))
+-              return -EFAULT;
+-
+-      down(&bdev->bd_sem);
+-      ret = -EBUSY;
+-      if (q->blk_trace)
+-              goto err;
+-
+-      ret = -ENOMEM;
+-      bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+-      if (!bt)
+-              goto err;
+-
+-      bt->sequence = alloc_percpu(unsigned long);
+-      if (!bt->sequence)
+-              goto err;
+-
+-      ret = -ENOENT;
+-      dir = blk_create_tree(buts.name);
+-      if (!dir)
+-              goto err;
+-
+-      bt->dir = dir;
+-      bt->dev = bdev->bd_dev;
+-      atomic_set(&bt->dropped, 0);
+-
+-      ret = -EIO;
+-      bt->dropped_file = relayfs_create_file("dropped", dir, 0, &blk_dropped_fops, bt);
+-      if (!bt->dropped_file)
+-              goto err;
+-
+-      bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+-      if (!bt->rchan)
+-              goto err;
+-      bt->rchan->private_data = bt;
+-
+-      bt->act_mask = buts.act_mask;
+-      if (!bt->act_mask)
+-              bt->act_mask = (u16) -1;
+-
+-      bt->start_lba = buts.start_lba;
+-      bt->end_lba = buts.end_lba;
+-      if (!bt->end_lba)
+-              bt->end_lba = -1ULL;
+-
+-      bt->pid = buts.pid;
+-
+-      q->blk_trace = bt;
+-      up(&bdev->bd_sem);
+-      return 0;
+-err:
+-      up(&bdev->bd_sem);
+-      if (bt && bt->dropped_file)
+-              relayfs_remove_file(bt->dropped_file);
+-      if (dir)
+-              blk_remove_tree(dir);
+-      if (bt) {
+-              if (bt->sequence)
+-                      free_percpu(bt->sequence);
+-              kfree(bt);
+-      }
+-      return ret;
+-}
+-
+-/*
+- * Average offset over two calls to sched_clock() with a gettimeofday()
+- * in the middle
+- */
+-static void blk_check_time(unsigned long long *t)
+-{
+-      unsigned long long a, b;
+-      struct timeval tv;
+-
+-      a = sched_clock();
+-      do_gettimeofday(&tv);
+-      b = sched_clock();
+-
+-      *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+-      *t -= (a + b) / 2;
+-}
+-
+-static void blk_trace_check_cpu_time(void *data)
+-{
+-      unsigned long long *t;
+-      int cpu = get_cpu();
+-
+-      t = &per_cpu(blk_trace_cpu_offset, cpu);
+-
+-      /*
+-       * Just call it twice, hopefully the second call will be cache hot
+-       * and a little more precise
+-       */
+-      blk_check_time(t);
+-      blk_check_time(t);
+-
+-      put_cpu();
+-}
+-
+-/*
+- * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
+- * timings
+- */
+-static void blk_trace_calibrate_offsets(void)
+-{
+-      unsigned long flags;
+-
+-      smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+-      local_irq_save(flags);
+-      blk_trace_check_cpu_time(NULL);
+-      local_irq_restore(flags);
+-}
+-
+-static void blk_trace_set_ht_offsets(void)
+-{
+-#if defined(CONFIG_SCHED_SMT)
+-      int cpu, i;
+-
+-      /*
+-       * now make sure HT siblings have the same time offset
+-       */
+-      preempt_disable();
+-      for_each_online_cpu(cpu) {
+-              unsigned long long *cpu_off, *sibling_off;
+-
+-              for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+-                      if (i == cpu)
+-                              continue;
+-
+-                      cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
+-                      sibling_off = &per_cpu(blk_trace_cpu_offset, i);
+-                      *sibling_off = *cpu_off;
+-              }
+-      }
+-      preempt_enable();
+-#endif
+-}
+-
+-static __init int blk_trace_init(void)
+-{
+-      mutex_init(&blk_tree_mutex);
+-      blk_trace_calibrate_offsets();
+-      blk_trace_set_ht_offsets();
+-
+-      return 0;
+-}
+-
+-module_init(blk_trace_init);
+-
+diff --git a/block/elevator.c b/block/elevator.c
+index af75304..96a61e0 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -33,7 +33,6 @@
+ #include <linux/init.h>
+ #include <linux/compiler.h>
+ #include <linux/delay.h>
+-#include <linux/blktrace_api.h>
+ #include <asm/uaccess.h>
+@@ -317,8 +316,6 @@ void __elv_add_request(request_queue_t *
+       struct list_head *pos;
+       unsigned ordseq;
+-      blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+-
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+@@ -477,7 +474,6 @@ struct request *elv_next_request(request
+                        * not be passed by new incoming requests
+                        */
+                       rq->flags |= REQ_STARTED;
+-                      blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+               }
+               if (!q->boundary_rq || q->boundary_rq == rq) {
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 63e67a2..e110949 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -5,7 +5,6 @@
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/smp_lock.h>
+-#include <linux/blktrace_api.h>
+ #include <asm/uaccess.h>
+ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+@@ -190,10 +189,6 @@ static int blkdev_locked_ioctl(struct fi
+               return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+       case BLKGETSIZE64:
+               return put_u64(arg, bdev->bd_inode->i_size);
+-      case BLKSTARTTRACE:
+-              return blk_start_trace(bdev, (char __user *) arg);
+-      case BLKSTOPTRACE:
+-              return blk_stop_trace(bdev);
+       }
+       return -ENOIOCTLCMD;
+ }
+diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
+index 4295103..f9fc07e 100644
+--- a/block/ll_rw_blk.c
++++ b/block/ll_rw_blk.c
+@@ -28,7 +28,6 @@
+ #include <linux/writeback.h>
+ #include <linux/interrupt.h>
+ #include <linux/cpu.h>
+-#include <linux/blktrace_api.h>
+ /*
+  * for max sense size
+@@ -1558,10 +1557,8 @@ void blk_plug_device(request_queue_t *q)
+       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+               return;
+-      if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
++      if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+               mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+-              blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+-      }
+ }
+ EXPORT_SYMBOL(blk_plug_device);
+@@ -1625,21 +1622,14 @@ static void blk_backing_dev_unplug(struc
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+-      if (q->unplug_fn) {
+-              blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+-                                      q->rq.count[READ] + q->rq.count[WRITE]);
+-
++      if (q->unplug_fn)
+               q->unplug_fn(q);
+-      }
+ }
+ static void blk_unplug_work(void *data)
+ {
+       request_queue_t *q = data;
+-      blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+-                              q->rq.count[READ] + q->rq.count[WRITE]);
+-
+       q->unplug_fn(q);
+ }
+@@ -1647,9 +1637,6 @@ static void blk_unplug_timeout(unsigned 
+ {
+       request_queue_t *q = (request_queue_t *)data;
+-      blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+-                              q->rq.count[READ] + q->rq.count[WRITE]);
+-
+       kblockd_schedule_work(&q->unplug_work);
+ }
+@@ -1772,11 +1759,6 @@ void blk_cleanup_queue(request_queue_t *
+       if (q->queue_tags)
+               __blk_queue_free_tags(q);
+-      if (q->blk_trace) {
+-              blk_cleanup_trace(q->blk_trace);
+-              q->blk_trace = NULL;
+-      }
+-
+       kmem_cache_free(requestq_cachep, q);
+ }
+@@ -2128,8 +2110,6 @@ rq_starved:
+       
+       rq_init(q, rq);
+       rq->rl = rl;
+-
+-      blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+ out:
+       return rq;
+ }
+@@ -2158,8 +2138,6 @@ static struct request *get_request_wait(
+               if (!rq) {
+                       struct io_context *ioc;
+-                      blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+-
+                       __generic_unplug_device(q);
+                       spin_unlock_irq(q->queue_lock);
+                       io_schedule();
+@@ -2213,8 +2191,6 @@ EXPORT_SYMBOL(blk_get_request);
+  */
+ void blk_requeue_request(request_queue_t *q, struct request *rq)
+ {
+-      blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+-
+       if (blk_rq_tagged(rq))
+               blk_queue_end_tag(q, rq);
+@@ -2849,8 +2825,6 @@ static int __make_request(request_queue_
+                       if (!q->back_merge_fn(q, req, bio))
+                               break;
+-                      blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+-
+                       req->biotail->bi_next = bio;
+                       req->biotail = bio;
+                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+@@ -2866,8 +2840,6 @@ static int __make_request(request_queue_
+                       if (!q->front_merge_fn(q, req, bio))
+                               break;
+-                      blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+-
+                       bio->bi_next = req->bio;
+                       req->bio = bio;
+@@ -2985,7 +2957,6 @@ void generic_make_request(struct bio *bi
+       request_queue_t *q;
+       sector_t maxsector;
+       int ret, nr_sectors = bio_sectors(bio);
+-      dev_t old_dev;
+       might_sleep();
+       /* Test device or partition size, when known. */
+@@ -3012,8 +2983,6 @@ void generic_make_request(struct bio *bi
+        * NOTE: we don't repeat the blk_size check for each new device.
+        * Stacking drivers are expected to know what they are doing.
+        */
+-      maxsector = -1;
+-      old_dev = 0;
+       do {
+               char b[BDEVNAME_SIZE];
+@@ -3046,15 +3015,6 @@ end_io:
+                */
+               blk_partition_remap(bio);
+-              if (maxsector != -1)
+-                      blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
+-                                          maxsector);
+-
+-              blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+-
+-              maxsector = bio->bi_sector;
+-              old_dev = bio->bi_bdev->bd_dev;
+-
+               ret = q->make_request_fn(q, bio);
+       } while (ret);
+ }
+@@ -3174,8 +3134,6 @@ static int __end_that_request_first(stru
+       int total_bytes, bio_nbytes, error, next_idx = 0;
+       struct bio *bio;
+-      blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+-
+       /*
+        * extend uptodate bool to allow < 0 value to be direct io error
+        */
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index 880892a..12d7b9b 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -38,7 +38,6 @@
+ #include <linux/hdreg.h>
+ #include <linux/spinlock.h>
+ #include <linux/compat.h>
+-#include <linux/blktrace_api.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -2331,7 +2330,6 @@ static inline void complete_command( ctl
+       cmd->rq->completion_data = cmd;
+       cmd->rq->errors = status;
+-      blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
+       blk_complete_request(cmd->rq);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index c8f3aa2..e9adeb9 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -17,7 +17,6 @@
+ #include <linux/mempool.h>
+ #include <linux/slab.h>
+ #include <linux/idr.h>
+-#include <linux/blktrace_api.h>
+ static const char *_name = DM_NAME;
+@@ -335,8 +334,6 @@ static void dec_pending(struct dm_io *io
+                       /* nudge anyone waiting on suspend queue */
+                       wake_up(&io->md->wait);
+-              blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
+-
+               bio_endio(io->bio, io->bio->bi_size, io->error);
+               free_io(io->md, io);
+       }
+@@ -395,7 +392,6 @@ static void __map_bio(struct dm_target *
+                     struct target_io *tio)
+ {
+       int r;
+-      sector_t sector;
+       /*
+        * Sanity checks.
+@@ -411,17 +407,10 @@ static void __map_bio(struct dm_target *
+        * this io.
+        */
+       atomic_inc(&tio->io->io_count);
+-      sector = clone->bi_sector;
+       r = ti->type->map(ti, clone, &tio->info);
+-      if (r > 0) {
++      if (r > 0)
+               /* the bio has been remapped so dispatch it */
+-
+-              blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, 
+-                                  tio->io->bio->bi_bdev->bd_dev, sector, 
+-                                  clone->bi_sector);
+-
+               generic_make_request(clone);
+-      }
+       else if (r < 0) {
+               /* error the io and bail out */
+diff --git a/fs/bio.c b/fs/bio.c
+index 0dd0d81..1f3bb50 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -25,7 +25,6 @@
+ #include <linux/module.h>
+ #include <linux/mempool.h>
+ #include <linux/workqueue.h>
+-#include <linux/blktrace_api.h>
+ #include <scsi/sg.h>          /* for struct sg_iovec */
+ #define BIO_POOL_SIZE 256
+@@ -1096,9 +1095,6 @@ struct bio_pair *bio_split(struct bio *b
+       if (!bp)
+               return bp;
+-      blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+-                              bi->bi_sector + first_sectors);
+-
+       BUG_ON(bi->bi_vcnt != 1);
+       BUG_ON(bi->bi_idx != 0);
+       atomic_set(&bp->cnt, 3);
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 010e02b..5dd0207 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -72,7 +72,6 @@
+ #include <linux/i2c-dev.h>
+ #include <linux/wireless.h>
+ #include <linux/atalk.h>
+-#include <linux/blktrace_api.h>
+ #include <net/sock.h>          /* siocdevprivate_ioctl */
+ #include <net/bluetooth/bluetooth.h>
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 266ce9d..860e7a4 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -22,7 +22,6 @@ typedef struct request_queue request_que
+ struct elevator_queue;
+ typedef struct elevator_queue elevator_t;
+ struct request_pm_state;
+-struct blk_trace;
+ #define BLKDEV_MIN_RQ 4
+ #define BLKDEV_MAX_RQ 128     /* Default maximum */
+@@ -417,8 +416,6 @@ struct request_queue
+       unsigned int            sg_reserved_size;
+       int                     node;
+-      struct blk_trace        *blk_trace;
+-
+       /*
+        * reserved for flush operations
+        */
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+deleted file mode 100644
+index b0fc5cf..0000000
+--- a/include/linux/blktrace_api.h
++++ /dev/null
+@@ -1,272 +0,0 @@
+-#ifndef BLKTRACE_H
+-#define BLKTRACE_H
+-
+-#include <linux/config.h>
+-#include <linux/blkdev.h>
+-#include <linux/relayfs_fs.h>
+-
+-/*
+- * Trace categories
+- */
+-enum blktrace_cat {
+-      BLK_TC_READ     = 1 << 0,       /* reads */
+-      BLK_TC_WRITE    = 1 << 1,       /* writes */
+-      BLK_TC_BARRIER  = 1 << 2,       /* barrier */
+-      BLK_TC_SYNC     = 1 << 3,       /* sync */
+-      BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
+-      BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
+-      BLK_TC_ISSUE    = 1 << 6,       /* issue */
+-      BLK_TC_COMPLETE = 1 << 7,       /* completions */
+-      BLK_TC_FS       = 1 << 8,       /* fs requests */
+-      BLK_TC_PC       = 1 << 9,       /* pc requests */
+-
+-      BLK_TC_END      = 1 << 15,      /* only 16-bits, reminder */
+-};
+-
+-#define BLK_TC_SHIFT          (16)
+-#define BLK_TC_ACT(act)               ((act) << BLK_TC_SHIFT)
+-
+-/*
+- * Basic trace actions
+- */
+-enum blktrace_act {
+-      __BLK_TA_QUEUE = 1,             /* queued */
+-      __BLK_TA_BACKMERGE,             /* back merged to existing rq */
+-      __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
+-      __BLK_TA_GETRQ,                 /* allocated new request */
+-      __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
+-      __BLK_TA_REQUEUE,               /* request requeued */
+-      __BLK_TA_ISSUE,                 /* sent to driver */
+-      __BLK_TA_COMPLETE,              /* completed by driver */
+-      __BLK_TA_PLUG,                  /* queue was plugged */
+-      __BLK_TA_UNPLUG_IO,             /* queue was unplugged by io */
+-      __BLK_TA_UNPLUG_TIMER,          /* queue was unplugged by timer */
+-      __BLK_TA_INSERT,                /* insert request */
+-      __BLK_TA_SPLIT,                 /* bio was split */
+-      __BLK_TA_BOUNCE,                /* bio was bounced */
+-      __BLK_TA_REMAP,                 /* bio was remapped */
+-};
+-
+-/*
+- * Trace actions in full. Additionally, read or write is masked
+- */
+-#define BLK_TA_QUEUE          (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_BACKMERGE      (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_FRONTMERGE     (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_GETRQ          (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_SLEEPRQ                (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_REQUEUE                (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+-#define BLK_TA_ISSUE          (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+-#define BLK_TA_COMPLETE               (__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+-#define BLK_TA_PLUG           (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_UNPLUG_IO      (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_UNPLUG_TIMER   (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_INSERT         (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+-#define BLK_TA_SPLIT          (__BLK_TA_SPLIT)
+-#define BLK_TA_BOUNCE         (__BLK_TA_BOUNCE)
+-#define BLK_TA_REMAP          (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
+-
+-#define BLK_IO_TRACE_MAGIC    0x65617400
+-#define BLK_IO_TRACE_VERSION  0x06
+-
+-/*
+- * The trace itself
+- */
+-struct blk_io_trace {
+-      u32 magic;              /* MAGIC << 8 | version */
+-      u32 sequence;           /* event number */
+-      u64 time;               /* in nanoseconds */
+-      u64 sector;             /* disk offset */
+-      u32 bytes;              /* transfer length */
+-      u32 action;             /* what happened */
+-      u32 pid;                /* who did it */
+-      u32 cpu;                /* on what cpu did it happen */
+-      u16 error;              /* completion error */
+-      u16 pdu_len;            /* length of data after this trace */
+-      u32 device;             /* device number */
+-      char comm[16];          /* task command name (TASK_COMM_LEN) */
+-};
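Since user space consumes these records raw from relayfs, a reader normally
validates the magic/version split before trusting anything else in the
record. A hedged sketch, assuming the struct above is available to user
space with identical layout (it packs to 64 bytes with no padding on common
ABIs):

#include <stdint.h>

#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_IO_TRACE_VERSION	0x06

/* Returns 0 if the record header looks valid, -1 otherwise. */
static int check_trace(const struct blk_io_trace *t)
{
	/* upper three bytes are the magic, low byte is the version */
	if ((t->magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
		return -1;
	if ((t->magic & 0xff) != BLK_IO_TRACE_VERSION)
		return -1;
	/* t->pdu_len bytes of payload follow this fixed-size header */
	return 0;
}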
+-
+-/*
+- * The remap event
+- */
+-struct blk_io_trace_remap {
+-      u32 device;
+-      u32 __pad;
+-      u64 sector;
+-};
+-
+-struct blk_trace {
+-      struct dentry *dir;
+-      struct rchan *rchan;
+-      struct dentry *dropped_file;
+-      atomic_t dropped;
+-      unsigned long *sequence;
+-      u32 dev;
+-      u16 act_mask;
+-      u64 start_lba;
+-      u64 end_lba;
+-      u32 pid;
+-};
+-
+-/*
+- * User setup structure passed with BLKSTARTTRACE
+- */
+-struct blk_user_trace_setup {
+-      char name[BDEVNAME_SIZE];       /* output */
+-      u16 act_mask;                   /* input */
+-      u32 buf_size;                   /* input */
+-      u32 buf_nr;                     /* input */
+-      u64 start_lba;
+-      u64 end_lba;
+-      u32 pid;
+-};
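In user space, tracing was started by filling in this structure and issuing
BLKSTARTTRACE (the ioctl numbers appear in the fs.h hunk further down) on an
open block device. A minimal sketch, assuming a zero act_mask selects all
categories and with detailed error reporting elided:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int start_trace(const char *dev)
{
	struct blk_user_trace_setup buts;
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* bytes per relayfs sub-buffer */
	buts.buf_nr = 4;		/* number of sub-buffers */
	buts.act_mask = 0;		/* assumed: 0 selects all categories */

	/* on success the kernel fills in buts.name (relayfs directory) */
	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep open; BLKSTOPTRACE on it ends the trace */
}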
+-
+-#if defined(CONFIG_BLK_DEV_IO_TRACE)
+-extern int blk_start_trace(struct block_device *, char __user *);
+-extern int blk_stop_trace(struct block_device *);
+-extern void blk_cleanup_trace(struct blk_trace *);
+-extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+-
+-/**
+- * blk_add_trace_rq - Add a trace for a request oriented action
+- * @q:                queue the io is for
+- * @rq:               the source request
+- * @what:     the action
+- *
+- * Description:
+- *     Records an action against a request. Will log the request offset + size.
+- *
+- **/
+-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+-                                  u32 what)
+-{
+-      struct blk_trace *bt = q->blk_trace;
+-      int rw = rq->flags & 0x07;
+-
+-      if (likely(!bt))
+-              return;
+-
+-      if (blk_pc_request(rq)) {
+-              what |= BLK_TC_ACT(BLK_TC_PC);
+-              __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+-      } else  {
+-              what |= BLK_TC_ACT(BLK_TC_FS);
+-              __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+-      }
+-}
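The category bits stamped here are what let a consumer tell packet-command
traffic from fs traffic later; on the reader side the test is just a mask on
the upper half of the action word. A sketch, with an illustrative name:

/* Does this record describe a packet-command (SCSI CDB) request? */
static int trace_is_pc(const struct blk_io_trace *t)
{
	return (t->action >> BLK_TC_SHIFT) & BLK_TC_PC;
}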
+-
+-/**
+- * blk_add_trace_bio - Add a trace for a bio oriented action
+- * @q:                queue the io is for
+- * @bio:      the source bio
+- * @what:     the action
+- *
+- * Description:
+- *     Records an action against a bio. Will log the bio offset + size.
+- *
+- **/
+-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+-                                   u32 what)
+-{
+-      struct blk_trace *bt = q->blk_trace;
+-
+-      if (likely(!bt))
+-              return;
+-
+-      __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+-}
+-
+-/**
+- * blk_add_trace_generic - Add a trace for a generic action
+- * @q:                queue the io is for
+- * @bio:      the source bio
+- * @rw:               the data direction
+- * @what:     the action
+- *
+- * Description:
+- *     Records a simple trace
+- *
+- **/
+-static inline void blk_add_trace_generic(struct request_queue *q,
+-                                       struct bio *bio, int rw, u32 what)
+-{
+-      struct blk_trace *bt = q->blk_trace;
+-
+-      if (likely(!bt))
+-              return;
+-
+-      if (bio)
+-              blk_add_trace_bio(q, bio, what);
+-      else
+-              __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+-}
+-
+-/**
+- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
+- * @q:                queue the io is for
+- * @what:     the action
+- * @bio:      the source bio
+- * @pdu:      the integer payload
+- *
+- * Description:
+- *     Adds a trace with some integer payload. This might be an unplug
+- *     event given as the action, with the queue depth at unplug time
+- *     given as the payload.
+- *
+- **/
+-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+-                                       struct bio *bio, unsigned int pdu)
+-{
+-      struct blk_trace *bt = q->blk_trace;
+-      u64 rpdu = cpu_to_be64(pdu);
+-
+-      if (likely(!bt))
+-              return;
+-
+-      if (bio)
+-              __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+-      else
+-              __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+-}
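The payload is written big-endian, so a consumer converts it back on the way
out. A sketch under the same layout assumptions as the earlier examples
(be64toh() is the glibc byte-order helper; the function name is
illustrative):

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* The 64-bit integer payload sits right after the fixed header. */
static uint64_t trace_pdu_int(const struct blk_io_trace *t)
{
	uint64_t v;

	memcpy(&v, (const char *)t + sizeof(*t), sizeof(v));
	return be64toh(v);	/* e.g. queue depth for unplug events */
}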
+-
+-/**
+- * blk_add_trace_remap - Add a trace for a remap operation
+- * @q:                queue the io is for
+- * @bio:      the source bio
+- * @dev:      target device
+- * @from:     source sector
+- * @to:               target sector
+- *
+- * Description:
+- *     Device mapper or raid targets sometimes need to remap a bio to a
+- *     different device/sector, e.g. because it spans a stripe (or
+- *     similar). Add a trace for that action.
+- *
+- **/
+-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+-                                     dev_t dev, sector_t from, sector_t to)
+-{
+-      struct blk_trace *bt = q->blk_trace;
+-      struct blk_io_trace_remap r;
+-
+-      if (likely(!bt))
+-              return;
+-
+-      r.device = cpu_to_be32(dev);
+-      r.sector = cpu_to_be64(to);
+-
+-      __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+-}
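The remap payload travels big-endian as well (struct blk_io_trace_remap
above). Reader side, under the same assumptions as the earlier sketches:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Extract the remap target from the payload following the header. */
static void trace_remap(const struct blk_io_trace *t,
			uint32_t *dev, uint64_t *sector)
{
	struct blk_io_trace_remap r;

	memcpy(&r, (const char *)t + sizeof(*t), sizeof(r));
	*dev = be32toh(r.device);	/* target device number */
	*sector = be64toh(r.sector);	/* target start sector */
}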
+-
+-#else /* !CONFIG_BLK_DEV_IO_TRACE */
+-#define blk_start_trace(bdev, arg)            (-EINVAL)
+-#define blk_stop_trace(bdev)                  (-EINVAL)
+-#define blk_cleanup_trace(bt)                 do { } while (0)
+-#define blk_add_trace_rq(q, rq, what)         do { } while (0)
+-#define blk_add_trace_bio(q, rq, what)                do { } while (0)
+-#define blk_add_trace_generic(q, rq, rw, what)        do { } while (0)
+-#define blk_add_trace_pdu_int(q, what, bio, pdu)      do { } while (0)
+-#define blk_add_trace_remap(q, bio, dev, f, t)        do { } while (0)
+-#endif /* CONFIG_BLK_DEV_IO_TRACE */
+-
+-#endif
+diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
+index 5bed09a..8fad50f 100644
+--- a/include/linux/compat_ioctl.h
++++ b/include/linux/compat_ioctl.h
+@@ -97,8 +97,6 @@ COMPATIBLE_IOCTL(BLKRRPART)
+ COMPATIBLE_IOCTL(BLKFLSBUF)
+ COMPATIBLE_IOCTL(BLKSECTSET)
+ COMPATIBLE_IOCTL(BLKSSZGET)
+-COMPATIBLE_IOCTL(BLKSTARTTRACE)
+-COMPATIBLE_IOCTL(BLKSTOPTRACE)
+ ULONG_IOCTL(BLKRASET)
+ ULONG_IOCTL(BLKFRASET)
+ /* RAID */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 35c6b45..e059da9 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -196,8 +196,6 @@ extern int dir_notify_enable;
+ #define BLKBSZGET  _IOR(0x12,112,size_t)
+ #define BLKBSZSET  _IOW(0x12,113,size_t)
+ #define BLKGETSIZE64 _IOR(0x12,114,size_t)    /* return device size in bytes (u64 *arg) */
+-#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+-#define BLKSTOPTRACE _IO(0x12,116)
+ #define BMAP_IOCTL 1          /* obsolete - kept for compatibility */
+ #define FIBMAP           _IO(0x00,1)  /* bmap access */
+diff --git a/mm/highmem.c b/mm/highmem.c
+index d0ea1ee..ce2e7e8 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -26,7 +26,6 @@
+ #include <linux/init.h>
+ #include <linux/hash.h>
+ #include <linux/highmem.h>
+-#include <linux/blktrace_api.h>
+ #include <asm/tlbflush.h>
+ static mempool_t *page_pool, *isa_page_pool;
+@@ -484,8 +483,6 @@ void blk_queue_bounce(request_queue_t *q
+               pool = isa_page_pool;
+       }
+-      blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+-
+       /*
+        * slow path
+        */