blktrace.git: kernel/blk-trace-2.6.14-rc1-git-G1
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -419,6 +419,14 @@ config LBD
          your machine, or if you want to have a raid or loopback device
          bigger than 2TB.  Otherwise say N.

+config BLK_DEV_IO_TRACE
+       bool "Support for tracing block io actions"
+       select RELAYFS_FS
+       help
+         Say Y here if you want to be able to trace the block layer actions
+         on a given queue.
+
+
 config CDROM_PKTCDVD
        tristate "Packet writing on CD/DVD media"
        depends on !UML
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)         += viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)      += sx8.o
 obj-$(CONFIG_BLK_DEV_UB)       += ub.o

+obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
+#include <linux/blktrace.h>

 #include <asm/uaccess.h>

@@ -305,6 +306,8 @@ void elv_requeue_request(request_queue_t
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                       int plug)
 {
+       blk_add_trace_rq(q, rq, BLK_TA_QUEUE);
+
        /*
         * barriers implicitly indicate back insertion
         */
@@ -371,6 +374,9 @@ struct request *elv_next_request(request
        int ret;

        while ((rq = __elv_next_request(q)) != NULL) {
+
+               blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
                /*
                 * just mark as started even if we don't start it, a request
                 * that has been delayed should not be passed by new incoming
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
@@ -4,6 +4,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace.h>
 #include <asm/uaccess.h>

 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
                return put_ulong(arg, bdev->bd_inode->i_size >> 9);
        case BLKGETSIZE64:
                return put_u64(arg, bdev->bd_inode->i_size);
+       case BLKSTARTTRACE:
+               return blk_start_trace(bdev, (char __user *) arg);
+       case BLKSTOPTRACE:
+               return blk_stop_trace(bdev);
        }
        return -ENOIOCTLCMD;
 }
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -29,6 +29,7 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/blktrace.h>

 /*
  * for max sense size
@@ -1422,8 +1423,10 @@ void blk_plug_device(request_queue_t *q)
        if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                return;

-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+       }
 }

 EXPORT_SYMBOL(blk_plug_device);
@@ -1487,14 +1490,27 @@ static void blk_backing_dev_unplug(struc
        /*
         * devices don't necessarily have an ->unplug_fn defined
         */
-       if (q->unplug_fn)
+       if (q->unplug_fn) {
+               if (test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+                       int nrq = q->rq.count[READ] + q->rq.count[WRITE];
+
+                       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG, nrq);
+               }
+
                q->unplug_fn(q);
+       }
 }

 static void blk_unplug_work(void *data)
 {
        request_queue_t *q = data;

+       if (test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+               int nrq = q->rq.count[READ] + q->rq.count[WRITE];
+
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG, nrq);
+       }
+
        q->unplug_fn(q);
 }

@@ -1624,6 +1640,11 @@ void blk_cleanup_queue(request_queue_t *
        if (q->queue_tags)
                __blk_queue_free_tags(q);

+       if (q->blk_trace) {
+               blk_cleanup_trace(q->blk_trace);
+               q->blk_trace = NULL;
+       }
+
        blk_queue_ordered(q, QUEUE_ORDERED_NONE);

        kmem_cache_free(requestq_cachep, q);
@@ -1970,6 +1991,8 @@ rq_starved:

        rq_init(q, rq);
        rq->rl = rl;
+
+       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
 }
@@ -1998,6 +2021,8 @@ static struct request *get_request_wait(
                if (!rq) {
                        struct io_context *ioc;

+                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
                        __generic_unplug_device(q);
                        spin_unlock_irq(q->queue_lock);
                        io_schedule();
@@ -2051,6 +2076,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
        if (blk_rq_tagged(rq))
                blk_queue_end_tag(q, rq);

@@ -2714,6 +2741,8 @@ static int __make_request(request_queue_
                        if (!q->back_merge_fn(q, req, bio))
                                break;

+                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
                        req->biotail->bi_next = bio;
                        req->biotail = bio;
                        req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2729,6 +2758,8 @@ static int __make_request(request_queue_
                        if (!q->front_merge_fn(q, req, bio))
                                break;

+                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
                        bio->bi_next = req->bio;
                        req->bio = bio;

@@ -3030,6 +3061,10 @@ end_io:
                blk_partition_remap(bio);

                ret = q->make_request_fn(q, bio);
+
+               if (ret)
+                       blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
        } while (ret);
 }

@@ -3148,6 +3183,8 @@ static int __end_that_request_first(stru
        int total_bytes, bio_nbytes, error, next_idx = 0;
        struct bio *bio;

+       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
        /*
         * extend uptodate bool to allow < 0 value to be direct io error
         */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_que
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;

 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128     /* Default maximum */
@@ -412,6 +413,8 @@ struct request_queue
         */
        struct request          *flush_rq;
        unsigned char           ordered;
+
+       struct blk_trace        *blk_trace;
 };

 enum {
diff --git a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -196,6 +196,8 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)     /* return device size in bytes (u64 *arg) */
+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKSTOPTRACE _IO(0x12,116)

 #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
 #define FIBMAP    _IO(0x00,1)  /* bmap access */
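
For reference, a minimal user-space sketch of how the two ioctls above might be
driven. This is illustrative only and not part of the patch: it hand-copies the
blk_user_trace_setup layout and the 32-byte BDEVNAME_SIZE from the blktrace.h
added below, and the buffer sizes are arbitrary example values.

/* blkstart.c - illustrative sketch only, not part of the patch */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define BDEVNAME_SIZE  32               /* matches the kernel's definition */

struct blk_user_trace_setup {           /* must match include/linux/blktrace.h */
        char name[BDEVNAME_SIZE];       /* output: bdevname filled in by the kernel */
        uint16_t act_mask;              /* input: 0 means "trace everything" */
        uint32_t buf_size;              /* input: size of each relayfs sub-buffer */
        uint32_t buf_nr;                /* input: number of sub-buffers */
};

#define BLKSTARTTRACE _IOWR(0x12, 115, struct blk_user_trace_setup)
#define BLKSTOPTRACE  _IO(0x12, 116)

int main(int argc, char *argv[])
{
        struct blk_user_trace_setup buts;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&buts, 0, sizeof(buts));
        buts.buf_size = 128 * 1024;     /* example values */
        buts.buf_nr = 4;

        if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
                perror("BLKSTARTTRACE");
                return 1;
        }
        printf("tracing %s, relayfs directory block/%s\n", argv[1], buts.name);

        sleep(10);                      /* let some io happen */

        if (ioctl(fd, BLKSTOPTRACE) < 0)
                perror("BLKSTOPTRACE");
        close(fd);
        return 0;
}
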
--- /dev/null   2005-09-09 21:24:12.000000000 +0200
+++ linux-2.6/drivers/block/blktrace.c  2005-09-15 14:13:03.000000000 +0200
@@ -0,0 +1,224 @@
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+                    int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+       struct blk_io_trace t;
+       unsigned long flags;
+       int cpu;
+
+       if (rw & (1 << BIO_RW_BARRIER))
+               what |= BLK_TC_ACT(BLK_TC_BARRIER);
+       if (rw & (1 << BIO_RW_SYNC))
+               what |= BLK_TC_ACT(BLK_TC_SYNC);
+
+       if (rw & WRITE)
+               what |= BLK_TC_ACT(BLK_TC_WRITE);
+       else
+               what |= BLK_TC_ACT(BLK_TC_READ);
+
+       if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+               return;
+
+       t.magic         = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+       t.sequence      = atomic_add_return(1, &bt->sequence);
+
+       cpu = get_cpu();
+       t.cpu           = cpu;
+       t.time          = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+       put_cpu();
+
+       t.device        = bt->dev;
+       t.sector        = sector;
+       t.bytes         = bytes;
+       t.action        = what;
+       t.error         = error;
+       t.pdu_len       = pdu_len;
+
+       t.pid           = current->pid;
+       memcpy(t.comm, current->comm, sizeof(t.comm));
+
+       local_irq_save(flags);
+       __relay_write(bt->rchan, &t, sizeof(t));
+       if (pdu_len)
+               __relay_write(bt->rchan, pdu_data, pdu_len);
+       local_irq_restore(flags);
+}
+
+static struct dentry *blk_tree_root;
+static DECLARE_MUTEX(blk_tree_mutex);
+
+static inline void blk_remove_root(void)
+{
+       if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
+               blk_tree_root = NULL;
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+       down(&blk_tree_mutex);
+       relayfs_remove_dir(dir);
+       blk_remove_root();
+       up(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+       struct dentry *dir = NULL;
+
+       down(&blk_tree_mutex);
+
+       if (!blk_tree_root) {
+               blk_tree_root = relayfs_create_dir("block", NULL);
+               if (!blk_tree_root)
+                       goto err;
+       }
+
+       dir = relayfs_create_dir(blk_name, blk_tree_root);
+       if (!dir)
+               blk_remove_root();
+
+err:
+       up(&blk_tree_mutex);
+       return dir;
+}
+
+void blk_cleanup_trace(struct blk_trace *bt)
+{
+       relay_close(bt->rchan);
+       blk_remove_tree(bt->dir);
+       kfree(bt);
+}
+
+int blk_stop_trace(struct block_device *bdev)
+{
+       request_queue_t *q = bdev_get_queue(bdev);
+       struct blk_trace *bt = NULL;
+       int ret = -EINVAL;
+
+       if (!q)
+               return -ENXIO;
+
+       down(&bdev->bd_sem);
+
+       if (q->blk_trace) {
+               bt = q->blk_trace;
+               q->blk_trace = NULL;
+               ret = 0;
+       }
+
+       up(&bdev->bd_sem);
+
+       if (bt)
+               blk_cleanup_trace(bt);
+
+       return ret;
+}
+
+int blk_start_trace(struct block_device *bdev, char __user *arg)
+{
+       request_queue_t *q = bdev_get_queue(bdev);
+       struct blk_user_trace_setup buts;
+       struct blk_trace *bt = NULL;
+       struct dentry *dir = NULL;
+       char b[BDEVNAME_SIZE];
+       int ret;
+
+       if (!q)
+               return -ENXIO;
+
+       if (copy_from_user(&buts, arg, sizeof(buts)))
+               return -EFAULT;
+
+       if (!buts.buf_size || !buts.buf_nr)
+               return -EINVAL;
+
+       strcpy(buts.name, bdevname(bdev, b));
+
+       if (copy_to_user(arg, &buts, sizeof(buts)))
+               return -EFAULT;
+
+       down(&bdev->bd_sem);
+       ret = -EBUSY;
+       if (q->blk_trace)
+               goto err;
+
+       ret = -ENOMEM;
+       bt = kmalloc(sizeof(*bt), GFP_KERNEL);
+       if (!bt)
+               goto err;
+
+       ret = -ENOENT;
+       dir = blk_create_tree(bdevname(bdev, b));
+       if (!dir)
+               goto err;
+
+       bt->dir = dir;
+       bt->dev = bdev->bd_dev;
+       atomic_set(&bt->sequence, 0);
+
+       ret = -EIO;
+       bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
+       if (!bt->rchan)
+               goto err;
+
+       bt->act_mask = buts.act_mask;
+       if (!bt->act_mask)
+               bt->act_mask = (u16) -1;
+
+       q->blk_trace = bt;
+       up(&bdev->bd_sem);
+       return 0;
+err:
+       up(&bdev->bd_sem);
+       if (dir)
+               blk_remove_tree(dir);
+       if (bt)
+               kfree(bt);
+       return ret;
+}
+
+static void blk_trace_check_cpu_time(void *data)
+{
+       unsigned long long a, b, *t;
+       struct timeval tv;
+       int cpu = get_cpu();
+
+       t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+       a = sched_clock();
+       do_gettimeofday(&tv);
+       b = sched_clock();
+
+       *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+       *t -= (a + b) / 2;
+       put_cpu();
+}
+
+static int blk_trace_calibrate_offsets(void)
+{
+       unsigned long flags;
+
+       smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+       local_irq_save(flags);
+       blk_trace_check_cpu_time(NULL);
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+static __init int blk_trace_init(void)
+{
+       return blk_trace_calibrate_offsets();
+}
+
+module_init(blk_trace_init);
+
--- /dev/null   2005-09-09 21:24:12.000000000 +0200
+++ linux-2.6/include/linux/blktrace.h  2005-09-15 14:13:12.000000000 +0200
@@ -0,0 +1,169 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relayfs_fs.h>
+
+/*
+ * Trace categories
+ */
+enum {
+       BLK_TC_READ     = 1 << 0,       /* reads */
+       BLK_TC_WRITE    = 1 << 1,       /* writes */
+       BLK_TC_BARRIER  = 1 << 2,       /* barrier */
+       BLK_TC_SYNC     = 1 << 3,       /* sync */
+       BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
+       BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
+       BLK_TC_ISSUE    = 1 << 6,       /* issue */
+       BLK_TC_COMPLETE = 1 << 7,       /* completions */
+       BLK_TC_FS       = 1 << 8,       /* fs requests */
+       BLK_TC_PC       = 1 << 9,       /* pc requests */
+
+       BLK_TC_END      = 1 << 15,      /* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT           (16)
+#define BLK_TC_ACT(act)                ((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum {
+       __BLK_TA_QUEUE = 1,             /* queued */
+       __BLK_TA_BACKMERGE,             /* back merged to existing rq */
+       __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
+       __BLK_TA_GETRQ,                 /* allocated new request */
+       __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
+       __BLK_TA_REQUEUE,               /* request requeued */
+       __BLK_TA_ISSUE,                 /* sent to driver */
+       __BLK_TA_COMPLETE,              /* completed by driver */
+       __BLK_TA_PLUG,                  /* queue was plugged */
+       __BLK_TA_UNPLUG,                /* queue was unplugged */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE           (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE       (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE      (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ           (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ         (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE         (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE           (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE                (__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG            (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG          (__BLK_TA_UNPLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+
+#define BLK_IO_TRACE_MAGIC     0x65617400
+#define BLK_IO_TRACE_VERSION   0x05
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+       u32 magic;              /* MAGIC << 8 | version */
+       u32 sequence;           /* event number */
+       u64 time;               /* in nanoseconds */
+       u64 sector;             /* disk offset */
+       u32 bytes;              /* transfer length */
+       u32 action;             /* what happened */
+       u32 pid;                /* who did it */
+       u32 cpu;                /* on what cpu did it happen */
+       u16 error;              /* completion error */
+       u16 pdu_len;            /* length of data after this trace */
+       u32 device;             /* device number */
+       char comm[16];          /* task command name (TASK_COMM_LEN) */
+};
+
+struct blk_trace {
+       struct dentry *dir;
+       struct rchan *rchan;
+       atomic_t sequence;
+       u32 dev;
+       u16 act_mask;
+};
+
+/*
+ * User setup structure passed with BLKSTARTTRACE
+ */
+struct blk_user_trace_setup {
+       char name[BDEVNAME_SIZE];       /* output */
+       u16 act_mask;                   /* input */
+       u32 buf_size;                   /* input */
+       u32 buf_nr;                     /* input */
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_start_trace(struct block_device *, char __user *);
+extern int blk_stop_trace(struct block_device *);
+extern void blk_cleanup_trace(struct blk_trace *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+                                   u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+       int rw = rq->flags & 0x07;
+
+       if (likely(!bt))
+               return;
+
+       if (blk_pc_request(rq)) {
+               what |= BLK_TC_ACT(BLK_TC_PC);
+               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+       } else {
+               what |= BLK_TC_ACT(BLK_TC_FS);
+               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+       }
+}
+
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+                                    u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static inline void blk_add_trace_generic(struct request_queue *q,
+                                        struct bio *bio, int rw, u32 what)
+{
+       struct blk_trace *bt = q->blk_trace;
+
+       if (likely(!bt))
+               return;
+
+       if (bio)
+               blk_add_trace_bio(q, bio, what);
+       else
+               __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+                                        unsigned int pdu)
+{
+       struct blk_trace *bt = q->blk_trace;
+       u64 rpdu = pdu;
+
+       if (likely(!bt))
+               return;
+
+       __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_start_trace(bdev, arg)             (-EINVAL)
+#define blk_stop_trace(bdev)                   (-EINVAL)
+#define blk_cleanup_trace(bt)                  do { } while (0)
+#define blk_add_trace_rq(q, rq, what)          do { } while (0)
+#define blk_add_trace_bio(q, rq, what)         do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
+#define blk_add_trace_pdu_int(q, what, pdu)    do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif
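
Again purely as an illustration, not part of the patch: once tracing is active,
each per-cpu file created by relay_open() above (typically block/<devname>/trace0,
trace1, ... under wherever relayfs is mounted) carries a stream of struct
blk_io_trace records, each followed by pdu_len bytes of payload. A sketch of a
same-architecture, same-endian reader, hand-copying the record layout from the
blktrace.h above:

/* traceread.c - illustrative reader sketch only, not part of the patch */
#include <stdio.h>
#include <stdint.h>

#define BLK_IO_TRACE_MAGIC 0x65617400

struct blk_io_trace {                   /* must match include/linux/blktrace.h */
        uint32_t magic;
        uint32_t sequence;
        uint64_t time;
        uint64_t sector;
        uint32_t bytes;
        uint32_t action;
        uint32_t pid;
        uint32_t cpu;
        uint16_t error;
        uint16_t pdu_len;
        uint32_t device;
        char comm[16];
};

int main(int argc, char *argv[])
{
        struct blk_io_trace t;
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb"))) {
                fprintf(stderr, "usage: %s <relayfs trace file>\n", argv[0]);
                return 1;
        }

        while (fread(&t, sizeof(t), 1, f) == 1) {
                if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
                        fprintf(stderr, "bad magic, stream out of sync\n");
                        break;
                }
                printf("%8u: cpu%u pid %u (%.16s) action 0x%08x sector %llu bytes %u err %u\n",
                       t.sequence, t.cpu, t.pid, t.comm, t.action,
                       (unsigned long long) t.sector, t.bytes, t.error);
                /* skip the optional payload that follows the record */
                if (t.pdu_len && fseek(f, t.pdu_len, SEEK_CUR) != 0)
                        break;
        }
        fclose(f);
        return 0;
}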