[PATCH] blkparse: split queue and insert into two operations
[blktrace.git] / kernel / blk-trace-2.6.14-rc2-git-H0
1 diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
2 --- a/drivers/block/Kconfig
3 +++ b/drivers/block/Kconfig
4 @@ -419,6 +419,14 @@ config LBD
5           your machine, or if you want to have a raid or loopback device
6           bigger than 2TB.  Otherwise say N.
7  
8 +config BLK_DEV_IO_TRACE
9 +       bool "Support for tracing block io actions"
10 +       select RELAYFS_FS
11 +       help
12 +         Say Y here if you want to be able to trace the block layer actions
13 +         on a given queue.
14 +
15 +
16  config CDROM_PKTCDVD
17         tristate "Packet writing on CD/DVD media"
18         depends on !UML
19 diff --git a/drivers/block/Makefile b/drivers/block/Makefile
20 --- a/drivers/block/Makefile
21 +++ b/drivers/block/Makefile
22 @@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)         += viodasd.o
23  obj-$(CONFIG_BLK_DEV_SX8)      += sx8.o
24  obj-$(CONFIG_BLK_DEV_UB)       += ub.o
25  
26 +obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
27 +
28 diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
29 --- a/drivers/block/elevator.c
30 +++ b/drivers/block/elevator.c
31 @@ -34,6 +34,7 @@
32  #include <linux/slab.h>
33  #include <linux/init.h>
34  #include <linux/compiler.h>
35 +#include <linux/blktrace.h>
36  
37  #include <asm/uaccess.h>
38  
39 @@ -305,6 +306,8 @@ void elv_requeue_request(request_queue_t
40  void __elv_add_request(request_queue_t *q, struct request *rq, int where,
41                        int plug)
42  {
43 +       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
44 +
45         /*
46          * barriers implicitly indicate back insertion
47          */
48 @@ -371,6 +374,9 @@ struct request *elv_next_request(request
49         int ret;
50  
51         while ((rq = __elv_next_request(q)) != NULL) {
52 +
53 +               blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
54 +
55                 /*
56                  * just mark as started even if we don't start it, a request
57                  * that has been delayed should not be passed by new incoming
58 diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
59 --- a/drivers/block/ioctl.c
60 +++ b/drivers/block/ioctl.c
61 @@ -4,6 +4,7 @@
62  #include <linux/backing-dev.h>
63  #include <linux/buffer_head.h>
64  #include <linux/smp_lock.h>
65 +#include <linux/blktrace.h>
66  #include <asm/uaccess.h>
67  
68  static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
69 @@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
70                 return put_ulong(arg, bdev->bd_inode->i_size >> 9);
71         case BLKGETSIZE64:
72                 return put_u64(arg, bdev->bd_inode->i_size);
73 +       case BLKSTARTTRACE:
74 +               return blk_start_trace(bdev, (char __user *) arg);
75 +       case BLKSTOPTRACE:
76 +               return blk_stop_trace(bdev);
77         }
78         return -ENOIOCTLCMD;
79  }
80 diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
81 --- a/drivers/block/ll_rw_blk.c
82 +++ b/drivers/block/ll_rw_blk.c
83 @@ -29,6 +29,7 @@
84  #include <linux/swap.h>
85  #include <linux/writeback.h>
86  #include <linux/blkdev.h>
87 +#include <linux/blktrace.h>
88  
89  /*
90   * for max sense size
91 @@ -1422,8 +1423,10 @@ void blk_plug_device(request_queue_t *q)
92         if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
93                 return;
94  
95 -       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
96 +       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
97                 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
98 +               blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
99 +       }
100  }
101  
102  EXPORT_SYMBOL(blk_plug_device);
103 @@ -1487,8 +1490,12 @@ static void blk_backing_dev_unplug(struc
104         /*
105          * devices don't necessarily have an ->unplug_fn defined
106          */
107 -       if (q->unplug_fn)
108 +       if (q->unplug_fn) {
109 +               int nrq = q->rq.count[READ] + q->rq.count[WRITE];
110 +
111 +               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, nrq);
112                 q->unplug_fn(q);
113 +       }
114  }
115  
116  static void blk_unplug_work(void *data)
117 @@ -1501,7 +1508,9 @@ static void blk_unplug_work(void *data)
118  static void blk_unplug_timeout(unsigned long data)
119  {
120         request_queue_t *q = (request_queue_t *)data;
121 +       int nrq = q->rq.count[READ] + q->rq.count[WRITE];
122  
123 +       blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, nrq);
124         kblockd_schedule_work(&q->unplug_work);
125  }
126  
127 @@ -1624,6 +1633,11 @@ void blk_cleanup_queue(request_queue_t *
128         if (q->queue_tags)
129                 __blk_queue_free_tags(q);
130  
131 +       if (q->blk_trace) {
132 +               blk_cleanup_trace(q->blk_trace);
133 +               q->blk_trace = NULL;
134 +       }
135 +
136         blk_queue_ordered(q, QUEUE_ORDERED_NONE);
137  
138         kmem_cache_free(requestq_cachep, q);
139 @@ -1970,6 +1984,8 @@ rq_starved:
140         
141         rq_init(q, rq);
142         rq->rl = rl;
143 +
144 +       blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
145  out:
146         return rq;
147  }
148 @@ -1998,6 +2014,8 @@ static struct request *get_request_wait(
149                 if (!rq) {
150                         struct io_context *ioc;
151  
152 +                       blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
153 +
154                         __generic_unplug_device(q);
155                         spin_unlock_irq(q->queue_lock);
156                         io_schedule();
157 @@ -2051,6 +2069,8 @@ EXPORT_SYMBOL(blk_get_request);
158   */
159  void blk_requeue_request(request_queue_t *q, struct request *rq)
160  {
161 +       blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
162 +
163         if (blk_rq_tagged(rq))
164                 blk_queue_end_tag(q, rq);
165  
166 @@ -2714,6 +2734,8 @@ static int __make_request(request_queue_
167                         if (!q->back_merge_fn(q, req, bio))
168                                 break;
169  
170 +                       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
171 +
172                         req->biotail->bi_next = bio;
173                         req->biotail = bio;
174                         req->nr_sectors = req->hard_nr_sectors += nr_sectors;
175 @@ -2729,6 +2751,8 @@ static int __make_request(request_queue_
176                         if (!q->front_merge_fn(q, req, bio))
177                                 break;
178  
179 +                       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
180 +
181                         bio->bi_next = req->bio;
182                         req->bio = bio;
183  
184 @@ -3029,6 +3053,8 @@ end_io:
185                  */
186                 blk_partition_remap(bio);
187  
188 +               blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
189 +
190                 ret = q->make_request_fn(q, bio);
191         } while (ret);
192  }
193 @@ -3148,6 +3174,8 @@ static int __end_that_request_first(stru
194         int total_bytes, bio_nbytes, error, next_idx = 0;
195         struct bio *bio;
196  
197 +       blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
198 +
199         /*
200          * extend uptodate bool to allow < 0 value to be direct io error
201          */
202 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
203 --- a/include/linux/blkdev.h
204 +++ b/include/linux/blkdev.h
205 @@ -22,6 +22,7 @@ typedef struct request_queue request_que
206  struct elevator_queue;
207  typedef struct elevator_queue elevator_t;
208  struct request_pm_state;
209 +struct blk_trace;
210  
211  #define BLKDEV_MIN_RQ  4
212  #define BLKDEV_MAX_RQ  128     /* Default maximum */
213 @@ -412,6 +413,8 @@ struct request_queue
214          */
215         struct request          *flush_rq;
216         unsigned char           ordered;
217 +
218 +       struct blk_trace        *blk_trace;
219  };
220  
221  enum {
222 diff --git a/include/linux/fs.h b/include/linux/fs.h
223 --- a/include/linux/fs.h
224 +++ b/include/linux/fs.h
225 @@ -196,6 +196,8 @@ extern int dir_notify_enable;
226  #define BLKBSZGET  _IOR(0x12,112,size_t)
227  #define BLKBSZSET  _IOW(0x12,113,size_t)
228  #define BLKGETSIZE64 _IOR(0x12,114,size_t)     /* return device size in bytes (u64 *arg) */
229 +#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
230 +#define BLKSTOPTRACE _IO(0x12,116)
231  
232  #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
233  #define FIBMAP    _IO(0x00,1)  /* bmap access */
234 --- /dev/null   2004-06-30 22:03:36.000000000 +0200
235 +++ linux-2.6/drivers/block/blktrace.c  2005-09-21 12:19:09.000000000 +0200
236 @@ -0,0 +1,224 @@
237 +#include <linux/config.h>
238 +#include <linux/kernel.h>
239 +#include <linux/blkdev.h>
240 +#include <linux/blktrace.h>
241 +#include <linux/percpu.h>
242 +#include <linux/init.h>
243 +#include <asm/uaccess.h>
244 +
245 +static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
246 +
247 +void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
248 +                    int rw, u32 what, int error, int pdu_len, void *pdu_data)
249 +{
250 +       struct blk_io_trace t;
251 +       unsigned long flags;
252 +       int cpu;
253 +
254 +       if (rw & (1 << BIO_RW_BARRIER))
255 +               what |= BLK_TC_ACT(BLK_TC_BARRIER);
256 +       if (rw & (1 << BIO_RW_SYNC))
257 +               what |= BLK_TC_ACT(BLK_TC_SYNC);
258 +
259 +       if (rw & WRITE)
260 +               what |= BLK_TC_ACT(BLK_TC_WRITE);
261 +       else
262 +               what |= BLK_TC_ACT(BLK_TC_READ);
263 +
264 +       if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
265 +               return;
266 +
267 +       t.magic         = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
268 +       t.sequence      = atomic_add_return(1, &bt->sequence);
269 +
270 +       cpu = get_cpu();
271 +       t.cpu           = cpu;
272 +       t.time          = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
273 +       put_cpu();
274 +
275 +       t.device        = bt->dev;
276 +       t.sector        = sector;
277 +       t.bytes         = bytes;
278 +       t.action        = what;
279 +       t.error         = error;
280 +       t.pdu_len       = pdu_len;
281 +
282 +       t.pid           = current->pid;
283 +       memcpy(t.comm, current->comm, sizeof(t.comm));
284 +
285 +       local_irq_save(flags);
286 +       __relay_write(bt->rchan, &t, sizeof(t));
287 +       if (pdu_len)
288 +               __relay_write(bt->rchan, pdu_data, pdu_len);
289 +       local_irq_restore(flags);
290 +}
291 +
292 +static struct dentry *blk_tree_root;
293 +static DECLARE_MUTEX(blk_tree_mutex);
294 +
295 +static inline void blk_remove_root(void)
296 +{
297 +       if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY)
298 +               blk_tree_root = NULL;
299 +}
300 +
301 +static void blk_remove_tree(struct dentry *dir)
302 +{
303 +       down(&blk_tree_mutex);
304 +       relayfs_remove_dir(dir);
305 +       blk_remove_root();
306 +       up(&blk_tree_mutex);
307 +}
308 +
309 +static struct dentry *blk_create_tree(const char *blk_name)
310 +{
311 +       struct dentry *dir = NULL;
312 +
313 +       down(&blk_tree_mutex);
314 +
315 +       if (!blk_tree_root) {
316 +               blk_tree_root = relayfs_create_dir("block", NULL);
317 +               if (!blk_tree_root)
318 +                       goto err;
319 +       }
320 +
321 +       dir = relayfs_create_dir(blk_name, blk_tree_root);
322 +       if (!dir)
323 +               blk_remove_root();
324 +
325 +err:
326 +       up(&blk_tree_mutex);
327 +       return dir;
328 +}
329 +
330 +void blk_cleanup_trace(struct blk_trace *bt)
331 +{
332 +       relay_close(bt->rchan);
333 +       blk_remove_tree(bt->dir);
334 +       kfree(bt);
335 +}
336 +
337 +int blk_stop_trace(struct block_device *bdev)
338 +{
339 +       request_queue_t *q = bdev_get_queue(bdev);
340 +       struct blk_trace *bt = NULL;
341 +       int ret = -EINVAL;
342 +
343 +       if (!q)
344 +               return -ENXIO;
345 +
346 +       down(&bdev->bd_sem);
347 +
348 +       if (q->blk_trace) {
349 +               bt = q->blk_trace;
350 +               q->blk_trace = NULL;
351 +               ret = 0;
352 +       }
353 +
354 +       up(&bdev->bd_sem);
355 +
356 +       if (bt)
357 +               blk_cleanup_trace(bt);
358 +
359 +       return ret;
360 +}
361 +
362 +int blk_start_trace(struct block_device *bdev, char __user *arg)
363 +{
364 +       request_queue_t *q = bdev_get_queue(bdev);
365 +       struct blk_user_trace_setup buts;
366 +       struct blk_trace *bt = NULL;
367 +       struct dentry *dir = NULL;
368 +       char b[BDEVNAME_SIZE];
369 +       int ret;
370 +
371 +       if (!q)
372 +               return -ENXIO;
373 +
374 +       if (copy_from_user(&buts, arg, sizeof(buts)))
375 +               return -EFAULT;
376 +
377 +       if (!buts.buf_size || !buts.buf_nr)
378 +               return -EINVAL;
379 +
380 +       strcpy(buts.name, bdevname(bdev, b));
381 +
382 +       if (copy_to_user(arg, &buts, sizeof(buts)))
383 +               return -EFAULT;
384 +
385 +       down(&bdev->bd_sem);
386 +       ret = -EBUSY;
387 +       if (q->blk_trace)
388 +               goto err;
389 +
390 +       ret = -ENOMEM;
391 +       bt = kmalloc(sizeof(*bt), GFP_KERNEL);
392 +       if (!bt)
393 +               goto err;
394 +
395 +       ret = -ENOENT;
396 +       dir = blk_create_tree(bdevname(bdev, b));
397 +       if (!dir)
398 +               goto err;
399 +
400 +       bt->dir = dir;
401 +       bt->dev = bdev->bd_dev;
402 +       atomic_set(&bt->sequence, 0);
403 +
404 +       ret = -EIO;
405 +       bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL);
406 +       if (!bt->rchan)
407 +               goto err;
408 +
409 +       bt->act_mask = buts.act_mask;
410 +       if (!bt->act_mask)
411 +               bt->act_mask = (u16) -1;
412 +
413 +       q->blk_trace = bt;
414 +       up(&bdev->bd_sem);
415 +       return 0;
416 +err:
417 +       up(&bdev->bd_sem);
418 +       if (dir)
419 +               blk_remove_tree(dir);
420 +       if (bt)
421 +               kfree(bt);
422 +       return ret;
423 +}
424 +
425 +static void blk_trace_check_cpu_time(void *data)
426 +{
427 +       unsigned long long a, b, *t;
428 +       struct timeval tv;
429 +       int cpu = get_cpu();
430 +
431 +       t = &per_cpu(blk_trace_cpu_offset, cpu);
432 +
433 +       a = sched_clock();
434 +       do_gettimeofday(&tv);
435 +       b = sched_clock();
436 +
437 +       *t = (u64) tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
438 +       *t -= (a + b) / 2;
439 +       put_cpu();
440 +}
441 +
442 +static int blk_trace_calibrate_offsets(void)
443 +{
444 +       unsigned long flags;
445 +
446 +       smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
447 +       local_irq_save(flags);
448 +       blk_trace_check_cpu_time(NULL);
449 +       local_irq_restore(flags);
450 +
451 +       return 0;
452 +}
453 +
454 +static __init int blk_trace_init(void)
455 +{
456 +       return blk_trace_calibrate_offsets();
457 +}
458 +
459 +module_init(blk_trace_init);
460 +
461 --- /dev/null   2004-06-30 22:03:36.000000000 +0200
462 +++ linux-2.6/include/linux/blktrace.h  2005-09-21 16:22:55.000000000 +0200
463 @@ -0,0 +1,173 @@
464 +#ifndef BLKTRACE_H
465 +#define BLKTRACE_H
466 +
467 +#include <linux/config.h>
468 +#include <linux/blkdev.h>
469 +#include <linux/relayfs_fs.h>
470 +
471 +/*
472 + * Trace categories
473 + */
474 +enum {
475 +       BLK_TC_READ     = 1 << 0,       /* reads */
476 +       BLK_TC_WRITE    = 1 << 1,       /* writes */
477 +       BLK_TC_BARRIER  = 1 << 2,       /* barrier */
478 +       BLK_TC_SYNC     = 1 << 3,       /* sync */
479 +       BLK_TC_QUEUE    = 1 << 4,       /* queueing/merging */
480 +       BLK_TC_REQUEUE  = 1 << 5,       /* requeueing */
481 +       BLK_TC_ISSUE    = 1 << 6,       /* issue */
482 +       BLK_TC_COMPLETE = 1 << 7,       /* completions */
483 +       BLK_TC_FS       = 1 << 8,       /* fs requests */
484 +       BLK_TC_PC       = 1 << 9,       /* pc requests */
485 +
486 +       BLK_TC_END      = 1 << 15,      /* only 16-bits, reminder */
487 +};
488 +
489 +#define BLK_TC_SHIFT           (16)
490 +#define BLK_TC_ACT(act)                ((act) << BLK_TC_SHIFT)
491 +
492 +/*
493 + * Basic trace actions
494 + */
495 +enum {
496 +       __BLK_TA_QUEUE = 1,             /* queued */
497 +       __BLK_TA_BACKMERGE,             /* back merged to existing rq */
498 +       __BLK_TA_FRONTMERGE,            /* front merge to existing rq */
499 +       __BLK_TA_GETRQ,                 /* allocated new request */
500 +       __BLK_TA_SLEEPRQ,               /* sleeping on rq allocation */
501 +       __BLK_TA_REQUEUE,               /* request requeued */
502 +       __BLK_TA_ISSUE,                 /* sent to driver */
503 +       __BLK_TA_COMPLETE,              /* completed by driver */
504 +       __BLK_TA_PLUG,                  /* queue was plugged */
505 +       __BLK_TA_UNPLUG_IO,             /* queue was unplugged by io */
506 +       __BLK_TA_UNPLUG_TIMER,          /* queue was unplugged by timer */
507 +       __BLK_TA_INSERT,                /* insert request */
508 +};
509 +
510 +/*
511 + * Trace actions in full. Additionally, read or write is masked
512 + */
513 +#define BLK_TA_QUEUE           (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
514 +#define BLK_TA_BACKMERGE       (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
515 +#define BLK_TA_FRONTMERGE      (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
516 +#define        BLK_TA_GETRQ            (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
517 +#define        BLK_TA_SLEEPRQ          (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
518 +#define        BLK_TA_REQUEUE          (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
519 +#define BLK_TA_ISSUE           (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
520 +#define BLK_TA_COMPLETE                (__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
521 +#define BLK_TA_PLUG            (__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
522 +#define BLK_TA_UNPLUG_IO       (__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
523 +#define BLK_TA_UNPLUG_TIMER    (__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
524 +#define BLK_TA_INSERT          (__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
525 +
526 +#define BLK_IO_TRACE_MAGIC     0x65617400
527 +#define BLK_IO_TRACE_VERSION   0x05
528 +
529 +/*
530 + * The trace itself
531 + */
532 +struct blk_io_trace {
533 +       u32 magic;              /* MAGIC << 8 | version */
534 +       u32 sequence;           /* event number */
535 +       u64 time;               /* in nanoseconds */
536 +       u64 sector;             /* disk offset */
537 +       u32 bytes;              /* transfer length */
538 +       u32 action;             /* what happened */
539 +       u32 pid;                /* who did it */
540 +       u32 cpu;                /* on what cpu did it happen */
541 +       u16 error;              /* completion error */
542 +       u16 pdu_len;            /* length of data after this trace */
543 +       u32 device;             /* device number */
544 +       char comm[16];          /* task command name (TASK_COMM_LEN) */
545 +};
546 +
547 +struct blk_trace {
548 +       struct dentry *dir;
549 +       struct rchan *rchan;
550 +       atomic_t sequence;
551 +       u32 dev;
552 +       u16 act_mask;
553 +};
554 +
555 +/*
556 + * User setup structure passed with BLKSTARTTRACE
557 + */
558 +struct blk_user_trace_setup {
559 +       char name[BDEVNAME_SIZE];       /* output */
560 +       u16 act_mask;                   /* input */
561 +       u32 buf_size;                   /* input */
562 +       u32 buf_nr;                     /* input */
563 +};
564 +
565 +#if defined(CONFIG_BLK_DEV_IO_TRACE)
566 +extern int blk_start_trace(struct block_device *, char __user *);
567 +extern int blk_stop_trace(struct block_device *);
568 +extern void blk_cleanup_trace(struct blk_trace *);
569 +extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
570 +
571 +static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
572 +                                   u32 what)
573 +{
574 +       struct blk_trace *bt = q->blk_trace;
575 +       int rw = rq->flags & 0x07;
576 +
577 +       if (likely(!bt))
578 +               return;
579 +
580 +       if (blk_pc_request(rq)) {
581 +               what |= BLK_TC_ACT(BLK_TC_PC);
582 +               __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
583 +       } else  {
584 +               what |= BLK_TC_ACT(BLK_TC_FS);
585 +               __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
586 +       }
587 +}
588 +
589 +static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
590 +                                    u32 what)
591 +{
592 +       struct blk_trace *bt = q->blk_trace;
593 +
594 +       if (likely(!bt))
595 +               return;
596 +
597 +       __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
598 +}
599 +
600 +static inline void blk_add_trace_generic(struct request_queue *q,
601 +                                        struct bio *bio, int rw, u32 what)
602 +{
603 +       struct blk_trace *bt = q->blk_trace;
604 +
605 +       if (likely(!bt))
606 +               return;
607 +
608 +       if (bio)
609 +               blk_add_trace_bio(q, bio, what);
610 +       else
611 +               __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
612 +}
613 +
614 +static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
615 +                                        unsigned int pdu)
616 +{
617 +       struct blk_trace *bt = q->blk_trace;
618 +       u64 rpdu = cpu_to_be64(pdu);
619 +
620 +       if (likely(!bt))
621 +               return;
622 +
623 +       __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
624 +}
625 +
626 +#else /* !CONFIG_BLK_DEV_IO_TRACE */
627 +#define blk_start_trace(bdev, arg)             (-EINVAL)
628 +#define blk_stop_trace(bdev)                   (-EINVAL)
629 +#define blk_cleanup_trace(bt)                  do { } while (0)
630 +#define blk_add_trace_rq(q, rq, what)          do { } while (0)
631 +#define blk_add_trace_bio(q, rq, what)         do { } while (0)
632 +#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
633 +#define blk_add_trace_pdu_int(q, what, pdu)     do { } while (0)
634 +#endif /* CONFIG_BLK_DEV_IO_TRACE */
635 +
636 +#endif
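
For context, here is a minimal sketch of the userspace side of this interface. It is illustrative only and not part of the patch: it assumes the updated kernel headers (the BLKSTARTTRACE/BLKSTOPTRACE ioctls from linux/fs.h and the blk_user_trace_setup/blk_io_trace structures from linux/blktrace.h) are visible to userspace, that relayfs is mounted at /relayfs, and that relay_open("trace", dir, ...) exposes per-CPU files named trace0, trace1, ... under /relayfs/block/<devname>/. The real blktrace utility carries its own copies of these definitions and additionally handles every CPU, sub-buffer padding, partial reads and act_mask selection.

/*
 * Hypothetical example: start tracing a block device, dump a few raw
 * events from CPU 0, then stop tracing again.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKSTARTTRACE, BLKSTOPTRACE (added by this patch) */
#include <linux/blktrace.h>     /* blk_user_trace_setup, blk_io_trace */

int main(int argc, char **argv)
{
	struct blk_user_trace_setup buts;
	struct blk_io_trace t;
	char path[256], pdu[256];
	int fd, tfd;

	if (argc < 2)
		return 1;

	fd = open(argv[1], O_RDONLY);                /* e.g. /dev/hda */
	if (fd < 0)
		return 1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 128 * 1024;                  /* relay sub-buffer size */
	buts.buf_nr = 4;                             /* number of sub-buffers */
	buts.act_mask = 0;                           /* 0 == trace everything */

	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0)
		return 1;

	/* the kernel filled in buts.name with the bdevname */
	snprintf(path, sizeof(path), "/relayfs/block/%s/trace0", buts.name);
	tfd = open(path, O_RDONLY);

	while (tfd >= 0 && read(tfd, &t, sizeof(t)) == sizeof(t)) {
		printf("seq %u action 0x%x sector %llu bytes %u\n",
		       t.sequence, t.action,
		       (unsigned long long) t.sector, t.bytes);
		if (t.pdu_len && t.pdu_len <= sizeof(pdu))
			read(tfd, pdu, t.pdu_len);   /* skip the payload */
	}

	if (tfd >= 0)
		close(tfd);
	ioctl(fd, BLKSTOPTRACE);
	close(fd);
	return 0;
}

Note that act_mask is matched against the BLK_TC_* category bits, which live in the upper 16 bits of each event's action field; that is why __blk_add_trace compares (bt->act_mask << BLK_TC_SHIFT) against what before logging anything.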