Commit | Line | Data |
---|---|---|
76c5a9c3 JA |
1 | diff --git a/Documentation/fb/vesafb.txt b/Documentation/fb/vesafb.txt |
2 | diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig | |
3 | --- a/drivers/block/Kconfig | |
4 | +++ b/drivers/block/Kconfig | |
5 | @@ -419,6 +419,14 @@ config LBD | |
6 | your machine, or if you want to have a raid or loopback device | |
7 | bigger than 2TB. Otherwise say N. | |
8 | ||
9 | +config BLK_DEV_IO_TRACE | |
10 | + bool "Support for tracing block io actions" | |
11 | + select RELAYFS_FS | |
12 | + help | |
13 | + Say Y here, if you want to be able to trace the block layer actions | |
14 | + on a given queue. | |
15 | + | |
16 | + | |
17 | config CDROM_PKTCDVD | |
18 | tristate "Packet writing on CD/DVD media" | |
19 | depends on !UML | |
20 | diff --git a/drivers/block/Makefile b/drivers/block/Makefile | |
21 | --- a/drivers/block/Makefile | |
22 | +++ b/drivers/block/Makefile | |
23 | @@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD) += viodasd.o | |
24 | obj-$(CONFIG_BLK_DEV_SX8) += sx8.o | |
25 | obj-$(CONFIG_BLK_DEV_UB) += ub.o | |
26 | ||
27 | +obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | |
28 | + | |
29 | diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c | |
30 | --- a/drivers/block/elevator.c | |
31 | +++ b/drivers/block/elevator.c | |
32 | @@ -34,6 +34,7 @@ | |
33 | #include <linux/slab.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/compiler.h> | |
36 | +#include <linux/blktrace.h> | |
37 | ||
38 | #include <asm/uaccess.h> | |
39 | ||
40 | @@ -371,6 +372,9 @@ struct request *elv_next_request(request | |
41 | int ret; | |
42 | ||
43 | while ((rq = __elv_next_request(q)) != NULL) { | |
44 | + | |
45 | + blk_add_trace_rq(q, rq, BLK_TA_ISSUE); | |
46 | + | |
47 | /* | |
48 | * just mark as started even if we don't start it, a request | |
49 | * that has been delayed should not be passed by new incoming | |
50 | diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c | |
51 | --- a/drivers/block/ioctl.c | |
52 | +++ b/drivers/block/ioctl.c | |
53 | @@ -4,6 +4,7 @@ | |
54 | #include <linux/backing-dev.h> | |
55 | #include <linux/buffer_head.h> | |
56 | #include <linux/smp_lock.h> | |
57 | +#include <linux/blktrace.h> | |
58 | #include <asm/uaccess.h> | |
59 | ||
60 | static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) | |
61 | @@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi | |
62 | return put_ulong(arg, bdev->bd_inode->i_size >> 9); | |
63 | case BLKGETSIZE64: | |
64 | return put_u64(arg, bdev->bd_inode->i_size); | |
65 | + case BLKSTARTTRACE: | |
66 | + return blk_start_trace(bdev, (char __user *) arg); | |
67 | + case BLKSTOPTRACE: | |
68 | + return blk_stop_trace(bdev); | |
69 | } | |
70 | return -ENOIOCTLCMD; | |
71 | } | |
72 | diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c | |
73 | --- a/drivers/block/ll_rw_blk.c | |
74 | +++ b/drivers/block/ll_rw_blk.c | |
75 | @@ -29,6 +29,7 @@ | |
76 | #include <linux/swap.h> | |
77 | #include <linux/writeback.h> | |
78 | #include <linux/blkdev.h> | |
79 | +#include <linux/blktrace.h> | |
80 | ||
81 | /* | |
82 | * for max sense size | |
83 | @@ -1624,6 +1625,11 @@ void blk_cleanup_queue(request_queue_t * | |
84 | if (q->queue_tags) | |
85 | __blk_queue_free_tags(q); | |
86 | ||
87 | + if (q->blk_trace) { | |
88 | + blk_cleanup_trace(q->blk_trace); | |
89 | + q->blk_trace = NULL; | |
90 | + } | |
91 | + | |
92 | blk_queue_ordered(q, QUEUE_ORDERED_NONE); | |
93 | ||
94 | kmem_cache_free(requestq_cachep, q); | |
95 | @@ -1970,6 +1976,8 @@ rq_starved: | |
96 | ||
97 | rq_init(q, rq); | |
98 | rq->rl = rl; | |
99 | + | |
100 | + blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); | |
101 | out: | |
102 | return rq; | |
103 | } | |
104 | @@ -1998,6 +2006,8 @@ static struct request *get_request_wait( | |
105 | if (!rq) { | |
106 | struct io_context *ioc; | |
107 | ||
108 | + blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ); | |
109 | + | |
110 | __generic_unplug_device(q); | |
111 | spin_unlock_irq(q->queue_lock); | |
112 | io_schedule(); | |
113 | @@ -2051,6 +2061,8 @@ EXPORT_SYMBOL(blk_get_request); | |
114 | */ | |
115 | void blk_requeue_request(request_queue_t *q, struct request *rq) | |
116 | { | |
117 | + blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | |
118 | + | |
119 | if (blk_rq_tagged(rq)) | |
120 | blk_queue_end_tag(q, rq); | |
121 | ||
122 | @@ -2714,6 +2726,8 @@ static int __make_request(request_queue_ | |
123 | if (!q->back_merge_fn(q, req, bio)) | |
124 | break; | |
125 | ||
126 | + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); | |
127 | + | |
128 | req->biotail->bi_next = bio; | |
129 | req->biotail = bio; | |
130 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | |
131 | @@ -2729,6 +2743,8 @@ static int __make_request(request_queue_ | |
132 | if (!q->front_merge_fn(q, req, bio)) | |
133 | break; | |
134 | ||
135 | + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); | |
136 | + | |
137 | bio->bi_next = req->bio; | |
138 | req->bio = bio; | |
139 | ||
140 | @@ -2794,6 +2810,8 @@ get_rq: | |
141 | req->rq_disk = bio->bi_bdev->bd_disk; | |
142 | req->start_time = jiffies; | |
143 | ||
144 | + blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | |
145 | + | |
146 | spin_lock_irq(q->queue_lock); | |
147 | if (elv_queue_empty(q)) | |
148 | blk_plug_device(q); | |
149 | @@ -3030,6 +3048,10 @@ end_io: | |
150 | blk_partition_remap(bio); | |
151 | ||
152 | ret = q->make_request_fn(q, bio); | |
153 | + | |
154 | + if (ret) | |
155 | + blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | |
156 | + | |
157 | } while (ret); | |
158 | } | |
159 | ||
160 | @@ -3148,6 +3170,8 @@ static int __end_that_request_first(stru | |
161 | int total_bytes, bio_nbytes, error, next_idx = 0; | |
162 | struct bio *bio; | |
163 | ||
164 | + blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); | |
165 | + | |
166 | /* | |
167 | * extend uptodate bool to allow < 0 value to be direct io error | |
168 | */ | |
169 | diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h | |
170 | --- a/include/linux/blkdev.h | |
171 | +++ b/include/linux/blkdev.h | |
172 | @@ -22,6 +22,7 @@ typedef struct request_queue request_que | |
173 | struct elevator_queue; | |
174 | typedef struct elevator_queue elevator_t; | |
175 | struct request_pm_state; | |
176 | +struct blk_trace; | |
177 | ||
178 | #define BLKDEV_MIN_RQ 4 | |
179 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | |
180 | @@ -412,6 +413,8 @@ struct request_queue | |
181 | */ | |
182 | struct request *flush_rq; | |
183 | unsigned char ordered; | |
184 | + | |
185 | + struct blk_trace *blk_trace; | |
186 | }; | |
187 | ||
188 | enum { | |
189 | diff --git a/include/linux/fs.h b/include/linux/fs.h | |
190 | --- a/include/linux/fs.h | |
191 | +++ b/include/linux/fs.h | |
192 | @@ -195,6 +195,8 @@ extern int dir_notify_enable; | |
193 | #define BLKBSZGET _IOR(0x12,112,size_t) | |
194 | #define BLKBSZSET _IOW(0x12,113,size_t) | |
195 | #define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ | |
196 | +#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup) | |
197 | +#define BLKSTOPTRACE _IO(0x12,116) | |
198 | ||
199 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ | |
200 | #define FIBMAP _IO(0x00,1) /* bmap access */ | |
201 | --- /dev/null 2005-09-03 12:52:15.000000000 +0200 | |
202 | +++ linux-2.6/drivers/block/blktrace.c 2005-09-08 08:33:20.000000000 +0200 | |
203 | @@ -0,0 +1,222 @@ | |
204 | +#include <linux/config.h> | |
205 | +#include <linux/kernel.h> | |
206 | +#include <linux/blkdev.h> | |
207 | +#include <linux/blktrace.h> | |
208 | +#include <linux/percpu.h> | |
209 | +#include <linux/init.h> | |
210 | +#include <asm/uaccess.h> | |
211 | + | |
212 | +static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, }; | |
213 | + | |
214 | +void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |
215 | + int rw, u32 what, int error, int pdu_len, char *pdu_data) | |
216 | +{ | |
217 | + struct blk_io_trace t; | |
218 | + unsigned long flags; | |
219 | + int cpu; | |
220 | + | |
221 | + if (rw & (1 << BIO_RW_BARRIER)) | |
222 | + what |= BLK_TC_ACT(BLK_TC_BARRIER); | |
223 | + if (rw & (1 << BIO_RW_SYNC)) | |
224 | + what |= BLK_TC_ACT(BLK_TC_SYNC); | |
225 | + | |
226 | + if (rw & WRITE) | |
227 | + what |= BLK_TC_ACT(BLK_TC_WRITE); | |
228 | + else | |
229 | + what |= BLK_TC_ACT(BLK_TC_READ); | |
230 | + | |
231 | + if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) | |
232 | + return; | |
233 | + | |
234 | + t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | |
235 | + t.sequence = atomic_add_return(1, &bt->sequence); | |
236 | + | |
237 | + cpu = get_cpu(); | |
238 | + t.cpu = cpu; | |
239 | + t.time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu); | |
240 | + put_cpu(); | |
241 | + | |
242 | + t.sector = sector; | |
243 | + t.bytes = bytes; | |
244 | + t.action = what; | |
245 | + t.error = error; | |
246 | + t.pdu_len = pdu_len; | |
247 | + | |
248 | + t.pid = current->pid; | |
249 | + memcpy(t.comm, current->comm, sizeof(t.comm)); | |
250 | + | |
251 | + local_irq_save(flags); | |
252 | + __relay_write(bt->rchan, &t, sizeof(t)); | |
253 | + if (pdu_len) | |
254 | + __relay_write(bt->rchan, pdu_data, pdu_len); | |
255 | + local_irq_restore(flags); | |
256 | +} | |
257 | + | |
258 | +static struct dentry *blk_tree_root; | |
259 | +static DECLARE_MUTEX(blk_tree_mutex); | |
260 | + | |
261 | +static inline void blk_remove_root(void) | |
262 | +{ | |
263 | + if (relayfs_remove_dir(blk_tree_root) != -ENOTEMPTY) | |
264 | + blk_tree_root = NULL; | |
265 | +} | |
266 | + | |
267 | +static void blk_remove_tree(struct dentry *dir) | |
268 | +{ | |
269 | + down(&blk_tree_mutex); | |
270 | + relayfs_remove_dir(dir); | |
271 | + blk_remove_root(); | |
272 | + up(&blk_tree_mutex); | |
273 | +} | |
274 | + | |
275 | +static struct dentry *blk_create_tree(const char *blk_name) | |
276 | +{ | |
277 | + struct dentry *dir = NULL; | |
278 | + | |
279 | + down(&blk_tree_mutex); | |
280 | + | |
281 | + if (!blk_tree_root) { | |
282 | + blk_tree_root = relayfs_create_dir("block", NULL); | |
283 | + if (!blk_tree_root) | |
284 | + goto err; | |
285 | + } | |
286 | + | |
287 | + dir = relayfs_create_dir(blk_name, blk_tree_root); | |
288 | + if (!dir) | |
289 | + blk_remove_root(); | |
290 | + | |
291 | +err: | |
292 | + up(&blk_tree_mutex); | |
293 | + return dir; | |
294 | +} | |
295 | + | |
296 | +void blk_cleanup_trace(struct blk_trace *bt) | |
297 | +{ | |
298 | + relay_close(bt->rchan); | |
299 | + blk_remove_tree(bt->dir); | |
300 | + kfree(bt); | |
301 | +} | |
302 | + | |
303 | +int blk_stop_trace(struct block_device *bdev) | |
304 | +{ | |
305 | + request_queue_t *q = bdev_get_queue(bdev); | |
306 | + struct blk_trace *bt = NULL; | |
307 | + int ret = -EINVAL; | |
308 | + | |
309 | + if (!q) | |
310 | + return -ENXIO; | |
311 | + | |
312 | + down(&bdev->bd_sem); | |
313 | + | |
314 | + if (q->blk_trace) { | |
315 | + bt = q->blk_trace; | |
316 | + q->blk_trace = NULL; | |
317 | + ret = 0; | |
318 | + } | |
319 | + | |
320 | + up(&bdev->bd_sem); | |
321 | + | |
322 | + if (bt) | |
323 | + blk_cleanup_trace(bt); | |
324 | + | |
325 | + return ret; | |
326 | +} | |
327 | + | |
328 | +int blk_start_trace(struct block_device *bdev, char __user *arg) | |
329 | +{ | |
330 | + request_queue_t *q = bdev_get_queue(bdev); | |
331 | + struct blk_user_trace_setup buts; | |
332 | + struct blk_trace *bt = NULL; | |
333 | + struct dentry *dir = NULL; | |
334 | + char b[BDEVNAME_SIZE]; | |
335 | + int ret; | |
336 | + | |
337 | + if (!q) | |
338 | + return -ENXIO; | |
339 | + | |
340 | + if (copy_from_user(&buts, arg, sizeof(buts))) | |
341 | + return -EFAULT; | |
342 | + | |
343 | + if (!buts.buf_size || !buts.buf_nr) | |
344 | + return -EINVAL; | |
345 | + | |
346 | + strcpy(buts.name, bdevname(bdev, b)); | |
347 | + | |
348 | + if (copy_to_user(arg, &buts, sizeof(buts))) | |
349 | + return -EFAULT; | |
350 | + | |
351 | + down(&bdev->bd_sem); | |
352 | + ret = -EBUSY; | |
353 | + if (q->blk_trace) | |
354 | + goto err; | |
355 | + | |
356 | + ret = -ENOMEM; | |
357 | + bt = kmalloc(sizeof(*bt), GFP_KERNEL); | |
358 | + if (!bt) | |
359 | + goto err; | |
360 | + | |
361 | + ret = -ENOENT; | |
362 | + dir = blk_create_tree(bdevname(bdev, b)); | |
363 | + if (!dir) | |
364 | + goto err; | |
365 | + | |
366 | + bt->dir = dir; | |
367 | + atomic_set(&bt->sequence, 0); | |
368 | + | |
369 | + ret = -EIO; | |
370 | + bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, NULL); | |
371 | + if (!bt->rchan) | |
372 | + goto err; | |
373 | + | |
374 | + bt->act_mask = buts.act_mask; | |
375 | + if (!bt->act_mask) | |
376 | + bt->act_mask = (u16) -1; | |
377 | + | |
378 | + q->blk_trace = bt; | |
379 | + up(&bdev->bd_sem); | |
380 | + return 0; | |
381 | +err: | |
382 | + up(&bdev->bd_sem); | |
383 | + if (dir) | |
384 | + blk_remove_tree(dir); | |
385 | + if (bt) | |
386 | + kfree(bt); | |
387 | + return ret; | |
388 | +} | |
389 | + | |
390 | +static void blk_trace_check_cpu_time(void *data) | |
391 | +{ | |
392 | + unsigned long long a, b, *t; | |
393 | + struct timeval tv; | |
394 | + int cpu = get_cpu(); | |
395 | + | |
396 | + t = &per_cpu(blk_trace_cpu_offset, cpu); | |
397 | + | |
398 | + a = sched_clock(); | |
399 | + do_gettimeofday(&tv); | |
400 | + b = sched_clock(); | |
401 | + | |
402 | + *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000; | |
403 | + *t -= (a + b) / 2; | |
404 | + put_cpu(); | |
405 | +} | |
406 | + | |
407 | +static int blk_trace_calibrate_offsets(void) | |
408 | +{ | |
409 | + unsigned long flags; | |
410 | + | |
411 | + smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1); | |
412 | + local_irq_save(flags); | |
413 | + blk_trace_check_cpu_time(NULL); | |
414 | + local_irq_restore(flags); | |
415 | + | |
416 | + return 0; | |
417 | +} | |
418 | + | |
419 | +static __init int blk_trace_init(void) | |
420 | +{ | |
421 | + return blk_trace_calibrate_offsets(); | |
422 | +} | |
423 | + | |
424 | +module_init(blk_trace_init); | |
425 | + | |
426 | --- /dev/null 2005-09-03 12:52:15.000000000 +0200 | |
427 | +++ linux-2.6/include/linux/blktrace.h 2005-09-08 08:33:20.000000000 +0200 | |
428 | @@ -0,0 +1,150 @@ | |
429 | +#ifndef BLKTRACE_H | |
430 | +#define BLKTRACE_H | |
431 | + | |
432 | +#include <linux/config.h> | |
433 | +#include <linux/blkdev.h> | |
434 | +#include <linux/relayfs_fs.h> | |
435 | + | |
436 | +/* | |
437 | + * Trace categories | |
438 | + */ | |
439 | +enum { | |
440 | + BLK_TC_READ = 1 << 0, /* reads */ | |
441 | + BLK_TC_WRITE = 1 << 1, /* writes */ | |
442 | + BLK_TC_BARRIER = 1 << 2, /* barrier */ | |
443 | +	BLK_TC_SYNC	= 1 << 3,	/* sync */ | |
444 | + BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ | |
445 | + BLK_TC_REQUEUE = 1 << 5, /* requeueing */ | |
446 | + BLK_TC_ISSUE = 1 << 6, /* issue */ | |
447 | + BLK_TC_COMPLETE = 1 << 7, /* completions */ | |
448 | + BLK_TC_FS = 1 << 8, /* fs requests */ | |
449 | + BLK_TC_PC = 1 << 9, /* pc requests */ | |
450 | + | |
451 | + BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ | |
452 | +}; | |
453 | + | |
454 | +#define BLK_TC_SHIFT (16) | |
455 | +#define BLK_TC_ACT(act) ((act) << BLK_TC_SHIFT) | |
456 | + | |
457 | +/* | |
458 | + * Basic trace actions | |
459 | + */ | |
460 | +enum { | |
461 | + __BLK_TA_QUEUE = 1, /* queued */ | |
462 | + __BLK_TA_BACKMERGE, /* back merged to existing rq */ | |
463 | + __BLK_TA_FRONTMERGE, /* front merge to existing rq */ | |
464 | + __BLK_TA_GETRQ, /* allocated new request */ | |
465 | + __BLK_TA_SLEEPRQ, /* sleeping on rq allocation */ | |
466 | + __BLK_TA_REQUEUE, /* request requeued */ | |
467 | + __BLK_TA_ISSUE, /* sent to driver */ | |
468 | + __BLK_TA_COMPLETE, /* completed by driver */ | |
469 | +}; | |
470 | + | |
471 | +/* | |
472 | + * Trace actions in full. Additionally, read or write is masked | |
473 | + */ | |
474 | +#define BLK_TA_QUEUE (__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE)) | |
475 | +#define BLK_TA_BACKMERGE (__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) | |
476 | +#define BLK_TA_FRONTMERGE (__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE)) | |
477 | +#define BLK_TA_GETRQ (__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE)) | |
478 | +#define BLK_TA_SLEEPRQ (__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE)) | |
479 | +#define BLK_TA_REQUEUE (__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE)) | |
480 | +#define BLK_TA_ISSUE (__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE)) | |
481 | +#define BLK_TA_COMPLETE (__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE)) | |
482 | + | |
483 | +#define BLK_IO_TRACE_MAGIC 0x65617400 | |
484 | +#define BLK_IO_TRACE_VERSION 0x04 | |
485 | + | |
486 | +/* | |
487 | + * The trace itself | |
488 | + */ | |
489 | +struct blk_io_trace { | |
490 | + u32 magic; /* MAGIC << 8 | version */ | |
491 | + u32 sequence; /* event number */ | |
492 | +	u64 time;		/* in nanoseconds */ | |
493 | + u64 sector; /* disk offset */ | |
494 | + u32 bytes; /* transfer length */ | |
495 | + u32 action; /* what happened */ | |
496 | + u32 pid; /* who did it */ | |
497 | + u32 cpu; /* on what cpu did it happen */ | |
498 | + u16 error; /* completion error */ | |
499 | + u16 pdu_len; /* length of data after this trace */ | |
500 | + char comm[16]; /* task command name (TASK_COMM_LEN) */ | |
501 | +}; | |
502 | + | |
503 | +struct blk_trace { | |
504 | + struct dentry *dir; | |
505 | + struct rchan *rchan; | |
506 | + atomic_t sequence; | |
507 | + u16 act_mask; | |
508 | +}; | |
509 | + | |
510 | +/* | |
511 | + * User setup structure passed with BLKSTARTTRACE | |
512 | + */ | |
513 | +struct blk_user_trace_setup { | |
514 | + char name[BDEVNAME_SIZE]; /* output */ | |
515 | + u16 act_mask; /* input */ | |
516 | + u32 buf_size; /* input */ | |
517 | + u32 buf_nr; /* input */ | |
518 | +}; | |
519 | + | |
520 | +#if defined(CONFIG_BLK_DEV_IO_TRACE) | |
521 | +extern int blk_start_trace(struct block_device *, char __user *); | |
522 | +extern int blk_stop_trace(struct block_device *); | |
523 | +extern void blk_cleanup_trace(struct blk_trace *); | |
524 | +extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *); | |
525 | + | |
526 | +static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |
527 | + u32 what) | |
528 | +{ | |
529 | + struct blk_trace *bt = q->blk_trace; | |
530 | + int rw = rq->flags & 0x07; | |
531 | + | |
532 | + if (likely(!bt)) | |
533 | + return; | |
534 | + | |
535 | + if (blk_pc_request(rq)) { | |
536 | + what |= BLK_TC_ACT(BLK_TC_PC); | |
537 | + __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); | |
538 | + } else { | |
539 | + what |= BLK_TC_ACT(BLK_TC_FS); | |
540 | + __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL); | |
541 | + } | |
542 | +} | |
543 | + | |
544 | +static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | |
545 | + u32 what) | |
546 | +{ | |
547 | + struct blk_trace *bt = q->blk_trace; | |
548 | + | |
549 | + if (likely(!bt)) | |
550 | + return; | |
551 | + | |
552 | + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); | |
553 | +} | |
554 | + | |
555 | +static inline void blk_add_trace_generic(struct request_queue *q, | |
556 | + struct bio *bio, int rw, u32 what) | |
557 | +{ | |
558 | + struct blk_trace *bt = q->blk_trace; | |
559 | + | |
560 | + if (likely(!bt)) | |
561 | + return; | |
562 | + | |
563 | + if (bio) | |
564 | + blk_add_trace_bio(q, bio, what); | |
565 | + else | |
566 | + __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); | |
567 | +} | |
568 | + | |
569 | +#else /* !CONFIG_BLK_DEV_IO_TRACE */ | |
570 | +#define blk_start_trace(bdev, arg) (-EINVAL) | |
571 | +#define blk_stop_trace(bdev) (-EINVAL) | |
572 | +#define blk_cleanup_trace(bt) do { } while (0) | |
573 | +#define blk_add_trace_rq(q, rq, what) do { } while (0) | |
574 | +#define blk_add_trace_bio(q, rq, what) do { } while (0) | |
575 | +#define blk_add_trace_generic(q, rq, rw, what) do { } while (0) | |
576 | +#endif /* CONFIG_BLK_DEV_IO_TRACE */ | |
577 | + | |
578 | +#endif |