/* Originally from Linux kernel commit 2056a782 (Jens Axboe) */
#ifndef BLKTRACE_H
#define BLKTRACE_H

#include <linux/blkdev.h>
#include <linux/relay.h>

/*
 * Trace categories: a bit mask classifying each trace event.  The mask
 * occupies the upper 16 bits of the 32-bit action word (see BLK_TC_ACT),
 * hence the 16-bit reminder at BLK_TC_END.
 */
enum blktrace_cat {
	BLK_TC_READ	= 1 << 0,	/* reads */
	BLK_TC_WRITE	= 1 << 1,	/* writes */
	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
	BLK_TC_SYNC	= 1 << 3,	/* sync (original comment said "barrier"; copy-paste error) */
	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
	BLK_TC_ISSUE	= 1 << 6,	/* issue */
	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
	BLK_TC_FS	= 1 << 8,	/* fs requests */
	BLK_TC_PC	= 1 << 9,	/* pc requests */
	BLK_TC_NOTIFY	= 1 << 10,	/* special message */

	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
};
25 | ||
/*
 * Categories live in the upper 16 bits of the action word; BLK_TC_ACT()
 * shifts a BLK_TC_* mask into position for OR-ing with an action code.
 */
#define BLK_TC_SHIFT		(16)
#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
28 | ||
/*
 * Basic trace actions: what happened to a request or bio.  These occupy
 * the low 16 bits of the action word and are combined with a category
 * via BLK_TC_ACT() in the BLK_TA_* macros.
 */
enum blktrace_act {
	__BLK_TA_QUEUE = 1,		/* queued */
	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
	__BLK_TA_GETRQ,			/* allocated new request */
	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
	__BLK_TA_REQUEUE,		/* request requeued */
	__BLK_TA_ISSUE,			/* sent to driver */
	__BLK_TA_COMPLETE,		/* completed by driver */
	__BLK_TA_PLUG,			/* queue was plugged */
	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
	__BLK_TA_INSERT,		/* insert request */
	__BLK_TA_SPLIT,			/* bio was split */
	__BLK_TA_BOUNCE,		/* bio was bounced */
	__BLK_TA_REMAP,			/* bio was remapped */
};
49 | ||
/*
 * Trace actions in full. Additionally, read or write is masked in.
 * Note that BLK_TA_SPLIT and BLK_TA_BOUNCE carry no category bits.
 */
#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
68 | ||
/* record identification; presumably combined into blk_io_trace.magic (see its comment) -- confirm */
#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_IO_TRACE_VERSION	0x07
71 | ||
/*
 * The trace itself: fixed-size header of every emitted record.  A
 * variable payload of pdu_len bytes follows directly after the struct.
 * Layout is consumed by userspace tools -- do not reorder or resize.
 */
struct blk_io_trace {
	u32 magic;		/* MAGIC << 8 | version */
	u32 sequence;		/* event number */
	u64 time;		/* in microseconds */
	u64 sector;		/* disk offset */
	u32 bytes;		/* transfer length */
	u32 action;		/* what happened */
	u32 pid;		/* who did it */
	u32 device;		/* device number */
	u32 cpu;		/* on what cpu did it happen */
	u16 error;		/* completion error */
	u16 pdu_len;		/* length of data after this trace */
};
88 | ||
/*
 * The remap event: payload carried by a BLK_TA_REMAP record, describing
 * the remap *target* (see blk_add_trace_remap below).
 */
struct blk_io_trace_remap {
	u32 device;		/* target device, big-endian */
	u32 __pad;		/* explicit pad keeps sector 64-bit aligned */
	u64 sector;		/* target sector, big-endian */
};
97 | ||
/* trace session states; presumably stored in blk_trace.trace_state -- confirm */
enum {
	Blktrace_setup = 1,
	Blktrace_running,
	Blktrace_stopped,
};
103 | ||
/*
 * Kernel-side per-queue trace state.  Mirrors the filter fields of
 * blk_user_trace_setup (act_mask, start_lba, end_lba, pid) and holds
 * the relay channel the records are written through.
 */
struct blk_trace {
	int trace_state;		/* Blktrace_setup/running/stopped, presumably -- confirm */
	struct rchan *rchan;		/* relay channel used for output */
	unsigned long *sequence;	/* event sequence counter(s); per-cpu? -- confirm */
	u16 act_mask;			/* categories to trace; presumably BLK_TC_* mask -- confirm */
	u64 start_lba;
	u64 end_lba;
	u32 pid;
	u32 dev;
	struct dentry *dir;		/* directory for the trace files -- confirm filesystem */
	struct dentry *dropped_file;
	atomic_t dropped;		/* count of events that could not be recorded -- confirm */
};
117 | ||
/*
 * User setup structure passed with BLKTRACESTART
 * (ABI with userspace -- do not reorder or resize fields)
 */
struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output */
	u16 act_mask;			/* input */
	u32 buf_size;			/* input */
	u32 buf_nr;			/* input */
	u64 start_lba;			/* presumably input: sector range filter -- confirm */
	u64 end_lba;			/* presumably input: sector range filter -- confirm */
	u32 pid;			/* presumably input: pid filter -- confirm */
};
130 | ||
#if defined(CONFIG_BLK_DEV_IO_TRACE)
/* out-of-line entry points; __blk_add_trace emits a single record */
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(request_queue_t *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
135 | ||
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q: queue the io is for
 * @rq: the source request
 * @what: the action (a BLK_TA_* value)
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *     No-op when tracing is not enabled on @q.
 *
 **/
static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
				    u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->flags & 0x07;	/* low flag bits; presumably the data-direction bits -- confirm */

	if (likely(!bt))
		return;

	if (blk_pc_request(rq)) {
		/* packet command: no meaningful sector; attach the cmd block as payload */
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
	} else {
		/* fs request: log hard sector and length converted to bytes */
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
	}
}
163 | ||
164 | /** | |
165 | * blk_add_trace_bio - Add a trace for a bio oriented action | |
166 | * @q: queue the io is for | |
167 | * @bio: the source bio | |
168 | * @what: the action | |
169 | * | |
170 | * Description: | |
171 | * Records an action against a bio. Will log the bio offset + size. | |
172 | * | |
173 | **/ | |
174 | static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | |
175 | u32 what) | |
176 | { | |
177 | struct blk_trace *bt = q->blk_trace; | |
178 | ||
179 | if (likely(!bt)) | |
180 | return; | |
181 | ||
182 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); | |
183 | } | |
184 | ||
185 | /** | |
186 | * blk_add_trace_generic - Add a trace for a generic action | |
187 | * @q: queue the io is for | |
188 | * @bio: the source bio | |
189 | * @rw: the data direction | |
190 | * @what: the action | |
191 | * | |
192 | * Description: | |
193 | * Records a simple trace | |
194 | * | |
195 | **/ | |
196 | static inline void blk_add_trace_generic(struct request_queue *q, | |
197 | struct bio *bio, int rw, u32 what) | |
198 | { | |
199 | struct blk_trace *bt = q->blk_trace; | |
200 | ||
201 | if (likely(!bt)) | |
202 | return; | |
203 | ||
204 | if (bio) | |
205 | blk_add_trace_bio(q, bio, what); | |
206 | else | |
207 | __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); | |
208 | } | |
209 | ||
210 | /** | |
211 | * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload | |
212 | * @q: queue the io is for | |
213 | * @what: the action | |
214 | * @bio: the source bio | |
215 | * @pdu: the integer payload | |
216 | * | |
217 | * Description: | |
218 | * Adds a trace with some integer payload. This might be an unplug | |
219 | * option given as the action, with the depth at unplug time given | |
220 | * as the payload | |
221 | * | |
222 | **/ | |
223 | static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what, | |
224 | struct bio *bio, unsigned int pdu) | |
225 | { | |
226 | struct blk_trace *bt = q->blk_trace; | |
227 | u64 rpdu = cpu_to_be64(pdu); | |
228 | ||
229 | if (likely(!bt)) | |
230 | return; | |
231 | ||
232 | if (bio) | |
233 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); | |
234 | else | |
235 | __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); | |
236 | } | |
237 | ||
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q: queue the io is for
 * @bio: the source bio
 * @dev: target device
 * @from: source sector
 * @to: target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *     The record's sector is the source (@from); the payload carries the
 *     target device and sector.  NOTE(review): the source *device* is not
 *     recorded anywhere -- confirm that is intended.
 *
 **/
static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				       dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	/* payload is big-endian on the wire */
	r.device = cpu_to_be32(dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
265 | ||
#else /* !CONFIG_BLK_DEV_IO_TRACE */
/*
 * Tracing compiled out: every hook collapses to a no-op so call sites
 * need no #ifdef guards; the ioctl reports -ENOTTY.
 */
#define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
#define blk_trace_shutdown(q)			do { } while (0)
#define blk_add_trace_rq(q, rq, what)		do { } while (0)
#define blk_add_trace_bio(q, rq, what)		do { } while (0)
#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
#define blk_add_trace_remap(q, bio, dev, f, t)	do { } while (0)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
275 | ||
276 | #endif |