// kernel/trace/blktrace.c (linux-block.git)
// Commit: block: remove the request_queue argument to the block_split tracepoint
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
			  const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
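/*
 * In other words, MASK_TC_BIT() moves a request flag in place: a set
 * REQ_<name> bit (at position __REQ_<name>) is shifted up so it lands at
 * the matching trace category position, BLK_TC_<name> within the
 * BLK_TC_SHIFTed action field. E.g. a set REQ_SYNC bit comes out as
 * BLK_TC_ACT(BLK_TC_SYNC), with the shift amounts folding to constants.
 */
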
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int op, int op_flags, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
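/*
 * Layout of an emitted record, as assembled above: the struct blk_io_trace
 * header, then the optional 64-bit cgid (signalled by __BLK_TA_CGROUP or
 * __BLK_TN_CGROUP in the action word), then the event-specific payload.
 * Note that t->pdu_len covers both the cgid and the payload; the parse
 * helpers below (pdu_start()/pdu_real_len()) undo that accounting.
 */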

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= blk_dropped_read,
	.llseek		= default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.write		= blk_msg_write,
	.llseek		= noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; be as helpful as
	 * we can.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partitions and for
	 * scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}
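/*
 * Illustrative userspace sequence (a sketch, not part of this file): the
 * blktrace(8) tool opens the block device and drives the trace lifecycle
 * through these ioctls:
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);   set up the relay channel
 *	ioctl(fd, BLKTRACESTART);          start logging events
 *	ioctl(fd, BLKTRACESTOP);           stop logging
 *	ioctl(fd, BLKTRACETEARDOWN);       tear everything down
 *
 * while reading the per-cpu "trace" files that relay_open() created in
 * debugfs.
 */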

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->debugfs_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}
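/*
 * Passthrough requests (e.g. SG_IO-style commands) are tagged BLK_TC_PC
 * and carry no filesystem sector/size semantics, while regular requests
 * are tagged BLK_TC_FS; the output side formats the two differently (the
 * PC case dumps the raw pdu, see blk_log_generic() below).
 */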

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_merge(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}
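/*
 * Note that the request_queue is derived from the bio here: per the commit
 * named at the top of this file, the block_split tracepoint no longer
 * passes the queue as a separate argument. The big-endian pdu records the
 * sector at which the bio was split.
 */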

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

1099
3c289ba7 1100static void blk_register_tracepoints(void)
5f3ea37c
ACM
1101{
1102 int ret;
1103
38516ab5 1104 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
5f3ea37c 1105 WARN_ON(ret);
38516ab5 1106 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
5f3ea37c 1107 WARN_ON(ret);
f3bdc62f
JK
1108 ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1109 WARN_ON(ret);
38516ab5 1110 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
5f3ea37c 1111 WARN_ON(ret);
38516ab5 1112 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
5f3ea37c 1113 WARN_ON(ret);
38516ab5 1114 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
5f3ea37c 1115 WARN_ON(ret);
38516ab5 1116 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
5f3ea37c 1117 WARN_ON(ret);
38516ab5 1118 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
5f3ea37c 1119 WARN_ON(ret);
38516ab5 1120 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
5f3ea37c 1121 WARN_ON(ret);
38516ab5 1122 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
5f3ea37c 1123 WARN_ON(ret);
38516ab5 1124 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
5f3ea37c 1125 WARN_ON(ret);
38516ab5 1126 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
5f3ea37c 1127 WARN_ON(ret);
49cac01e 1128 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
5f3ea37c 1129 WARN_ON(ret);
38516ab5 1130 ret = register_trace_block_split(blk_add_trace_split, NULL);
5f3ea37c 1131 WARN_ON(ret);
d07335e5 1132 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
5f3ea37c 1133 WARN_ON(ret);
38516ab5 1134 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
b0da3f0d 1135 WARN_ON(ret);
5f3ea37c
ACM
1136}
1137
1138static void blk_unregister_tracepoints(void)
1139{
38516ab5 1140 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
d07335e5 1141 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
38516ab5 1142 unregister_trace_block_split(blk_add_trace_split, NULL);
49cac01e 1143 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
38516ab5 1144 unregister_trace_block_plug(blk_add_trace_plug, NULL);
38516ab5
SR
1145 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1146 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1147 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1148 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1149 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1150 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1151 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1152 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
f3bdc62f 1153 unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
38516ab5
SR
1154 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1155 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
5f3ea37c
ACM
1156
1157 tracepoint_synchronize_unregister();
1158}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
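/*
 * Example rwbs strings produced above: "R" for a plain read, "WS" for a
 * synchronous write, "D" for a discard. A leading 'F' marks a flush and
 * an 'F' after the direction letter marks FUA.
 */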

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
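/*
 * A classic-format line therefore looks like (illustrative values, with
 * the trailing "sector + size [comm]" part coming from the per-action
 * blk_log_* helpers below):
 *
 *	  8,0    1     0.000012345  4321  Q  WS 1024 + 8 [fio]
 *
 * i.e. major,minor, cpu, seconds.nanoseconds, pid, action, rwbs.
 */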

static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void (*print)(struct trace_seq *s, const struct trace_entry *ent,
		      bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	    blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },   blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },  blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	    blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	    blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	    blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	    blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },    blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	    blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	    blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	    blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	    blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	    blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
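/*
 * Illustrative shell usage of this interface (a sketch; the device name
 * "sda" is an assumption):
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/block/sda/trace/enable
 *	cat /sys/kernel/debug/tracing/trace
 *
 * Enabling through sysfs goes via blk_trace_setup_queue(), which creates
 * no relay channel; records are emitted only while the ftrace "blk"
 * tracer is active (blk_tracer_enabled).
 */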

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
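/*
 * For example, blk_trace_str2mask("read,sync") yields
 * BLK_TC_READ | BLK_TC_SYNC, while any unrecognized token makes the
 * whole parse fail with -EINVAL.
 */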

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
					       lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
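/*
 * Unlike fill_rwbs() above, which decodes the on-the-wire BLK_TC_* action
 * bits of a recorded blk_io_trace, this exported variant decodes live
 * REQ_* op/flag bits for the block trace events built under
 * CONFIG_EVENT_TRACING; e.g. a synchronous write yields "WS" and a secure
 * erase yields "DE".
 */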

#endif /* CONFIG_EVENT_TRACING */