// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

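/*
 * Every event below is delivered through one of two back ends: the ftrace
 * ring buffer when the "blk" tracer is active (blk_tracer_enabled), or the
 * per-CPU relay channel (bt->rchan) that blktrace(8) reads via debugfs.
 * trace_note() and __blk_add_trace() both reserve space in whichever back
 * end applies, fill in a struct blk_io_trace, and commit it.
 */
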
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
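
/*
 * The action word stores the BLK_TA_* code in its low BLK_TC_SHIFT bits and
 * the BLK_TC_* category mask above them. As a worked example of the macro,
 * MASK_TC_BIT(opf, SYNC) expands to
 *
 *	(opf & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * which moves the REQ_SYNC bit from its position in the request flags
 * straight onto BLK_TC_SYNC's position in the category half of the word,
 * the shift amount having been folded to a constant at compile time.
 */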

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
	 * under 'q->debugfs_dir', thus lookup and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

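/*
 * Registration of the block-layer tracepoints is tied to the number of live
 * blktrace instances: the first get_probe_ref() hooks the probes up, and
 * the last put_probe_ref() tears them down again.
 */
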
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

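/*
 * A trace instance moves Blktrace_setup -> Blktrace_running ->
 * Blktrace_stopped. Bumping blktrace_seq on each start makes the emit path
 * send a fresh BLK_TN_PROCESS note for every task (see trace_note_tsk()).
 */
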
static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.read   = blk_dropped_read,
	.llseek = default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.write  = blk_msg_write,
	.llseek = noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

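/*
 * On success, the trace's debugfs directory holds "dropped" (count of events
 * lost to full subbuffers), "msg" (write-only, injects BLK_TN_MESSAGE notes)
 * and one relay file per CPU ("trace0", "trace1", ...) carrying the raw
 * event stream that blktrace(8) consumes.
 */
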
/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; this is as helpful as
	 * we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partition block devices
	 * and scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(q, bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

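/*
 * The ioctls below implement the classic user-space sequence, e.g. as
 * driven by blktrace(8):
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	allocate buffers + debugfs files
 *	ioctl(fd, BLKTRACESTART);		start logging events
 *	ioctl(fd, BLKTRACESTOP);		stop logging
 *	ioctl(fd, BLKTRACETEARDOWN);		free everything again
 */
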
/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */

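/*
 * Each probe below first picks up q->blk_trace under rcu_read_lock() and
 * bails out if no trace is attached, so a disabled trace costs little more
 * than one dereference per event.
 */
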
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			  0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			  0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
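
/*
 * e.g. a synchronous write logs as "WS", a discard as "D", and a
 * BLK_TN_MESSAGE note always logs as "N".
 */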

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
				 bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
				   bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
						   sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN". Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups. See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
					 "%3d,%-3d %llx,%-llx %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void (*print)(struct trace_seq *s, const struct trace_entry *ent,
		      bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M", "backmerge" },   blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F", "frontmerge" },  blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C", "complete" },	   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A", "remap" },	   blk_log_remap },
};

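/*
 * With the blk_classic option set, an entry formatted via the table above
 * comes out looking like, e.g.:
 *
 *	  8,0    2     0.000000000   697  Q   W 223490 + 8 [kjournald]
 *
 * i.e. device, CPU, timestamp, pid, action key, rwbs flags, then the
 * per-action payload (here sector + number of sectors and the command).
 */
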
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t = te_blk_io_trace(iter->ent);
	what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	= BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time	= iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if ((iter->ent->type != TRACE_BLK) ||
	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	blk_trace_stop(bt);

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(q, bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(q, bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

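/*
 * These attributes are exposed under /sys/block/<dev>/trace/. Writing "1"
 * to the enable attribute attaches a default trace (all categories, no
 * relay channel) whose output goes through the ftrace "blk" tracer; see
 * blk_trace_setup_queue() above.
 */
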
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

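/*
 * e.g. writing "read,write,sync" to the act_mask attribute selects just
 * those categories; blk_trace_mask2str() performs the reverse mapping when
 * the attribute is read back.
 */
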
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				     (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
					       lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/**
 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
 * @rwbs:	buffer to be filled
 * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
 *
 * Description:
 *     Maps each request operation and flag to a single character and fills the
 *     buffer provided by the caller with resulting string.
 *
 **/
void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
{
	int i = 0;

	if (opf & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (opf & REQ_OP_MASK) {
	case REQ_OP_WRITE:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (opf & REQ_FUA)
		rwbs[i++] = 'F';
	if (opf & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (opf & REQ_SYNC)
		rwbs[i++] = 'S';
	if (opf & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
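
/*
 * e.g. blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC) produces "WS", and a
 * plain REQ_OP_READ produces "R".
 */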

#endif /* CONFIG_EVENT_TRACING */