Revert "bcache: ignore pending signals when creating gc and allocator thread"
[linux-block.git] / kernel / trace / blktrace.c
CommitLineData
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

5f3ea37c 56/* Global reference count of probes */
a6da0024
JA
57static DEFINE_MUTEX(blk_probe_mutex);
58static int blk_probes_ref;
5f3ea37c 59
3c289ba7 60static void blk_register_tracepoints(void);
5f3ea37c
ACM
61static void blk_unregister_tracepoints(void);
62
be1c6341
OK
63/*
64 * Send out a notify message.
65 */
a863055b 66static void trace_note(struct blk_trace *bt, pid_t pid, int action,
67c0496e 67 const void *data, size_t len, u64 cgid)
be1c6341
OK
68{
69 struct blk_io_trace *t;
18cea459 70 struct ring_buffer_event *event = NULL;
13292494 71 struct trace_buffer *buffer = NULL;
18cea459
LZ
72 int pc = 0;
73 int cpu = smp_processor_id();
74 bool blk_tracer = blk_tracer_enabled;
67c0496e 75 ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
18cea459
LZ
76
77 if (blk_tracer) {
1c5eb448 78 buffer = blk_tr->array_buffer.buffer;
18cea459 79 pc = preempt_count();
e77405ad 80 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
ca1136c9 81 sizeof(*t) + len + cgid_len,
18cea459
LZ
82 0, pc);
83 if (!event)
84 return;
85 t = ring_buffer_event_data(event);
86 goto record_it;
87 }
be1c6341 88
c71a8961
ACM
89 if (!bt->rchan)
90 return;
91
ca1136c9 92 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
d3d9d2a5 93 if (t) {
d3d9d2a5 94 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
2997c8c4 95 t->time = ktime_to_ns(ktime_get());
18cea459 96record_it:
d3d9d2a5 97 t->device = bt->dev;
ca1136c9 98 t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
d3d9d2a5
JA
99 t->pid = pid;
100 t->cpu = cpu;
ca1136c9 101 t->pdu_len = len + cgid_len;
67c0496e
TH
102 if (cgid_len)
103 memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
ca1136c9 104 memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
18cea459
LZ
105
106 if (blk_tracer)
b7f0c959 107 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
d3d9d2a5 108 }
be1c6341
OK
109}
110
2056a782
JA
111/*
112 * Send out a notify for this process, if we haven't done so since a trace
113 * started
114 */
a404d557 115static void trace_note_tsk(struct task_struct *tsk)
2056a782 116{
a404d557
JK
117 unsigned long flags;
118 struct blk_trace *bt;
119
a863055b 120 tsk->btrace_seq = blktrace_seq;
a404d557
JK
121 spin_lock_irqsave(&running_trace_lock, flags);
122 list_for_each_entry(bt, &running_trace_list, running_list) {
123 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
67c0496e 124 sizeof(tsk->comm), 0);
a404d557
JK
125 }
126 spin_unlock_irqrestore(&running_trace_lock, flags);
be1c6341 127}
2056a782 128
be1c6341
OK
129static void trace_note_time(struct blk_trace *bt)
130{
59a37f8b 131 struct timespec64 now;
be1c6341
OK
132 unsigned long flags;
133 u32 words[2];
134
59a37f8b
AB
135 /* need to check user space to see if this breaks in y2038 or y2106 */
136 ktime_get_real_ts64(&now);
137 words[0] = (u32)now.tv_sec;
be1c6341
OK
138 words[1] = now.tv_nsec;
139
140 local_irq_save(flags);
67c0496e 141 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
be1c6341 142 local_irq_restore(flags);
2056a782
JA
143}
144
35fe6d76
SL
145void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
146 const char *fmt, ...)
9d5f09a4
AB
147{
148 int n;
149 va_list args;
14a73f54 150 unsigned long flags;
64565911 151 char *buf;
9d5f09a4 152
18cea459
LZ
153 if (unlikely(bt->trace_state != Blktrace_running &&
154 !blk_tracer_enabled))
c71a8961
ACM
155 return;
156
490da40d
TM
157 /*
158 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
159 * message to the trace.
160 */
161 if (!(bt->act_mask & BLK_TC_NOTIFY))
162 return;
163
14a73f54 164 local_irq_save(flags);
d8a0349c 165 buf = this_cpu_ptr(bt->msg_data);
9d5f09a4 166 va_start(args, fmt);
64565911 167 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
9d5f09a4
AB
168 va_end(args);
169
35fe6d76
SL
170 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
171 blkcg = NULL;
172#ifdef CONFIG_BLK_CGROUP
173 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
74321038 174 blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
35fe6d76 175#else
67c0496e 176 trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 0);
35fe6d76 177#endif
14a73f54 178 local_irq_restore(flags);
9d5f09a4
AB
179}
180EXPORT_SYMBOL_GPL(__trace_note_message);
181
2056a782
JA
182static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
183 pid_t pid)
184{
185 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
186 return 1;
d0deef5b 187 if (sector && (sector < bt->start_lba || sector > bt->end_lba))
2056a782
JA
188 return 1;
189 if (bt->pid && pid != bt->pid)
190 return 1;
191
192 return 0;
193}
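/*
 * Example (illustrative, not part of the original source): if the trace was
 * set up with pid == 1234, events generated by any other task are dropped
 * here; likewise an I/O whose start sector falls outside the configured
 * [start_lba, end_lba] window never reaches the buffer.
 */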
194
195/*
196 * Data direction bit lookup
197 */
e4955c99
LZ
198static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
199 BLK_TC_ACT(BLK_TC_WRITE) };
2056a782 200
7b6d91da 201#define BLK_TC_RAHEAD BLK_TC_AHEAD
28a8f0d3 202#define BLK_TC_PREFLUSH BLK_TC_FLUSH
7b6d91da 203
35ba8f70 204/* The ilog2() calls fall out because they're constant */
7b6d91da
CH
205#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
206 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
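/*
 * Worked example (illustrative): MASK_TC_BIT(op_flags, SYNC) takes a set
 * REQ_SYNC bit (1 << __REQ_SYNC) and shifts it left by
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC, which lands it exactly on
 * BLK_TC_ACT(BLK_TC_SYNC), i.e. the SYNC category bit in the upper half of
 * the 32-bit action word.  If REQ_SYNC is clear, the expression is 0 and
 * nothing is ORed into "what".
 */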
2056a782
JA
207
208/*
209 * The worker for the various blk_add_trace*() types. Fills out a
210 * blk_io_trace structure and places it in a per-cpu subbuffer.
211 */
5f3ea37c 212static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
1b9a9ab7 213 int op, int op_flags, u32 what, int error, int pdu_len,
67c0496e 214 void *pdu_data, u64 cgid)
2056a782
JA
215{
216 struct task_struct *tsk = current;
c71a8961 217 struct ring_buffer_event *event = NULL;
13292494 218 struct trace_buffer *buffer = NULL;
2056a782 219 struct blk_io_trace *t;
0a987751 220 unsigned long flags = 0;
2056a782
JA
221 unsigned long *sequence;
222 pid_t pid;
c71a8961 223 int cpu, pc = 0;
18cea459 224 bool blk_tracer = blk_tracer_enabled;
67c0496e 225 ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
2056a782 226
18cea459 227 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
2056a782
JA
228 return;
229
1b9a9ab7
MC
230 what |= ddir_act[op_is_write(op) ? WRITE : READ];
231 what |= MASK_TC_BIT(op_flags, SYNC);
232 what |= MASK_TC_BIT(op_flags, RAHEAD);
233 what |= MASK_TC_BIT(op_flags, META);
28a8f0d3 234 what |= MASK_TC_BIT(op_flags, PREFLUSH);
1b9a9ab7 235 what |= MASK_TC_BIT(op_flags, FUA);
7afafc8a 236 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
1b9a9ab7 237 what |= BLK_TC_ACT(BLK_TC_DISCARD);
3a5e02ce
MC
238 if (op == REQ_OP_FLUSH)
239 what |= BLK_TC_ACT(BLK_TC_FLUSH);
ca1136c9
SL
240 if (cgid)
241 what |= __BLK_TA_CGROUP;
2056a782
JA
242
243 pid = tsk->pid;
d0deef5b 244 if (act_log_check(bt, what, sector, pid))
2056a782 245 return;
c71a8961
ACM
246 cpu = raw_smp_processor_id();
247
18cea459 248 if (blk_tracer) {
c71a8961
ACM
249 tracing_record_cmdline(current);
250
1c5eb448 251 buffer = blk_tr->array_buffer.buffer;
51a763dd 252 pc = preempt_count();
e77405ad 253 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
ca1136c9 254 sizeof(*t) + pdu_len + cgid_len,
51a763dd 255 0, pc);
c71a8961
ACM
256 if (!event)
257 return;
51a763dd 258 t = ring_buffer_event_data(event);
c71a8961
ACM
259 goto record_it;
260 }
2056a782 261
a404d557
JK
262 if (unlikely(tsk->btrace_seq != blktrace_seq))
263 trace_note_tsk(tsk);
264
2056a782
JA
265 /*
266 * A word about the locking here - we disable interrupts to reserve
267 * some space in the relay per-cpu buffer, to prevent an irq
14a73f54 268 * from coming in and stepping on our toes.
2056a782
JA
269 */
270 local_irq_save(flags);
ca1136c9 271 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
2056a782 272 if (t) {
2056a782
JA
273 sequence = per_cpu_ptr(bt->sequence, cpu);
274
275 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
276 t->sequence = ++(*sequence);
2997c8c4 277 t->time = ktime_to_ns(ktime_get());
c71a8961 278record_it:
08a06b83 279 /*
939b3669
ACM
280 * These two are not needed in ftrace as they are in the
281 * generic trace_entry, filled by tracing_generic_entry_update,
282 * but for the trace_event->bin() synthesizer benefit we do it
283 * here too.
284 */
285 t->cpu = cpu;
286 t->pid = pid;
08a06b83 287
2056a782
JA
288 t->sector = sector;
289 t->bytes = bytes;
290 t->action = what;
2056a782 291 t->device = bt->dev;
2056a782 292 t->error = error;
ca1136c9 293 t->pdu_len = pdu_len + cgid_len;
2056a782 294
ca1136c9 295 if (cgid_len)
67c0496e 296 memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
2056a782 297 if (pdu_len)
ca1136c9 298 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
c71a8961 299
18cea459 300 if (blk_tracer) {
b7f0c959 301 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
c71a8961
ACM
302 return;
303 }
2056a782
JA
304 }
305
306 local_irq_restore(flags);
307}
308
ad5dd549 309static void blk_trace_free(struct blk_trace *bt)
2056a782 310{
02c62304 311 debugfs_remove(bt->msg_file);
2056a782 312 debugfs_remove(bt->dropped_file);
f48fc4d3 313 relay_close(bt->rchan);
39cbb602 314 debugfs_remove(bt->dir);
2056a782 315 free_percpu(bt->sequence);
64565911 316 free_percpu(bt->msg_data);
2056a782 317 kfree(bt);
ad5dd549
LZ
318}
319
a6da0024
JA
320static void get_probe_ref(void)
321{
322 mutex_lock(&blk_probe_mutex);
323 if (++blk_probes_ref == 1)
324 blk_register_tracepoints();
325 mutex_unlock(&blk_probe_mutex);
326}
327
328static void put_probe_ref(void)
329{
330 mutex_lock(&blk_probe_mutex);
331 if (!--blk_probes_ref)
332 blk_unregister_tracepoints();
333 mutex_unlock(&blk_probe_mutex);
334}
335
ad5dd549
LZ
336static void blk_trace_cleanup(struct blk_trace *bt)
337{
c780e86d 338 synchronize_rcu();
ad5dd549 339 blk_trace_free(bt);
a6da0024 340 put_probe_ref();
2056a782
JA
341}
342
1f2cac10 343static int __blk_trace_remove(struct request_queue *q)
2056a782
JA
344{
345 struct blk_trace *bt;
346
347 bt = xchg(&q->blk_trace, NULL);
348 if (!bt)
349 return -EINVAL;
350
55547204 351 if (bt->trace_state != Blktrace_running)
2056a782
JA
352 blk_trace_cleanup(bt);
353
354 return 0;
355}
1f2cac10
JA
356
357int blk_trace_remove(struct request_queue *q)
358{
359 int ret;
360
361 mutex_lock(&q->blk_trace_mutex);
362 ret = __blk_trace_remove(q);
363 mutex_unlock(&q->blk_trace_mutex);
364
365 return ret;
366}
6da127ad 367EXPORT_SYMBOL_GPL(blk_trace_remove);
2056a782 368
2056a782
JA
369static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
370 size_t count, loff_t *ppos)
371{
372 struct blk_trace *bt = filp->private_data;
373 char buf[16];
374
375 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
376
377 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
378}
379
2b8693c0 380static const struct file_operations blk_dropped_fops = {
2056a782 381 .owner = THIS_MODULE,
234e3405 382 .open = simple_open,
2056a782 383 .read = blk_dropped_read,
6038f373 384 .llseek = default_llseek,
2056a782
JA
385};
386
02c62304
AB
387static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
388 size_t count, loff_t *ppos)
389{
390 char *msg;
391 struct blk_trace *bt;
392
7635b03a 393 if (count >= BLK_TN_MAX_MSG)
02c62304
AB
394 return -EINVAL;
395
16e5c1fc
AV
396 msg = memdup_user_nul(buffer, count);
397 if (IS_ERR(msg))
398 return PTR_ERR(msg);
02c62304 399
02c62304 400 bt = filp->private_data;
35fe6d76 401 __trace_note_message(bt, NULL, "%s", msg);
02c62304
AB
402 kfree(msg);
403
404 return count;
405}
406
407static const struct file_operations blk_msg_fops = {
408 .owner = THIS_MODULE,
234e3405 409 .open = simple_open,
02c62304 410 .write = blk_msg_write,
6038f373 411 .llseek = noop_llseek,
02c62304
AB
412};
413
2056a782
JA
414/*
415 * Keep track of how many times we encountered a full subbuffer, to aid
416 * the user space app in telling how many lost events there were.
417 */
418static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
419 void *prev_subbuf, size_t prev_padding)
420{
421 struct blk_trace *bt;
422
423 if (!relay_buf_full(buf))
424 return 1;
425
426 bt = buf->chan->private_data;
427 atomic_inc(&bt->dropped);
428 return 0;
429}
430
431static int blk_remove_buf_file_callback(struct dentry *dentry)
432{
433 debugfs_remove(dentry);
f48fc4d3 434
2056a782
JA
435 return 0;
436}
437
438static struct dentry *blk_create_buf_file_callback(const char *filename,
439 struct dentry *parent,
f4ae40a6 440 umode_t mode,
2056a782
JA
441 struct rchan_buf *buf,
442 int *is_global)
443{
444 return debugfs_create_file(filename, mode, parent, buf,
445 &relay_file_operations);
446}
447
448static struct rchan_callbacks blk_relay_callbacks = {
449 .subbuf_start = blk_subbuf_start_callback,
450 .create_buf_file = blk_create_buf_file_callback,
451 .remove_buf_file = blk_remove_buf_file_callback,
452};
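/*
 * With these callbacks, the relay_open() call in do_blk_trace_setup() below
 * creates one buffer file per CPU under the device's debugfs directory,
 * e.g. (illustrative path) /sys/kernel/debug/block/sda/trace0, trace1, ...,
 * which blktrace(8) reads to collect the binary events.
 */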
453
9908c309
LZ
454static void blk_trace_setup_lba(struct blk_trace *bt,
455 struct block_device *bdev)
456{
457 struct hd_struct *part = NULL;
458
459 if (bdev)
460 part = bdev->bd_part;
461
462 if (part) {
463 bt->start_lba = part->start_sect;
464 bt->end_lba = part->start_sect + part->nr_sects;
465 } else {
466 bt->start_lba = 0;
467 bt->end_lba = -1ULL;
468 }
469}
470
2056a782
JA
471/*
472 * Setup everything required to start tracing
473 */
a428d314
OS
474static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
475 struct block_device *bdev,
476 struct blk_user_trace_setup *buts)
2056a782 477{
cdea01b2 478 struct blk_trace *bt = NULL;
2056a782 479 struct dentry *dir = NULL;
ff14417c 480 int ret;
2056a782 481
171044d4 482 if (!buts->buf_size || !buts->buf_nr)
2056a782
JA
483 return -EINVAL;
484
e1a41324
LB
485 if (!blk_debugfs_root)
486 return -ENOENT;
487
0497b345
JA
488 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
489 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
2056a782
JA
490
491 /*
492 * some device names have larger paths - convert the slashes
493 * to underscores for this to work as expected
494 */
ff14417c 495 strreplace(buts->name, '/', '_');
2056a782 496
2056a782
JA
497 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
498 if (!bt)
ad5dd549 499 return -ENOMEM;
2056a782 500
ad5dd549 501 ret = -ENOMEM;
2056a782
JA
502 bt->sequence = alloc_percpu(unsigned long);
503 if (!bt->sequence)
504 goto err;
505
313e458f 506 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
64565911
JA
507 if (!bt->msg_data)
508 goto err;
509
2056a782 510 ret = -ENOENT;
f48fc4d3 511
6ac93117
OS
512 dir = debugfs_lookup(buts->name, blk_debugfs_root);
513 if (!dir)
514 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
2056a782 515
6da127ad 516 bt->dev = dev;
2056a782 517 atomic_set(&bt->dropped, 0);
a404d557 518 INIT_LIST_HEAD(&bt->running_list);
2056a782
JA
519
520 ret = -EIO;
939b3669
ACM
521 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
522 &blk_dropped_fops);
2056a782 523
02c62304 524 bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
02c62304 525
171044d4
AB
526 bt->rchan = relay_open("trace", dir, buts->buf_size,
527 buts->buf_nr, &blk_relay_callbacks, bt);
2056a782
JA
528 if (!bt->rchan)
529 goto err;
2056a782 530
171044d4 531 bt->act_mask = buts->act_mask;
2056a782
JA
532 if (!bt->act_mask)
533 bt->act_mask = (u16) -1;
534
9908c309 535 blk_trace_setup_lba(bt, bdev);
2056a782 536
d0deef5b
SD
537 /* overwrite with user settings */
538 if (buts->start_lba)
539 bt->start_lba = buts->start_lba;
540 if (buts->end_lba)
541 bt->end_lba = buts->end_lba;
542
171044d4 543 bt->pid = buts->pid;
2056a782
JA
544 bt->trace_state = Blktrace_setup;
545
546 ret = -EBUSY;
cdea01b2 547 if (cmpxchg(&q->blk_trace, NULL, bt))
2056a782 548 goto err;
2056a782 549
a6da0024 550 get_probe_ref();
cbe28296 551
6ac93117 552 ret = 0;
2056a782 553err:
6ac93117
OS
554 if (dir && !bt->dir)
555 dput(dir);
556 if (ret)
557 blk_trace_free(bt);
2056a782
JA
558 return ret;
559}
171044d4 560
1f2cac10
JA
561static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
562 struct block_device *bdev, char __user *arg)
171044d4
AB
563{
564 struct blk_user_trace_setup buts;
565 int ret;
566
567 ret = copy_from_user(&buts, arg, sizeof(buts));
568 if (ret)
569 return -EFAULT;
570
d0deef5b 571 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
171044d4
AB
572 if (ret)
573 return ret;
574
9a8c28c8 575 if (copy_to_user(arg, &buts, sizeof(buts))) {
2967acbb 576 __blk_trace_remove(q);
171044d4 577 return -EFAULT;
9a8c28c8 578 }
171044d4
AB
579 return 0;
580}
1f2cac10
JA
581
582int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
583 struct block_device *bdev,
584 char __user *arg)
585{
586 int ret;
587
588 mutex_lock(&q->blk_trace_mutex);
589 ret = __blk_trace_setup(q, name, dev, bdev, arg);
590 mutex_unlock(&q->blk_trace_mutex);
591
592 return ret;
593}
6da127ad 594EXPORT_SYMBOL_GPL(blk_trace_setup);
2056a782 595
62c2a7d9
AB
596#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
597static int compat_blk_trace_setup(struct request_queue *q, char *name,
598 dev_t dev, struct block_device *bdev,
599 char __user *arg)
600{
601 struct blk_user_trace_setup buts;
602 struct compat_blk_user_trace_setup cbuts;
603 int ret;
604
605 if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
606 return -EFAULT;
607
608 buts = (struct blk_user_trace_setup) {
609 .act_mask = cbuts.act_mask,
610 .buf_size = cbuts.buf_size,
611 .buf_nr = cbuts.buf_nr,
612 .start_lba = cbuts.start_lba,
613 .end_lba = cbuts.end_lba,
614 .pid = cbuts.pid,
615 };
62c2a7d9
AB
616
617 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
618 if (ret)
619 return ret;
620
f8c5e944 621 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
2967acbb 622 __blk_trace_remove(q);
62c2a7d9
AB
623 return -EFAULT;
624 }
625
626 return 0;
627}
628#endif
629
1f2cac10 630static int __blk_trace_startstop(struct request_queue *q, int start)
2056a782 631{
2056a782 632 int ret;
c780e86d 633 struct blk_trace *bt;
2056a782 634
c780e86d
JK
635 bt = rcu_dereference_protected(q->blk_trace,
636 lockdep_is_held(&q->blk_trace_mutex));
939b3669 637 if (bt == NULL)
2056a782
JA
638 return -EINVAL;
639
640 /*
641 * For starting a trace, we can transition from a setup or stopped
642 * trace. For stopping a trace, the state must be running
643 */
644 ret = -EINVAL;
645 if (start) {
646 if (bt->trace_state == Blktrace_setup ||
647 bt->trace_state == Blktrace_stopped) {
648 blktrace_seq++;
649 smp_mb();
650 bt->trace_state = Blktrace_running;
a404d557
JK
651 spin_lock_irq(&running_trace_lock);
652 list_add(&bt->running_list, &running_trace_list);
653 spin_unlock_irq(&running_trace_lock);
be1c6341
OK
654
655 trace_note_time(bt);
2056a782
JA
656 ret = 0;
657 }
658 } else {
659 if (bt->trace_state == Blktrace_running) {
660 bt->trace_state = Blktrace_stopped;
a404d557
JK
661 spin_lock_irq(&running_trace_lock);
662 list_del_init(&bt->running_list);
663 spin_unlock_irq(&running_trace_lock);
2056a782
JA
664 relay_flush(bt->rchan);
665 ret = 0;
666 }
667 }
668
669 return ret;
670}
1f2cac10
JA
671
672int blk_trace_startstop(struct request_queue *q, int start)
673{
674 int ret;
675
676 mutex_lock(&q->blk_trace_mutex);
677 ret = __blk_trace_startstop(q, start);
678 mutex_unlock(&q->blk_trace_mutex);
679
680 return ret;
681}
6da127ad 682EXPORT_SYMBOL_GPL(blk_trace_startstop);
2056a782 683
5acb3cc2
WL
684/*
685 * When reading or writing the blktrace sysfs files, the references to the
686 * opened sysfs or device files should prevent the underlying block device
687 * from being removed. So no further delete protection is really needed.
688 */
689
2056a782
JA
690/**
691 * blk_trace_ioctl: - handle the ioctls associated with tracing
692 * @bdev: the block device
ef18012b 693 * @cmd: the ioctl cmd
2056a782
JA
694 * @arg: the argument data, if any
695 *
696 **/
697int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
698{
165125e1 699 struct request_queue *q;
2056a782 700 int ret, start = 0;
6da127ad 701 char b[BDEVNAME_SIZE];
2056a782
JA
702
703 q = bdev_get_queue(bdev);
704 if (!q)
705 return -ENXIO;
706
5acb3cc2 707 mutex_lock(&q->blk_trace_mutex);
2056a782
JA
708
709 switch (cmd) {
710 case BLKTRACESETUP:
f36f21ec 711 bdevname(bdev, b);
1f2cac10 712 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
2056a782 713 break;
62c2a7d9
AB
714#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
715 case BLKTRACESETUP32:
716 bdevname(bdev, b);
717 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
718 break;
719#endif
2056a782
JA
720 case BLKTRACESTART:
721 start = 1;
f6d85f04 722 /* fall through */
2056a782 723 case BLKTRACESTOP:
1f2cac10 724 ret = __blk_trace_startstop(q, start);
2056a782
JA
725 break;
726 case BLKTRACETEARDOWN:
1f2cac10 727 ret = __blk_trace_remove(q);
2056a782
JA
728 break;
729 default:
730 ret = -ENOTTY;
731 break;
732 }
733
5acb3cc2 734 mutex_unlock(&q->blk_trace_mutex);
2056a782
JA
735 return ret;
736}
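/*
 * Illustrative userspace sequence (a sketch of what blktrace(8) does, with
 * assumed device name and buffer sizes; not part of this file):
 *
 *	struct blk_user_trace_setup buts = {
 *		.act_mask = 0xffff,		// trace everything
 *		.buf_size = 512 * 1024,		// relay sub-buffer size
 *		.buf_nr   = 4,			// number of sub-buffers
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);	// creates the debugfs files
 *	ioctl(fd, BLKTRACESTART, 0);
 *	// ... consume /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP, 0);
 *	ioctl(fd, BLKTRACETEARDOWN, 0);
 */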
737
738/**
739 * blk_trace_shutdown: - stop and cleanup trace structures
740 * @q: the request queue associated with the device
741 *
742 **/
165125e1 743void blk_trace_shutdown(struct request_queue *q)
2056a782 744{
1f2cac10 745 mutex_lock(&q->blk_trace_mutex);
c780e86d
JK
746 if (rcu_dereference_protected(q->blk_trace,
747 lockdep_is_held(&q->blk_trace_mutex))) {
1f2cac10
JA
748 __blk_trace_startstop(q, 0);
749 __blk_trace_remove(q);
6c5c9341 750 }
1f2cac10
JA
751
752 mutex_unlock(&q->blk_trace_mutex);
2056a782 753}
5f3ea37c 754
ca1136c9 755#ifdef CONFIG_BLK_CGROUP
67c0496e 756static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
ca1136c9 757{
c780e86d 758 struct blk_trace *bt;
ca1136c9 759
c780e86d
JK
760 /* We don't use the 'bt' value here except as an optimization... */
761 bt = rcu_dereference_protected(q->blk_trace, 1);
ca1136c9 762 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
67c0496e 763 return 0;
ca1136c9 764
db6638d7 765 if (!bio->bi_blkg)
67c0496e 766 return 0;
74321038 767 return cgroup_id(bio_blkcg(bio)->css.cgroup);
ca1136c9
SL
768}
769#else
67c0496e 770u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
ca1136c9 771{
67c0496e 772 return 0;
ca1136c9
SL
773}
774#endif
775
67c0496e 776static u64
ca1136c9
SL
777blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
778{
779 if (!rq->bio)
67c0496e 780 return 0;
ca1136c9
SL
781 /* Use the first bio */
782 return blk_trace_bio_get_cgid(q, rq->bio);
783}
784
5f3ea37c
ACM
785/*
786 * blktrace probes
787 */
788
789/**
790 * blk_add_trace_rq - Add a trace for a request oriented action
5f3ea37c 791 * @rq: the source request
caf7df12 792 * @error: return status to log
af5040da 793 * @nr_bytes: number of completed bytes
5f3ea37c 794 * @what: the action
ca1136c9 795 * @cgid: the cgroup info
5f3ea37c
ACM
796 *
797 * Description:
798 * Records an action against a request. Will log the bio offset + size.
799 *
800 **/
caf7df12 801static void blk_add_trace_rq(struct request *rq, int error,
67c0496e 802 unsigned int nr_bytes, u32 what, u64 cgid)
5f3ea37c 803{
c780e86d 804 struct blk_trace *bt;
5f3ea37c 805
c780e86d
JK
806 rcu_read_lock();
807 bt = rcu_dereference(rq->q->blk_trace);
808 if (likely(!bt)) {
809 rcu_read_unlock();
5f3ea37c 810 return;
c780e86d 811 }
5f3ea37c 812
57292b58 813 if (blk_rq_is_passthrough(rq))
5f3ea37c 814 what |= BLK_TC_ACT(BLK_TC_PC);
48b77ad6 815 else
5f3ea37c 816 what |= BLK_TC_ACT(BLK_TC_FS);
48b77ad6
CH
817
818 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
ca1136c9 819 rq->cmd_flags, what, error, 0, NULL, cgid);
c780e86d 820 rcu_read_unlock();
5f3ea37c
ACM
821}
822
38516ab5
SR
823static void blk_add_trace_rq_insert(void *ignore,
824 struct request_queue *q, struct request *rq)
5f3ea37c 825{
ca1136c9
SL
826 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
827 blk_trace_request_get_cgid(q, rq));
5f3ea37c
ACM
828}
829
38516ab5
SR
830static void blk_add_trace_rq_issue(void *ignore,
831 struct request_queue *q, struct request *rq)
5f3ea37c 832{
ca1136c9
SL
833 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
834 blk_trace_request_get_cgid(q, rq));
5f3ea37c
ACM
835}
836
38516ab5
SR
837static void blk_add_trace_rq_requeue(void *ignore,
838 struct request_queue *q,
939b3669 839 struct request *rq)
5f3ea37c 840{
ca1136c9
SL
841 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
842 blk_trace_request_get_cgid(q, rq));
5f3ea37c
ACM
843}
844
caf7df12
CH
845static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
846 int error, unsigned int nr_bytes)
5f3ea37c 847{
ca1136c9
SL
848 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
849 blk_trace_request_get_cgid(rq->q, rq));
5f3ea37c
ACM
850}
851
852/**
853 * blk_add_trace_bio - Add a trace for a bio oriented action
854 * @q: queue the io is for
855 * @bio: the source bio
856 * @what: the action
797a455d 857 * @error: error, if any
5f3ea37c
ACM
858 *
859 * Description:
860 * Records an action against a bio. Will log the bio offset + size.
861 *
862 **/
863static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
1690102d 864 u32 what, int error)
5f3ea37c 865{
c780e86d 866 struct blk_trace *bt;
5f3ea37c 867
c780e86d
JK
868 rcu_read_lock();
869 bt = rcu_dereference(q->blk_trace);
870 if (likely(!bt)) {
871 rcu_read_unlock();
5f3ea37c 872 return;
c780e86d 873 }
5f3ea37c 874
4f024f37 875 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1690102d
MPS
876 bio_op(bio), bio->bi_opf, what, error, 0, NULL,
877 blk_trace_bio_get_cgid(q, bio));
c780e86d 878 rcu_read_unlock();
5f3ea37c
ACM
879}
880
38516ab5
SR
881static void blk_add_trace_bio_bounce(void *ignore,
882 struct request_queue *q, struct bio *bio)
5f3ea37c 883{
1690102d 884 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
5f3ea37c
ACM
885}
886
0a82a8d1
LT
887static void blk_add_trace_bio_complete(void *ignore,
888 struct request_queue *q, struct bio *bio,
889 int error)
5f3ea37c 890{
1690102d 891 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
5f3ea37c
ACM
892}
893
38516ab5
SR
894static void blk_add_trace_bio_backmerge(void *ignore,
895 struct request_queue *q,
8c1cf6bb 896 struct request *rq,
939b3669 897 struct bio *bio)
5f3ea37c 898{
1690102d 899 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
5f3ea37c
ACM
900}
901
38516ab5
SR
902static void blk_add_trace_bio_frontmerge(void *ignore,
903 struct request_queue *q,
8c1cf6bb 904 struct request *rq,
939b3669 905 struct bio *bio)
5f3ea37c 906{
1690102d 907 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
5f3ea37c
ACM
908}
909
38516ab5
SR
910static void blk_add_trace_bio_queue(void *ignore,
911 struct request_queue *q, struct bio *bio)
5f3ea37c 912{
1690102d 913 blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
5f3ea37c
ACM
914}
915
38516ab5
SR
916static void blk_add_trace_getrq(void *ignore,
917 struct request_queue *q,
939b3669 918 struct bio *bio, int rw)
5f3ea37c
ACM
919{
920 if (bio)
1690102d 921 blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
5f3ea37c 922 else {
c780e86d 923 struct blk_trace *bt;
5f3ea37c 924
c780e86d
JK
925 rcu_read_lock();
926 bt = rcu_dereference(q->blk_trace);
5f3ea37c 927 if (bt)
1b9a9ab7 928 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
67c0496e 929 NULL, 0);
c780e86d 930 rcu_read_unlock();
5f3ea37c
ACM
931 }
932}
933
934
38516ab5
SR
935static void blk_add_trace_sleeprq(void *ignore,
936 struct request_queue *q,
939b3669 937 struct bio *bio, int rw)
5f3ea37c
ACM
938{
939 if (bio)
1690102d 940 blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
5f3ea37c 941 else {
c780e86d 942 struct blk_trace *bt;
5f3ea37c 943
c780e86d
JK
944 rcu_read_lock();
945 bt = rcu_dereference(q->blk_trace);
5f3ea37c 946 if (bt)
1b9a9ab7 947 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
67c0496e 948 0, 0, NULL, 0);
c780e86d 949 rcu_read_unlock();
5f3ea37c
ACM
950 }
951}
952
38516ab5 953static void blk_add_trace_plug(void *ignore, struct request_queue *q)
5f3ea37c 954{
c780e86d 955 struct blk_trace *bt;
5f3ea37c 956
c780e86d
JK
957 rcu_read_lock();
958 bt = rcu_dereference(q->blk_trace);
5f3ea37c 959 if (bt)
67c0496e 960 __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
c780e86d 961 rcu_read_unlock();
5f3ea37c
ACM
962}
963
49cac01e
JA
964static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
965 unsigned int depth, bool explicit)
5f3ea37c 966{
c780e86d 967 struct blk_trace *bt;
5f3ea37c 968
c780e86d
JK
969 rcu_read_lock();
970 bt = rcu_dereference(q->blk_trace);
5f3ea37c 971 if (bt) {
94b5eb28 972 __be64 rpdu = cpu_to_be64(depth);
49cac01e 973 u32 what;
5f3ea37c 974
49cac01e
JA
975 if (explicit)
976 what = BLK_TA_UNPLUG_IO;
977 else
978 what = BLK_TA_UNPLUG_TIMER;
979
67c0496e 980 __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
5f3ea37c 981 }
c780e86d 982 rcu_read_unlock();
5f3ea37c
ACM
983}
984
38516ab5
SR
985static void blk_add_trace_split(void *ignore,
986 struct request_queue *q, struct bio *bio,
5f3ea37c
ACM
987 unsigned int pdu)
988{
c780e86d 989 struct blk_trace *bt;
5f3ea37c 990
c780e86d
JK
991 rcu_read_lock();
992 bt = rcu_dereference(q->blk_trace);
5f3ea37c
ACM
993 if (bt) {
994 __be64 rpdu = cpu_to_be64(pdu);
995
4f024f37 996 __blk_add_trace(bt, bio->bi_iter.bi_sector,
1eff9d32 997 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
4e4cbee9 998 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
ca1136c9 999 &rpdu, blk_trace_bio_get_cgid(q, bio));
5f3ea37c 1000 }
c780e86d 1001 rcu_read_unlock();
5f3ea37c
ACM
1002}
1003
1004/**
d07335e5 1005 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
546cf44a 1006 * @ignore: trace callback data parameter (not used)
5f3ea37c
ACM
1007 * @q: queue the io is for
1008 * @bio: the source bio
1009 * @dev: target device
a42aaa3b 1010 * @from: source sector
5f3ea37c
ACM
1011 *
1012 * Description:
1013 * Device mapper or raid target sometimes needs to split a bio because
1014 * it spans a stripe (or similar). Add a trace for that action.
1015 *
1016 **/
d07335e5
MS
1017static void blk_add_trace_bio_remap(void *ignore,
1018 struct request_queue *q, struct bio *bio,
1019 dev_t dev, sector_t from)
5f3ea37c 1020{
c780e86d 1021 struct blk_trace *bt;
5f3ea37c
ACM
1022 struct blk_io_trace_remap r;
1023
c780e86d
JK
1024 rcu_read_lock();
1025 bt = rcu_dereference(q->blk_trace);
1026 if (likely(!bt)) {
1027 rcu_read_unlock();
5f3ea37c 1028 return;
c780e86d 1029 }
5f3ea37c 1030
a42aaa3b 1031 r.device_from = cpu_to_be32(dev);
74d46992 1032 r.device_to = cpu_to_be32(bio_dev(bio));
a42aaa3b 1033 r.sector_from = cpu_to_be64(from);
5f3ea37c 1034
4f024f37 1035 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
4e4cbee9 1036 bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
ca1136c9 1037 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
c780e86d 1038 rcu_read_unlock();
5f3ea37c
ACM
1039}
1040
b0da3f0d
JN
1041/**
1042 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
546cf44a 1043 * @ignore: trace callback data parameter (not used)
b0da3f0d
JN
1044 * @q: queue the io is for
1045 * @rq: the source request
1046 * @dev: target device
1047 * @from: source sector
1048 *
1049 * Description:
1050 * Device mapper remaps request to other devices.
1051 * Add a trace for that action.
1052 *
1053 **/
38516ab5
SR
1054static void blk_add_trace_rq_remap(void *ignore,
1055 struct request_queue *q,
b0da3f0d
JN
1056 struct request *rq, dev_t dev,
1057 sector_t from)
1058{
c780e86d 1059 struct blk_trace *bt;
b0da3f0d
JN
1060 struct blk_io_trace_remap r;
1061
c780e86d
JK
1062 rcu_read_lock();
1063 bt = rcu_dereference(q->blk_trace);
1064 if (likely(!bt)) {
1065 rcu_read_unlock();
b0da3f0d 1066 return;
c780e86d 1067 }
b0da3f0d
JN
1068
1069 r.device_from = cpu_to_be32(dev);
1070 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
1071 r.sector_from = cpu_to_be64(from);
1072
1073 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
caf7df12 1074 rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
ca1136c9 1075 sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
c780e86d 1076 rcu_read_unlock();
b0da3f0d
JN
1077}
1078
5f3ea37c
ACM
1079/**
1080 * blk_add_driver_data - Add binary message with driver-specific data
1081 * @q: queue the io is for
1082 * @rq: io request
1083 * @data: driver-specific data
1084 * @len: length of driver-specific data
1085 *
1086 * Description:
1087 * Some drivers might want to write driver-specific data per request.
1088 *
1089 **/
1090void blk_add_driver_data(struct request_queue *q,
1091 struct request *rq,
1092 void *data, size_t len)
1093{
c780e86d 1094 struct blk_trace *bt;
5f3ea37c 1095
c780e86d
JK
1096 rcu_read_lock();
1097 bt = rcu_dereference(q->blk_trace);
1098 if (likely(!bt)) {
1099 rcu_read_unlock();
5f3ea37c 1100 return;
c780e86d 1101 }
5f3ea37c 1102
48b77ad6 1103 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
ca1136c9
SL
1104 BLK_TA_DRV_DATA, 0, len, data,
1105 blk_trace_request_get_cgid(q, rq));
c780e86d 1106 rcu_read_unlock();
5f3ea37c
ACM
1107}
1108EXPORT_SYMBOL_GPL(blk_add_driver_data);
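/*
 * Illustrative caller sketch (hypothetical driver payload, not taken from an
 * in-tree user):
 *
 *	struct my_cmd_info {			// hypothetical per-request data
 *		u32 hw_queue;
 *		u32 fw_status;
 *	} info = { .hw_queue = 3, .fw_status = 0 };
 *
 *	blk_add_driver_data(rq->q, rq, &info, sizeof(info));
 *
 * The bytes are emitted as a BLK_TA_DRV_DATA event whose pdu carries the raw
 * payload for userspace to decode.
 */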
1109
3c289ba7 1110static void blk_register_tracepoints(void)
5f3ea37c
ACM
1111{
1112 int ret;
1113
38516ab5 1114 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
5f3ea37c 1115 WARN_ON(ret);
38516ab5 1116 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
5f3ea37c 1117 WARN_ON(ret);
38516ab5 1118 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
5f3ea37c 1119 WARN_ON(ret);
38516ab5 1120 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
5f3ea37c 1121 WARN_ON(ret);
38516ab5 1122 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
5f3ea37c 1123 WARN_ON(ret);
38516ab5 1124 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
5f3ea37c 1125 WARN_ON(ret);
38516ab5 1126 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
5f3ea37c 1127 WARN_ON(ret);
38516ab5 1128 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
5f3ea37c 1129 WARN_ON(ret);
38516ab5 1130 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
5f3ea37c 1131 WARN_ON(ret);
38516ab5 1132 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
5f3ea37c 1133 WARN_ON(ret);
38516ab5 1134 ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
5f3ea37c 1135 WARN_ON(ret);
38516ab5 1136 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
5f3ea37c 1137 WARN_ON(ret);
49cac01e 1138 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
5f3ea37c 1139 WARN_ON(ret);
38516ab5 1140 ret = register_trace_block_split(blk_add_trace_split, NULL);
5f3ea37c 1141 WARN_ON(ret);
d07335e5 1142 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
5f3ea37c 1143 WARN_ON(ret);
38516ab5 1144 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
b0da3f0d 1145 WARN_ON(ret);
5f3ea37c
ACM
1146}
1147
1148static void blk_unregister_tracepoints(void)
1149{
38516ab5 1150 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
d07335e5 1151 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
38516ab5 1152 unregister_trace_block_split(blk_add_trace_split, NULL);
49cac01e 1153 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
38516ab5
SR
1154 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1155 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1156 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1157 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1158 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1159 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1160 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1161 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1162 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1163 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1164 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1165 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
5f3ea37c
ACM
1166
1167 tracepoint_synchronize_unregister();
1168}
c71a8961
ACM
1169
1170/*
1171 * struct blk_io_tracer formatting routines
1172 */
1173
1174static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1175{
157f9c00 1176 int i = 0;
65796348 1177 int tc = t->action >> BLK_TC_SHIFT;
157f9c00 1178
ca1136c9 1179 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
18cea459
LZ
1180 rwbs[i++] = 'N';
1181 goto out;
1182 }
1183
c09c47ca
NK
1184 if (tc & BLK_TC_FLUSH)
1185 rwbs[i++] = 'F';
1186
65796348 1187 if (tc & BLK_TC_DISCARD)
157f9c00 1188 rwbs[i++] = 'D';
65796348 1189 else if (tc & BLK_TC_WRITE)
157f9c00
ACM
1190 rwbs[i++] = 'W';
1191 else if (t->bytes)
1192 rwbs[i++] = 'R';
1193 else
1194 rwbs[i++] = 'N';
1195
c09c47ca
NK
1196 if (tc & BLK_TC_FUA)
1197 rwbs[i++] = 'F';
65796348 1198 if (tc & BLK_TC_AHEAD)
157f9c00 1199 rwbs[i++] = 'A';
65796348 1200 if (tc & BLK_TC_SYNC)
157f9c00 1201 rwbs[i++] = 'S';
65796348 1202 if (tc & BLK_TC_META)
157f9c00 1203 rwbs[i++] = 'M';
18cea459 1204out:
157f9c00 1205 rwbs[i] = '\0';
c71a8961
ACM
1206}
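/*
 * Example (illustrative): an action word carrying
 * BLK_TC_WRITE | BLK_TC_SYNC | BLK_TC_FUA in its category bits decodes to
 * "WFS", a discard decodes to "D", and BLK_TN_MESSAGE notes always decode
 * to "N".
 */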
1207
1208static inline
1209const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1210{
1211 return (const struct blk_io_trace *)ent;
1212}
1213
ca1136c9
SL
1214static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1215{
67c0496e 1216 return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
ca1136c9
SL
1217}
1218
67c0496e 1219static inline u64 t_cgid(const struct trace_entry *ent)
c71a8961 1220{
67c0496e 1221 return *(u64 *)(te_blk_io_trace(ent) + 1);
ca1136c9
SL
1222}
1223
1224static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1225{
67c0496e 1226 return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
c71a8961
ACM
1227}
1228
66de7792
LZ
1229static inline u32 t_action(const struct trace_entry *ent)
1230{
1231 return te_blk_io_trace(ent)->action;
1232}
1233
1234static inline u32 t_bytes(const struct trace_entry *ent)
1235{
1236 return te_blk_io_trace(ent)->bytes;
1237}
1238
c71a8961
ACM
1239static inline u32 t_sec(const struct trace_entry *ent)
1240{
1241 return te_blk_io_trace(ent)->bytes >> 9;
1242}
1243
1244static inline unsigned long long t_sector(const struct trace_entry *ent)
1245{
1246 return te_blk_io_trace(ent)->sector;
1247}
1248
1249static inline __u16 t_error(const struct trace_entry *ent)
1250{
e0dc81be 1251 return te_blk_io_trace(ent)->error;
c71a8961
ACM
1252}
1253
ca1136c9 1254static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
c71a8961 1255{
ca1136c9 1256 const __u64 *val = pdu_start(ent, has_cg);
c71a8961
ACM
1257 return be64_to_cpu(*val);
1258}
1259
1260static void get_pdu_remap(const struct trace_entry *ent,
ca1136c9 1261 struct blk_io_trace_remap *r, bool has_cg)
c71a8961 1262{
ca1136c9 1263 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
a42aaa3b 1264 __u64 sector_from = __r->sector_from;
c71a8961 1265
c71a8961 1266 r->device_from = be32_to_cpu(__r->device_from);
a42aaa3b
AB
1267 r->device_to = be32_to_cpu(__r->device_to);
1268 r->sector_from = be64_to_cpu(sector_from);
c71a8961
ACM
1269}
1270
ca1136c9
SL
1271typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1272 bool has_cg);
b6a4b0c3 1273
ca1136c9
SL
1274static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1275 bool has_cg)
c71a8961 1276{
c09c47ca 1277 char rwbs[RWBS_LEN];
35ac51bf
LZ
1278 unsigned long long ts = iter->ts;
1279 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
c71a8961 1280 unsigned secs = (unsigned long)ts;
b6a4b0c3 1281 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
c71a8961
ACM
1282
1283 fill_rwbs(rwbs, t);
1284
f4a1d08c
SRRH
1285 trace_seq_printf(&iter->seq,
1286 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1287 MAJOR(t->device), MINOR(t->device), iter->cpu,
1288 secs, nsec_rem, iter->ent->pid, act, rwbs);
c71a8961
ACM
1289}
1290
ca1136c9
SL
1291static void blk_log_action(struct trace_iterator *iter, const char *act,
1292 bool has_cg)
c71a8961 1293{
c09c47ca 1294 char rwbs[RWBS_LEN];
b6a4b0c3
LZ
1295 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1296
c71a8961 1297 fill_rwbs(rwbs, t);
ca1136c9 1298 if (has_cg) {
67c0496e 1299 u64 id = t_cgid(iter->ent);
ca1136c9 1300
69fd5c39
SL
1301 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1302 char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1303
1304 cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1305 sizeof(blkcg_name_buf));
1306 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1307 MAJOR(t->device), MINOR(t->device),
1308 blkcg_name_buf, act, rwbs);
40430452
TH
1309 } else {
1310 /*
1311 * The cgid portion used to be "INO,GEN". Userland
1312 * builds a FILEID_INO32_GEN fid out of them and
1313 * opens the cgroup using open_by_handle_at(2).
1314 * While 32bit ino setups are still the same, 64bit
1315 * ones now use the 64bit ino as the whole ID and
1316 * no longer use generation.
1317 *
1318 * Regardless of the content, always output
1319 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1320 * be mapped back to @id on both 64 and 32bit ino
1321 * setups. See __kernfs_fh_to_dentry().
1322 */
69fd5c39 1323 trace_seq_printf(&iter->seq,
40430452 1324 "%3d,%-3d %llx,%-llx %2s %3s ",
ca1136c9 1325 MAJOR(t->device), MINOR(t->device),
40430452
TH
1326 id & U32_MAX, id >> 32, act, rwbs);
1327 }
ca1136c9
SL
1328 } else
1329 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1330 MAJOR(t->device), MINOR(t->device), act, rwbs);
c71a8961
ACM
1331}
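/*
 * Example (illustrative): with TRACE_BLK_OPT_CGROUP set but CGNAME unset, a
 * cgroup id of 0x0000000200001234 is printed as "1234,2" (low 32 bits first,
 * then the high 32 bits), so userspace can rebuild the FILEID_INO32_GEN
 * handle described above.
 */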
1332
ca1136c9
SL
1333static void blk_log_dump_pdu(struct trace_seq *s,
1334 const struct trace_entry *ent, bool has_cg)
66de7792 1335{
04986257 1336 const unsigned char *pdu_buf;
66de7792 1337 int pdu_len;
f4a1d08c 1338 int i, end;
66de7792 1339
ca1136c9
SL
1340 pdu_buf = pdu_start(ent, has_cg);
1341 pdu_len = pdu_real_len(ent, has_cg);
66de7792
LZ
1342
1343 if (!pdu_len)
f4a1d08c 1344 return;
66de7792
LZ
1345
1346 /* find the last zero that needs to be printed */
1347 for (end = pdu_len - 1; end >= 0; end--)
1348 if (pdu_buf[end])
1349 break;
1350 end++;
1351
f4a1d08c 1352 trace_seq_putc(s, '(');
66de7792
LZ
1353
1354 for (i = 0; i < pdu_len; i++) {
1355
f4a1d08c
SRRH
1356 trace_seq_printf(s, "%s%02x",
1357 i == 0 ? "" : " ", pdu_buf[i]);
66de7792
LZ
1358
1359 /*
1360 * stop when the rest is just zeroes and indicate so
1361 * with a ".." appended
1362 */
f4a1d08c
SRRH
1363 if (i == end && end != pdu_len - 1) {
1364 trace_seq_puts(s, " ..) ");
1365 return;
1366 }
66de7792
LZ
1367 }
1368
f4a1d08c 1369 trace_seq_puts(s, ") ");
66de7792
LZ
1370}
1371
ca1136c9 1372static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
c71a8961 1373{
4ca53085
SR
1374 char cmd[TASK_COMM_LEN];
1375
1376 trace_find_cmdline(ent->pid, cmd);
c71a8961 1377
66de7792 1378 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
f4a1d08c 1379 trace_seq_printf(s, "%u ", t_bytes(ent));
ca1136c9 1380 blk_log_dump_pdu(s, ent, has_cg);
f4a1d08c 1381 trace_seq_printf(s, "[%s]\n", cmd);
66de7792
LZ
1382 } else {
1383 if (t_sec(ent))
f4a1d08c 1384 trace_seq_printf(s, "%llu + %u [%s]\n",
66de7792 1385 t_sector(ent), t_sec(ent), cmd);
f4a1d08c
SRRH
1386 else
1387 trace_seq_printf(s, "[%s]\n", cmd);
66de7792 1388 }
c71a8961
ACM
1389}
1390
f4a1d08c 1391static void blk_log_with_error(struct trace_seq *s,
ca1136c9 1392 const struct trace_entry *ent, bool has_cg)
c71a8961 1393{
66de7792 1394 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
ca1136c9 1395 blk_log_dump_pdu(s, ent, has_cg);
f4a1d08c 1396 trace_seq_printf(s, "[%d]\n", t_error(ent));
66de7792
LZ
1397 } else {
1398 if (t_sec(ent))
f4a1d08c
SRRH
1399 trace_seq_printf(s, "%llu + %u [%d]\n",
1400 t_sector(ent),
1401 t_sec(ent), t_error(ent));
1402 else
1403 trace_seq_printf(s, "%llu [%d]\n",
1404 t_sector(ent), t_error(ent));
66de7792 1405 }
c71a8961
ACM
1406}
1407
ca1136c9 1408static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
c71a8961 1409{
a42aaa3b 1410 struct blk_io_trace_remap r = { .device_from = 0, };
c71a8961 1411
ca1136c9 1412 get_pdu_remap(ent, &r, has_cg);
f4a1d08c
SRRH
1413 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1414 t_sector(ent), t_sec(ent),
1415 MAJOR(r.device_from), MINOR(r.device_from),
1416 (unsigned long long)r.sector_from);
c71a8961
ACM
1417}
1418
ca1136c9 1419static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
c71a8961 1420{
4ca53085
SR
1421 char cmd[TASK_COMM_LEN];
1422
1423 trace_find_cmdline(ent->pid, cmd);
1424
f4a1d08c 1425 trace_seq_printf(s, "[%s]\n", cmd);
c71a8961
ACM
1426}
1427
ca1136c9 1428static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
c71a8961 1429{
4ca53085
SR
1430 char cmd[TASK_COMM_LEN];
1431
1432 trace_find_cmdline(ent->pid, cmd);
1433
ca1136c9 1434 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
c71a8961
ACM
1435}
1436
ca1136c9 1437static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
c71a8961 1438{
4ca53085
SR
1439 char cmd[TASK_COMM_LEN];
1440
1441 trace_find_cmdline(ent->pid, cmd);
1442
f4a1d08c 1443 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
ca1136c9 1444 get_pdu_int(ent, has_cg), cmd);
c71a8961
ACM
1445}
1446
ca1136c9
SL
1447static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1448 bool has_cg)
18cea459 1449{
18cea459 1450
ca1136c9
SL
1451 trace_seq_putmem(s, pdu_start(ent, has_cg),
1452 pdu_real_len(ent, has_cg));
f4a1d08c 1453 trace_seq_putc(s, '\n');
18cea459
LZ
1454}
1455
c71a8961
ACM
1456/*
1457 * struct tracer operations
1458 */
1459
1460static void blk_tracer_print_header(struct seq_file *m)
1461{
1462 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1463 return;
1464 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
1465 "# | | | | | |\n");
1466}
1467
1468static void blk_tracer_start(struct trace_array *tr)
1469{
ad5dd549 1470 blk_tracer_enabled = true;
c71a8961
ACM
1471}
1472
1473static int blk_tracer_init(struct trace_array *tr)
1474{
1475 blk_tr = tr;
1476 blk_tracer_start(tr);
c71a8961
ACM
1477 return 0;
1478}
1479
1480static void blk_tracer_stop(struct trace_array *tr)
1481{
ad5dd549 1482 blk_tracer_enabled = false;
c71a8961
ACM
1483}
1484
1485static void blk_tracer_reset(struct trace_array *tr)
1486{
c71a8961
ACM
1487 blk_tracer_stop(tr);
1488}
1489
e4955c99 1490static const struct {
c71a8961 1491 const char *act[2];
ca1136c9
SL
1492 void (*print)(struct trace_seq *s, const struct trace_entry *ent,
1493 bool has_cg);
e4955c99 1494} what2act[] = {
ef18012b 1495 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
c71a8961
ACM
1496 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
1497 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
1498 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
1499 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
1500 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
1501 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
1502 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
1503 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
1504 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
49cac01e 1505 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
c71a8961
ACM
1506 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
1507 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
1508 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
1509 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
1510};
1511
b6a4b0c3
LZ
1512static enum print_line_t print_one_line(struct trace_iterator *iter,
1513 bool classic)
c71a8961 1514{
983f938a 1515 struct trace_array *tr = iter->tr;
2c9b238e 1516 struct trace_seq *s = &iter->seq;
b6a4b0c3
LZ
1517 const struct blk_io_trace *t;
1518 u16 what;
b6a4b0c3
LZ
1519 bool long_act;
1520 blk_log_action_t *log_action;
ca1136c9 1521 bool has_cg;
c71a8961 1522
b6a4b0c3 1523 t = te_blk_io_trace(iter->ent);
ca1136c9 1524 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
983f938a 1525 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
b6a4b0c3 1526 log_action = classic ? &blk_log_action_classic : &blk_log_action;
ca1136c9 1527 has_cg = t->action & __BLK_TA_CGROUP;
08a06b83 1528
ca1136c9
SL
1529 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1530 log_action(iter, long_act ? "message" : "m", has_cg);
1531 blk_log_msg(s, iter->ent, has_cg);
b7d7641e 1532 return trace_handle_return(s);
18cea459
LZ
1533 }
1534
eb08f8eb 1535 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
f4a1d08c 1536 trace_seq_printf(s, "Unknown action %x\n", what);
c71a8961 1537 else {
ca1136c9
SL
1538 log_action(iter, what2act[what].act[long_act], has_cg);
1539 what2act[what].print(s, iter->ent, has_cg);
c71a8961 1540 }
f4a1d08c
SRRH
1541
1542 return trace_handle_return(s);
c71a8961
ACM
1543}
1544
b6a4b0c3 1545static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
a9a57763 1546 int flags, struct trace_event *event)
b6a4b0c3 1547{
b6a4b0c3
LZ
1548 return print_one_line(iter, false);
1549}
1550
f4a1d08c 1551static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
08a06b83
ACM
1552{
1553 struct trace_seq *s = &iter->seq;
1554 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1555 const int offset = offsetof(struct blk_io_trace, sector);
1556 struct blk_io_trace old = {
1557 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
6c051ce0 1558 .time = iter->ts,
08a06b83
ACM
1559 };
1560
f4a1d08c
SRRH
1561 trace_seq_putmem(s, &old, offset);
1562 trace_seq_putmem(s, &t->sector,
1563 sizeof(old) - offset + t->pdu_len);
08a06b83
ACM
1564}
1565
ae7462b4 1566static enum print_line_t
a9a57763
SR
1567blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1568 struct trace_event *event)
08a06b83 1569{
f4a1d08c
SRRH
1570 blk_trace_synthesize_old_trace(iter);
1571
1572 return trace_handle_return(&iter->seq);
08a06b83
ACM
1573}
1574
c71a8961
ACM
1575static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1576{
c71a8961
ACM
1577 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1578 return TRACE_TYPE_UNHANDLED;
1579
b6a4b0c3 1580 return print_one_line(iter, true);
c71a8961
ACM
1581}
1582
8c1a49ae
SRRH
1583static int
1584blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
f3948f88
LZ
1585{
1586 /* don't output context-info for blk_classic output */
1587 if (bit == TRACE_BLK_OPT_CLASSIC) {
1588 if (set)
983f938a 1589 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
f3948f88 1590 else
983f938a 1591 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
f3948f88
LZ
1592 }
1593 return 0;
1594}
1595
c71a8961
ACM
1596static struct tracer blk_tracer __read_mostly = {
1597 .name = "blk",
1598 .init = blk_tracer_init,
1599 .reset = blk_tracer_reset,
1600 .start = blk_tracer_start,
1601 .stop = blk_tracer_stop,
1602 .print_header = blk_tracer_print_header,
1603 .print_line = blk_tracer_print_line,
1604 .flags = &blk_tracer_flags,
f3948f88 1605 .set_flag = blk_tracer_set_flag,
c71a8961
ACM
1606};
1607
a9a57763 1608static struct trace_event_functions trace_blk_event_funcs = {
c71a8961 1609 .trace = blk_trace_event_print,
08a06b83 1610 .binary = blk_trace_event_print_binary,
c71a8961
ACM
1611};
1612
a9a57763
SR
1613static struct trace_event trace_blk_event = {
1614 .type = TRACE_BLK,
1615 .funcs = &trace_blk_event_funcs,
1616};
1617
c71a8961
ACM
1618static int __init init_blk_tracer(void)
1619{
9023c930 1620 if (!register_trace_event(&trace_blk_event)) {
a395d6a7 1621 pr_warn("Warning: could not register block events\n");
c71a8961
ACM
1622 return 1;
1623 }
1624
1625 if (register_tracer(&blk_tracer) != 0) {
a395d6a7 1626 pr_warn("Warning: could not register the block tracer\n");
9023c930 1627 unregister_trace_event(&trace_blk_event);
c71a8961
ACM
1628 return 1;
1629 }
1630
1631 return 0;
1632}
1633
1634device_initcall(init_blk_tracer);
1635
1636static int blk_trace_remove_queue(struct request_queue *q)
1637{
1638 struct blk_trace *bt;
1639
1640 bt = xchg(&q->blk_trace, NULL);
1641 if (bt == NULL)
1642 return -EINVAL;
1643
a6da0024 1644 put_probe_ref();
c780e86d 1645 synchronize_rcu();
ad5dd549 1646 blk_trace_free(bt);
c71a8961
ACM
1647 return 0;
1648}
1649
1650/*
1651 * Setup everything required to start tracing
1652 */
9908c309
LZ
1653static int blk_trace_setup_queue(struct request_queue *q,
1654 struct block_device *bdev)
c71a8961 1655{
cdea01b2 1656 struct blk_trace *bt = NULL;
18cea459 1657 int ret = -ENOMEM;
c71a8961 1658
c71a8961
ACM
1659 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1660 if (!bt)
15152e44 1661 return -ENOMEM;
c71a8961 1662
18cea459
LZ
1663 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1664 if (!bt->msg_data)
1665 goto free_bt;
1666
9908c309 1667 bt->dev = bdev->bd_dev;
c71a8961 1668 bt->act_mask = (u16)-1;
9908c309
LZ
1669
1670 blk_trace_setup_lba(bt, bdev);
c71a8961 1671
cdea01b2
DB
1672 ret = -EBUSY;
1673 if (cmpxchg(&q->blk_trace, NULL, bt))
18cea459 1674 goto free_bt;
15152e44 1675
a6da0024 1676 get_probe_ref();
c71a8961 1677 return 0;
18cea459
LZ
1678
1679free_bt:
1680 blk_trace_free(bt);
1681 return ret;
c71a8961
ACM
1682}
1683
1684/*
1685 * sysfs interface to enable and configure tracing
1686 */
1687
c71a8961
ACM
1688static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1689 struct device_attribute *attr,
1690 char *buf);
1691static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1692 struct device_attribute *attr,
1693 const char *buf, size_t count);
1694#define BLK_TRACE_DEVICE_ATTR(_name) \
1695 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1696 sysfs_blk_trace_attr_show, \
1697 sysfs_blk_trace_attr_store)
1698
cd649b8b 1699static BLK_TRACE_DEVICE_ATTR(enable);
c71a8961
ACM
1700static BLK_TRACE_DEVICE_ATTR(act_mask);
1701static BLK_TRACE_DEVICE_ATTR(pid);
1702static BLK_TRACE_DEVICE_ATTR(start_lba);
1703static BLK_TRACE_DEVICE_ATTR(end_lba);
1704
1705static struct attribute *blk_trace_attrs[] = {
1706 &dev_attr_enable.attr,
1707 &dev_attr_act_mask.attr,
1708 &dev_attr_pid.attr,
1709 &dev_attr_start_lba.attr,
1710 &dev_attr_end_lba.attr,
1711 NULL
1712};
1713
1714struct attribute_group blk_trace_attr_group = {
1715 .name = "trace",
1716 .attrs = blk_trace_attrs,
1717};
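/*
 * Illustrative sysfs usage (assumes a disk named "sda" and debugfs mounted
 * at /sys/kernel/debug; not part of this file):
 *
 *	echo "read,write,complete" > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Writing act_mask (or pid/start_lba/end_lba) while tracing is disabled makes
 * the store handler below set up the queue on demand, so the attributes can
 * be written in either order relative to "enable".
 */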
1718
09341997
LZ
1719static const struct {
1720 int mask;
1721 const char *str;
1722} mask_maps[] = {
1723 { BLK_TC_READ, "read" },
1724 { BLK_TC_WRITE, "write" },
c09c47ca 1725 { BLK_TC_FLUSH, "flush" },
09341997
LZ
1726 { BLK_TC_SYNC, "sync" },
1727 { BLK_TC_QUEUE, "queue" },
1728 { BLK_TC_REQUEUE, "requeue" },
1729 { BLK_TC_ISSUE, "issue" },
1730 { BLK_TC_COMPLETE, "complete" },
1731 { BLK_TC_FS, "fs" },
1732 { BLK_TC_PC, "pc" },
8d1547e0 1733 { BLK_TC_NOTIFY, "notify" },
09341997
LZ
1734 { BLK_TC_AHEAD, "ahead" },
1735 { BLK_TC_META, "meta" },
1736 { BLK_TC_DISCARD, "discard" },
1737 { BLK_TC_DRV_DATA, "drv_data" },
c09c47ca 1738 { BLK_TC_FUA, "fua" },
09341997
LZ
1739};
1740
1741static int blk_trace_str2mask(const char *str)
c71a8961 1742{
09341997 1743 int i;
c71a8961 1744 int mask = 0;
9eb85125 1745 char *buf, *s, *token;
c71a8961 1746
9eb85125
LZ
1747 buf = kstrdup(str, GFP_KERNEL);
1748 if (buf == NULL)
c71a8961 1749 return -ENOMEM;
9eb85125 1750 s = strstrip(buf);
c71a8961
ACM
1751
1752 while (1) {
09341997
LZ
1753 token = strsep(&s, ",");
1754 if (token == NULL)
c71a8961
ACM
1755 break;
1756
09341997
LZ
1757 if (*token == '\0')
1758 continue;
1759
1760 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1761 if (strcasecmp(token, mask_maps[i].str) == 0) {
1762 mask |= mask_maps[i].mask;
1763 break;
1764 }
1765 }
1766 if (i == ARRAY_SIZE(mask_maps)) {
1767 mask = -EINVAL;
1768 break;
1769 }
c71a8961 1770 }
9eb85125 1771 kfree(buf);
c71a8961
ACM
1772
1773 return mask;
1774}
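/*
 * Example (illustrative): blk_trace_str2mask("read,write,complete") returns
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_COMPLETE, while an unknown token such
 * as "foo" makes the whole parse fail with -EINVAL.
 */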
1775
09341997
LZ
1776static ssize_t blk_trace_mask2str(char *buf, int mask)
1777{
1778 int i;
1779 char *p = buf;
1780
1781 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1782 if (mask & mask_maps[i].mask) {
1783 p += sprintf(p, "%s%s",
1784 (p == buf) ? "" : ",", mask_maps[i].str);
1785 }
1786 }
1787 *p++ = '\n';
1788
1789 return p - buf;
1790}
1791
b125130b
LZ
1792static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
1793{
1794 if (bdev->bd_disk == NULL)
1795 return NULL;
1796
1797 return bdev_get_queue(bdev);
1798}
1799
c71a8961
ACM
1800static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1801 struct device_attribute *attr,
1802 char *buf)
1803{
1804 struct hd_struct *p = dev_to_part(dev);
1805 struct request_queue *q;
1806 struct block_device *bdev;
c780e86d 1807 struct blk_trace *bt;
c71a8961
ACM
1808 ssize_t ret = -ENXIO;
1809
c71a8961
ACM
1810 bdev = bdget(part_devt(p));
1811 if (bdev == NULL)
01b284f9 1812 goto out;
c71a8961 1813
b125130b 1814 q = blk_trace_get_queue(bdev);
c71a8961
ACM
1815 if (q == NULL)
1816 goto out_bdput;
b125130b 1817
5acb3cc2 1818 mutex_lock(&q->blk_trace_mutex);
cd649b8b 1819
c780e86d
JK
1820 bt = rcu_dereference_protected(q->blk_trace,
1821 lockdep_is_held(&q->blk_trace_mutex));
cd649b8b 1822 if (attr == &dev_attr_enable) {
c780e86d 1823 ret = sprintf(buf, "%u\n", !!bt);
cd649b8b
LZ
1824 goto out_unlock_bdev;
1825 }
1826
c780e86d 1827 if (bt == NULL)
c71a8961
ACM
1828 ret = sprintf(buf, "disabled\n");
1829 else if (attr == &dev_attr_act_mask)
c780e86d 1830 ret = blk_trace_mask2str(buf, bt->act_mask);
c71a8961 1831 else if (attr == &dev_attr_pid)
c780e86d 1832 ret = sprintf(buf, "%u\n", bt->pid);
c71a8961 1833 else if (attr == &dev_attr_start_lba)
c780e86d 1834 ret = sprintf(buf, "%llu\n", bt->start_lba);
c71a8961 1835 else if (attr == &dev_attr_end_lba)
c780e86d 1836 ret = sprintf(buf, "%llu\n", bt->end_lba);
cd649b8b
LZ
1837
1838out_unlock_bdev:
5acb3cc2 1839 mutex_unlock(&q->blk_trace_mutex);
c71a8961
ACM
1840out_bdput:
1841 bdput(bdev);
01b284f9 1842out:
c71a8961
ACM
1843 return ret;
1844}
1845
1846static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1847 struct device_attribute *attr,
1848 const char *buf, size_t count)
1849{
1850 struct block_device *bdev;
1851 struct request_queue *q;
1852 struct hd_struct *p;
c780e86d 1853 struct blk_trace *bt;
c71a8961 1854 u64 value;
09341997 1855 ssize_t ret = -EINVAL;
c71a8961
ACM
1856
1857 if (count == 0)
1858 goto out;
1859
1860 if (attr == &dev_attr_act_mask) {
5f339453 1861 if (kstrtoull(buf, 0, &value)) {
c71a8961 1862 /* Assume it is a list of trace category names */
09341997
LZ
1863 ret = blk_trace_str2mask(buf);
1864 if (ret < 0)
c71a8961 1865 goto out;
09341997 1866 value = ret;
c71a8961 1867 }
5f339453 1868 } else if (kstrtoull(buf, 0, &value))
c71a8961
ACM
1869 goto out;
1870
09341997
LZ
1871 ret = -ENXIO;
1872
c71a8961
ACM
1873 p = dev_to_part(dev);
1874 bdev = bdget(part_devt(p));
1875 if (bdev == NULL)
01b284f9 1876 goto out;
c71a8961 1877
b125130b 1878 q = blk_trace_get_queue(bdev);
c71a8961
ACM
1879 if (q == NULL)
1880 goto out_bdput;
1881
5acb3cc2 1882 mutex_lock(&q->blk_trace_mutex);
cd649b8b 1883
c780e86d
JK
1884 bt = rcu_dereference_protected(q->blk_trace,
1885 lockdep_is_held(&q->blk_trace_mutex));
cd649b8b 1886 if (attr == &dev_attr_enable) {
c780e86d 1887 if (!!value == !!bt) {
757d9140
SRV
1888 ret = 0;
1889 goto out_unlock_bdev;
1890 }
cd649b8b 1891 if (value)
9908c309 1892 ret = blk_trace_setup_queue(q, bdev);
cd649b8b
LZ
1893 else
1894 ret = blk_trace_remove_queue(q);
1895 goto out_unlock_bdev;
1896 }
1897
c71a8961 1898 ret = 0;
c780e86d 1899 if (bt == NULL)
9908c309 1900 ret = blk_trace_setup_queue(q, bdev);
c71a8961
ACM
1901
1902 if (ret == 0) {
1903 if (attr == &dev_attr_act_mask)
c780e86d 1904 bt->act_mask = value;
c71a8961 1905 else if (attr == &dev_attr_pid)
c780e86d 1906 bt->pid = value;
c71a8961 1907 else if (attr == &dev_attr_start_lba)
c780e86d 1908 bt->start_lba = value;
c71a8961 1909 else if (attr == &dev_attr_end_lba)
c780e86d 1910 bt->end_lba = value;
c71a8961 1911 }
cd649b8b
LZ
1912
1913out_unlock_bdev:
5acb3cc2 1914 mutex_unlock(&q->blk_trace_mutex);
c71a8961
ACM
1915out_bdput:
1916 bdput(bdev);
c71a8961 1917out:
cd649b8b 1918 return ret ? ret : count;
c71a8961 1919}
cd649b8b 1920
1d54ad6d
LZ
1921int blk_trace_init_sysfs(struct device *dev)
1922{
1923 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1924}
1925
48c0d4d4
ZK
1926void blk_trace_remove_sysfs(struct device *dev)
1927{
1928 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1929}
1930
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
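/*
 * Example (illustrative): blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA, 4096)
 * yields "WFS", REQ_OP_READ | REQ_RAHEAD yields "RA", and a bare
 * REQ_OP_FLUSH yields "F".  The bytes argument is unused in this version.
 */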

#endif /* CONFIG_EVENT_TRACING */