1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
4 *
5 */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/blkdev.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/percpu.h>
13 #include <linux/init.h>
14 #include <linux/mutex.h>
15 #include <linux/slab.h>
16 #include <linux/debugfs.h>
17 #include <linux/export.h>
18 #include <linux/time.h>
19 #include <linux/uaccess.h>
20 #include <linux/list.h>
21 #include <linux/blk-cgroup.h>
22
23 #include "../../block/blk.h"
24
25 #include <trace/events/block.h>
26
27 #include "trace_output.h"
28
29 #ifdef CONFIG_BLK_DEV_IO_TRACE
30
31 static unsigned int blktrace_seq __read_mostly = 1;
32
33 static struct trace_array *blk_tr;
34 static bool blk_tracer_enabled __read_mostly;
35
36 static LIST_HEAD(running_trace_list);
37 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
38
39 /* Select an alternative, minimalistic output rather than the original one */
40 #define TRACE_BLK_OPT_CLASSIC 0x1
41 #define TRACE_BLK_OPT_CGROUP 0x2
42 #define TRACE_BLK_OPT_CGNAME 0x4
43
44 static struct tracer_opt blk_tracer_opts[] = {
45 /* Disable the minimalistic output by default */
46 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
47 #ifdef CONFIG_BLK_CGROUP
48 { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
49 { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
50 #endif
51 { }
52 };
53
54 static struct tracer_flags blk_tracer_flags = {
55 .val = 0,
56 .opts = blk_tracer_opts,
57 };
58
59 /* Global reference count of probes */
60 static DEFINE_MUTEX(blk_probe_mutex);
61 static int blk_probes_ref;
62
63 static void blk_register_tracepoints(void);
64 static void blk_unregister_tracepoints(void);
65
66 /*
67 * Send out a notify message.
68 */
69 static void trace_note(struct blk_trace *bt, pid_t pid, int action,
70 const void *data, size_t len, u64 cgid)
71 {
72 struct blk_io_trace *t;
73 struct ring_buffer_event *event = NULL;
74 struct trace_buffer *buffer = NULL;
75 unsigned int trace_ctx = 0;
76 int cpu = smp_processor_id();
77 bool blk_tracer = blk_tracer_enabled;
78 ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
79
80 if (blk_tracer) {
81 buffer = blk_tr->array_buffer.buffer;
82 trace_ctx = tracing_gen_ctx_flags(0);
83 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
84 sizeof(*t) + len + cgid_len,
85 trace_ctx);
86 if (!event)
87 return;
88 t = ring_buffer_event_data(event);
89 goto record_it;
90 }
91
92 if (!bt->rchan)
93 return;
94
95 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
96 if (t) {
97 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
98 t->time = ktime_to_ns(ktime_get());
99 record_it:
100 t->device = bt->dev;
101 t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
102 t->pid = pid;
103 t->cpu = cpu;
104 t->pdu_len = len + cgid_len;
105 if (cgid_len)
106 memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
107 memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
108
109 if (blk_tracer)
110 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
111 }
112 }
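
/*
 * Resulting record layout in the relay/ring buffer (a sketch; the cgid
 * field is only present when a cgroup id was passed in):
 *
 *	| struct blk_io_trace | u64 cgid (optional) | payload (len bytes) |
 *
 * t->pdu_len covers both the optional cgid and the payload, which is how
 * the readers further below (pdu_start()/pdu_real_len()) locate them.
 */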
113
114 /*
115 * Send out a notify for this process, if we haven't done so since a trace
116 * started
117 */
118 static void trace_note_tsk(struct task_struct *tsk)
119 {
120 unsigned long flags;
121 struct blk_trace *bt;
122
123 tsk->btrace_seq = blktrace_seq;
124 raw_spin_lock_irqsave(&running_trace_lock, flags);
125 list_for_each_entry(bt, &running_trace_list, running_list) {
126 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
127 sizeof(tsk->comm), 0);
128 }
129 raw_spin_unlock_irqrestore(&running_trace_lock, flags);
130 }
131
132 static void trace_note_time(struct blk_trace *bt)
133 {
134 struct timespec64 now;
135 unsigned long flags;
136 u32 words[2];
137
138 /* need to check user space to see if this breaks in y2038 or y2106 */
139 ktime_get_real_ts64(&now);
140 words[0] = (u32)now.tv_sec;
141 words[1] = now.tv_nsec;
142
143 local_irq_save(flags);
144 trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
145 local_irq_restore(flags);
146 }
147
148 void __blk_trace_note_message(struct blk_trace *bt,
149 struct cgroup_subsys_state *css, const char *fmt, ...)
150 {
151 int n;
152 va_list args;
153 unsigned long flags;
154 char *buf;
155 u64 cgid = 0;
156
157 if (unlikely(bt->trace_state != Blktrace_running &&
158 !blk_tracer_enabled))
159 return;
160
161 /*
162 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
163 * message to the trace.
164 */
165 if (!(bt->act_mask & BLK_TC_NOTIFY))
166 return;
167
168 local_irq_save(flags);
169 buf = this_cpu_ptr(bt->msg_data);
170 va_start(args, fmt);
171 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
172 va_end(args);
173
174 #ifdef CONFIG_BLK_CGROUP
175 if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
176 cgid = cgroup_id(css->cgroup);
177 else
178 cgid = 1;
179 #endif
180 trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
181 local_irq_restore(flags);
182 }
183 EXPORT_SYMBOL_GPL(__blk_trace_note_message);
184
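/*
 * Filter check: returns 1 when the event should be dropped, i.e. its
 * trace category is masked out by act_mask, its (non-zero) sector lies
 * outside the [start_lba, end_lba] window, or it was issued by a PID
 * other than the one being traced; 0 means "log it".
 */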
185 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
186 pid_t pid)
187 {
188 if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
189 return 1;
190 if (sector && (sector < bt->start_lba || sector > bt->end_lba))
191 return 1;
192 if (bt->pid && pid != bt->pid)
193 return 1;
194
195 return 0;
196 }
197
198 /*
199 * Data direction bit lookup
200 */
201 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
202 BLK_TC_ACT(BLK_TC_WRITE) };
203
204 #define BLK_TC_RAHEAD BLK_TC_AHEAD
205 #define BLK_TC_PREFLUSH BLK_TC_FLUSH
206
207 /* The ilog2() calls fall out because they're constant */
208 #define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
209 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
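
/*
 * Worked example (a sketch; exact bit positions vary by kernel version):
 * MASK_TC_BIT(opf, SYNC) isolates the REQ_SYNC bit in opf and shifts it
 * from bit __REQ_SYNC up to bit ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT, i.e.
 * onto the BLK_TC_SYNC position in the upper half of the action word,
 * so a set request flag lands on its trace category without branching.
 */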
210
211 /*
212 * The worker for the various blk_add_trace*() types. Fills out a
213 * blk_io_trace structure and places it in a per-cpu subbuffer.
214 */
215 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
216 const blk_opf_t opf, u32 what, int error,
217 int pdu_len, void *pdu_data, u64 cgid)
218 {
219 struct task_struct *tsk = current;
220 struct ring_buffer_event *event = NULL;
221 struct trace_buffer *buffer = NULL;
222 struct blk_io_trace *t;
223 unsigned long flags = 0;
224 unsigned long *sequence;
225 unsigned int trace_ctx = 0;
226 pid_t pid;
227 int cpu;
228 bool blk_tracer = blk_tracer_enabled;
229 ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
230 const enum req_op op = opf & REQ_OP_MASK;
231
232 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
233 return;
234
235 what |= ddir_act[op_is_write(op) ? WRITE : READ];
236 what |= MASK_TC_BIT(opf, SYNC);
237 what |= MASK_TC_BIT(opf, RAHEAD);
238 what |= MASK_TC_BIT(opf, META);
239 what |= MASK_TC_BIT(opf, PREFLUSH);
240 what |= MASK_TC_BIT(opf, FUA);
241 if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
242 what |= BLK_TC_ACT(BLK_TC_DISCARD);
243 if (op == REQ_OP_FLUSH)
244 what |= BLK_TC_ACT(BLK_TC_FLUSH);
245 if (cgid)
246 what |= __BLK_TA_CGROUP;
247
248 pid = tsk->pid;
249 if (act_log_check(bt, what, sector, pid))
250 return;
251 cpu = raw_smp_processor_id();
252
253 if (blk_tracer) {
254 tracing_record_cmdline(current);
255
256 buffer = blk_tr->array_buffer.buffer;
257 trace_ctx = tracing_gen_ctx_flags(0);
258 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
259 sizeof(*t) + pdu_len + cgid_len,
260 trace_ctx);
261 if (!event)
262 return;
263 t = ring_buffer_event_data(event);
264 goto record_it;
265 }
266
267 if (unlikely(tsk->btrace_seq != blktrace_seq))
268 trace_note_tsk(tsk);
269
270 /*
271 * A word about the locking here - we disable interrupts to reserve
272 * some space in the relay per-cpu buffer, to prevent an irq
273 * from coming in and stepping on our toes.
274 */
275 local_irq_save(flags);
276 t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
277 if (t) {
278 sequence = per_cpu_ptr(bt->sequence, cpu);
279
280 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
281 t->sequence = ++(*sequence);
282 t->time = ktime_to_ns(ktime_get());
283 record_it:
284 /*
285 * These two are not needed in ftrace as they are in the
286 * generic trace_entry, filled by tracing_generic_entry_update,
287 * but for the trace_event->bin() synthesizer benefit we do it
288 * here too.
289 */
290 t->cpu = cpu;
291 t->pid = pid;
292
293 t->sector = sector;
294 t->bytes = bytes;
295 t->action = what;
296 t->device = bt->dev;
297 t->error = error;
298 t->pdu_len = pdu_len + cgid_len;
299
300 if (cgid_len)
301 memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
302 if (pdu_len)
303 memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
304
305 if (blk_tracer) {
306 trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
307 return;
308 }
309 }
310
311 local_irq_restore(flags);
312 }
313
314 static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
315 {
316 relay_close(bt->rchan);
317
318 /*
319 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
320 * under 'q->debugfs_dir', thus lookup and remove them.
321 */
322 if (!bt->dir) {
323 debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
324 debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
325 } else {
326 debugfs_remove(bt->dir);
327 }
328 free_percpu(bt->sequence);
329 free_percpu(bt->msg_data);
330 kfree(bt);
331 }
332
333 static void get_probe_ref(void)
334 {
335 mutex_lock(&blk_probe_mutex);
336 if (++blk_probes_ref == 1)
337 blk_register_tracepoints();
338 mutex_unlock(&blk_probe_mutex);
339 }
340
341 static void put_probe_ref(void)
342 {
343 mutex_lock(&blk_probe_mutex);
344 if (!--blk_probes_ref)
345 blk_unregister_tracepoints();
346 mutex_unlock(&blk_probe_mutex);
347 }
348
349 static int blk_trace_start(struct blk_trace *bt)
350 {
351 if (bt->trace_state != Blktrace_setup &&
352 bt->trace_state != Blktrace_stopped)
353 return -EINVAL;
354
355 blktrace_seq++;
356 smp_mb();
357 bt->trace_state = Blktrace_running;
358 raw_spin_lock_irq(&running_trace_lock);
359 list_add(&bt->running_list, &running_trace_list);
360 raw_spin_unlock_irq(&running_trace_lock);
361 trace_note_time(bt);
362
363 return 0;
364 }
365
366 static int blk_trace_stop(struct blk_trace *bt)
367 {
368 if (bt->trace_state != Blktrace_running)
369 return -EINVAL;
370
371 bt->trace_state = Blktrace_stopped;
372 raw_spin_lock_irq(&running_trace_lock);
373 list_del_init(&bt->running_list);
374 raw_spin_unlock_irq(&running_trace_lock);
375 relay_flush(bt->rchan);
376
377 return 0;
378 }
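
/*
 * Lifecycle sketch: do_blk_trace_setup() leaves a trace in
 * Blktrace_setup; blk_trace_start() moves setup/stopped -> running and
 * links the trace into running_trace_list; blk_trace_stop() moves
 * running -> stopped, unlinks it and flushes the relay channel. A
 * stopped trace can either be restarted or torn down.
 */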
379
380 static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
381 {
382 blk_trace_stop(bt);
383 synchronize_rcu();
384 blk_trace_free(q, bt);
385 put_probe_ref();
386 }
387
388 static int __blk_trace_remove(struct request_queue *q)
389 {
390 struct blk_trace *bt;
391
392 bt = rcu_replace_pointer(q->blk_trace, NULL,
393 lockdep_is_held(&q->debugfs_mutex));
394 if (!bt)
395 return -EINVAL;
396
397 blk_trace_cleanup(q, bt);
398
399 return 0;
400 }
401
402 int blk_trace_remove(struct request_queue *q)
403 {
404 int ret;
405
406 mutex_lock(&q->debugfs_mutex);
407 ret = __blk_trace_remove(q);
408 mutex_unlock(&q->debugfs_mutex);
409
410 return ret;
411 }
412 EXPORT_SYMBOL_GPL(blk_trace_remove);
413
414 static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
415 size_t count, loff_t *ppos)
416 {
417 struct blk_trace *bt = filp->private_data;
418 char buf[16];
419
420 snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
421
422 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
423 }
424
425 static const struct file_operations blk_dropped_fops = {
426 .owner = THIS_MODULE,
427 .open = simple_open,
428 .read = blk_dropped_read,
429 .llseek = default_llseek,
430 };
431
432 static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
433 size_t count, loff_t *ppos)
434 {
435 char *msg;
436 struct blk_trace *bt;
437
438 if (count >= BLK_TN_MAX_MSG)
439 return -EINVAL;
440
441 msg = memdup_user_nul(buffer, count);
442 if (IS_ERR(msg))
443 return PTR_ERR(msg);
444
445 bt = filp->private_data;
446 __blk_trace_note_message(bt, NULL, "%s", msg);
447 kfree(msg);
448
449 return count;
450 }
451
452 static const struct file_operations blk_msg_fops = {
453 .owner = THIS_MODULE,
454 .open = simple_open,
455 .write = blk_msg_write,
456 .llseek = noop_llseek,
457 };
458
459 /*
460 * Keep track of how many times we encountered a full subbuffer, to aid
461 * the user space app in telling how many lost events there were.
462 */
463 static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
464 void *prev_subbuf, size_t prev_padding)
465 {
466 struct blk_trace *bt;
467
468 if (!relay_buf_full(buf))
469 return 1;
470
471 bt = buf->chan->private_data;
472 atomic_inc(&bt->dropped);
473 return 0;
474 }
475
476 static int blk_remove_buf_file_callback(struct dentry *dentry)
477 {
478 debugfs_remove(dentry);
479
480 return 0;
481 }
482
483 static struct dentry *blk_create_buf_file_callback(const char *filename,
484 struct dentry *parent,
485 umode_t mode,
486 struct rchan_buf *buf,
487 int *is_global)
488 {
489 return debugfs_create_file(filename, mode, parent, buf,
490 &relay_file_operations);
491 }
492
493 static const struct rchan_callbacks blk_relay_callbacks = {
494 .subbuf_start = blk_subbuf_start_callback,
495 .create_buf_file = blk_create_buf_file_callback,
496 .remove_buf_file = blk_remove_buf_file_callback,
497 };
498
499 static void blk_trace_setup_lba(struct blk_trace *bt,
500 struct block_device *bdev)
501 {
502 if (bdev) {
503 bt->start_lba = bdev->bd_start_sect;
504 bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
505 } else {
506 bt->start_lba = 0;
507 bt->end_lba = -1ULL;
508 }
509 }
510
511 /*
512 * Setup everything required to start tracing
513 */
514 static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
515 struct block_device *bdev,
516 struct blk_user_trace_setup *buts)
517 {
518 struct blk_trace *bt = NULL;
519 struct dentry *dir = NULL;
520 int ret;
521
522 lockdep_assert_held(&q->debugfs_mutex);
523
524 if (!buts->buf_size || !buts->buf_nr)
525 return -EINVAL;
526
527 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
528 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
529
530 /*
531 * some device names have larger paths - convert the slashes
532 * to underscores for this to work as expected
533 */
534 strreplace(buts->name, '/', '_');
535
536 /*
537 * bdev can be NULL, as with scsi-generic; be as helpful as
538 * we can.
539 */
540 if (rcu_dereference_protected(q->blk_trace,
541 lockdep_is_held(&q->debugfs_mutex))) {
542 pr_warn("Concurrent blktraces are not allowed on %s\n",
543 buts->name);
544 return -EBUSY;
545 }
546
547 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
548 if (!bt)
549 return -ENOMEM;
550
551 ret = -ENOMEM;
552 bt->sequence = alloc_percpu(unsigned long);
553 if (!bt->sequence)
554 goto err;
555
556 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
557 if (!bt->msg_data)
558 goto err;
559
560 /*
561 * When tracing the whole disk reuse the existing debugfs directory
562 * created by the block layer on init. For partitions and
563 * scsi-generic block devices we create a temporary new debugfs
564 * directory that will be removed once the trace ends.
565 */
566 if (bdev && !bdev_is_partition(bdev))
567 dir = q->debugfs_dir;
568 else
569 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
570
571 /*
572 * As blktrace relies on debugfs for its interface the debugfs directory
573 * is required, contrary to the usual mantra of not checking for debugfs
574 * files or directories.
575 */
576 if (IS_ERR_OR_NULL(dir)) {
577 pr_warn("debugfs_dir not present for %s so skipping\n",
578 buts->name);
579 ret = -ENOENT;
580 goto err;
581 }
582
583 bt->dev = dev;
584 atomic_set(&bt->dropped, 0);
585 INIT_LIST_HEAD(&bt->running_list);
586
587 ret = -EIO;
588 debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
589 debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
590
591 bt->rchan = relay_open("trace", dir, buts->buf_size,
592 buts->buf_nr, &blk_relay_callbacks, bt);
593 if (!bt->rchan)
594 goto err;
595
596 bt->act_mask = buts->act_mask;
597 if (!bt->act_mask)
598 bt->act_mask = (u16) -1;
599
600 blk_trace_setup_lba(bt, bdev);
601
602 /* overwrite with user settings */
603 if (buts->start_lba)
604 bt->start_lba = buts->start_lba;
605 if (buts->end_lba)
606 bt->end_lba = buts->end_lba;
607
608 bt->pid = buts->pid;
609 bt->trace_state = Blktrace_setup;
610
611 rcu_assign_pointer(q->blk_trace, bt);
612 get_probe_ref();
613
614 ret = 0;
615 err:
616 if (ret)
617 blk_trace_free(q, bt);
618 return ret;
619 }
620
621 static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
622 struct block_device *bdev, char __user *arg)
623 {
624 struct blk_user_trace_setup buts;
625 int ret;
626
627 ret = copy_from_user(&buts, arg, sizeof(buts));
628 if (ret)
629 return -EFAULT;
630
631 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
632 if (ret)
633 return ret;
634
635 if (copy_to_user(arg, &buts, sizeof(buts))) {
636 __blk_trace_remove(q);
637 return -EFAULT;
638 }
639 return 0;
640 }
641
642 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
643 struct block_device *bdev,
644 char __user *arg)
645 {
646 int ret;
647
648 mutex_lock(&q->debugfs_mutex);
649 ret = __blk_trace_setup(q, name, dev, bdev, arg);
650 mutex_unlock(&q->debugfs_mutex);
651
652 return ret;
653 }
654 EXPORT_SYMBOL_GPL(blk_trace_setup);
655
656 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
657 static int compat_blk_trace_setup(struct request_queue *q, char *name,
658 dev_t dev, struct block_device *bdev,
659 char __user *arg)
660 {
661 struct blk_user_trace_setup buts;
662 struct compat_blk_user_trace_setup cbuts;
663 int ret;
664
665 if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
666 return -EFAULT;
667
668 buts = (struct blk_user_trace_setup) {
669 .act_mask = cbuts.act_mask,
670 .buf_size = cbuts.buf_size,
671 .buf_nr = cbuts.buf_nr,
672 .start_lba = cbuts.start_lba,
673 .end_lba = cbuts.end_lba,
674 .pid = cbuts.pid,
675 };
676
677 ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
678 if (ret)
679 return ret;
680
681 if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
682 __blk_trace_remove(q);
683 return -EFAULT;
684 }
685
686 return 0;
687 }
688 #endif
689
690 static int __blk_trace_startstop(struct request_queue *q, int start)
691 {
692 struct blk_trace *bt;
693
694 bt = rcu_dereference_protected(q->blk_trace,
695 lockdep_is_held(&q->debugfs_mutex));
696 if (bt == NULL)
697 return -EINVAL;
698
699 if (start)
700 return blk_trace_start(bt);
701 else
702 return blk_trace_stop(bt);
703 }
704
705 int blk_trace_startstop(struct request_queue *q, int start)
706 {
707 int ret;
708
709 mutex_lock(&q->debugfs_mutex);
710 ret = __blk_trace_startstop(q, start);
711 mutex_unlock(&q->debugfs_mutex);
712
713 return ret;
714 }
715 EXPORT_SYMBOL_GPL(blk_trace_startstop);
716
717 /*
718 * When reading or writing the blktrace sysfs files, the references to the
719 * opened sysfs or device files should prevent the underlying block device
720 * from being removed. So no further delete protection is really needed.
721 */
722
723 /**
724 * blk_trace_ioctl: - handle the ioctls associated with tracing
725 * @bdev: the block device
726 * @cmd: the ioctl cmd
727 * @arg: the argument data, if any
728 *
729 **/
730 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
731 {
732 struct request_queue *q;
733 int ret, start = 0;
734 char b[BDEVNAME_SIZE];
735
736 q = bdev_get_queue(bdev);
737 if (!q)
738 return -ENXIO;
739
740 mutex_lock(&q->debugfs_mutex);
741
742 switch (cmd) {
743 case BLKTRACESETUP:
744 snprintf(b, sizeof(b), "%pg", bdev);
745 ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
746 break;
747 #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
748 case BLKTRACESETUP32:
749 snprintf(b, sizeof(b), "%pg", bdev);
750 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
751 break;
752 #endif
753 case BLKTRACESTART:
754 start = 1;
755 fallthrough;
756 case BLKTRACESTOP:
757 ret = __blk_trace_startstop(q, start);
758 break;
759 case BLKTRACETEARDOWN:
760 ret = __blk_trace_remove(q);
761 break;
762 default:
763 ret = -ENOTTY;
764 break;
765 }
766
767 mutex_unlock(&q->debugfs_mutex);
768 return ret;
769 }
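
/*
 * Typical userspace sequence (an illustrative sketch of what
 * blktrace(8) does; the buffer geometry below is arbitrary):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr = 4,
 *	};
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	... consume the per-cpu relay files created under debugfs ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */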
770
771 /**
772 * blk_trace_shutdown: - stop and cleanup trace structures
773 * @q: the request queue associated with the device
774 *
775 **/
776 void blk_trace_shutdown(struct request_queue *q)
777 {
778 if (rcu_dereference_protected(q->blk_trace,
779 lockdep_is_held(&q->debugfs_mutex)))
780 __blk_trace_remove(q);
781 }
782
783 #ifdef CONFIG_BLK_CGROUP
784 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
785 {
786 struct cgroup_subsys_state *blkcg_css;
787 struct blk_trace *bt;
788
789 /* We don't use the 'bt' value here except as an optimization... */
790 bt = rcu_dereference_protected(q->blk_trace, 1);
791 if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
792 return 0;
793
794 blkcg_css = bio_blkcg_css(bio);
795 if (!blkcg_css)
796 return 0;
797 return cgroup_id(blkcg_css->cgroup);
798 }
799 #else
800 static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
801 {
802 return 0;
803 }
804 #endif
805
806 static u64
807 blk_trace_request_get_cgid(struct request *rq)
808 {
809 if (!rq->bio)
810 return 0;
811 /* Use the first bio */
812 return blk_trace_bio_get_cgid(rq->q, rq->bio);
813 }
814
815 /*
816 * blktrace probes
817 */
818
819 /**
820 * blk_add_trace_rq - Add a trace for a request oriented action
821 * @rq: the source request
822 * @error: return status to log
823 * @nr_bytes: number of completed bytes
824 * @what: the action
825 * @cgid: the cgroup info
826 *
827 * Description:
828 * Records an action against a request. Will log the bio offset + size.
829 *
830 **/
831 static void blk_add_trace_rq(struct request *rq, blk_status_t error,
832 unsigned int nr_bytes, u32 what, u64 cgid)
833 {
834 struct blk_trace *bt;
835
836 rcu_read_lock();
837 bt = rcu_dereference(rq->q->blk_trace);
838 if (likely(!bt)) {
839 rcu_read_unlock();
840 return;
841 }
842
843 if (blk_rq_is_passthrough(rq))
844 what |= BLK_TC_ACT(BLK_TC_PC);
845 else
846 what |= BLK_TC_ACT(BLK_TC_FS);
847
848 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
849 what, blk_status_to_errno(error), 0, NULL, cgid);
850 rcu_read_unlock();
851 }
852
853 static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
854 {
855 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
856 blk_trace_request_get_cgid(rq));
857 }
858
859 static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
860 {
861 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
862 blk_trace_request_get_cgid(rq));
863 }
864
865 static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
866 {
867 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
868 blk_trace_request_get_cgid(rq));
869 }
870
871 static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
872 {
873 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
874 blk_trace_request_get_cgid(rq));
875 }
876
877 static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
878 blk_status_t error, unsigned int nr_bytes)
879 {
880 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
881 blk_trace_request_get_cgid(rq));
882 }
883
884 /**
885 * blk_add_trace_bio - Add a trace for a bio oriented action
886 * @q: queue the io is for
887 * @bio: the source bio
888 * @what: the action
889 * @error: error, if any
890 *
891 * Description:
892 * Records an action against a bio. Will log the bio offset + size.
893 *
894 **/
895 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
896 u32 what, int error)
897 {
898 struct blk_trace *bt;
899
900 rcu_read_lock();
901 bt = rcu_dereference(q->blk_trace);
902 if (likely(!bt)) {
903 rcu_read_unlock();
904 return;
905 }
906
907 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
908 bio->bi_opf, what, error, 0, NULL,
909 blk_trace_bio_get_cgid(q, bio));
910 rcu_read_unlock();
911 }
912
913 static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
914 {
915 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
916 }
917
918 static void blk_add_trace_bio_complete(void *ignore,
919 struct request_queue *q, struct bio *bio)
920 {
921 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
922 blk_status_to_errno(bio->bi_status));
923 }
924
925 static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
926 {
927 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
928 0);
929 }
930
931 static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
932 {
933 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
934 0);
935 }
936
937 static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
938 {
939 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
940 }
941
942 static void blk_add_trace_getrq(void *ignore, struct bio *bio)
943 {
944 blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
945 }
946
947 static void blk_add_trace_plug(void *ignore, struct request_queue *q)
948 {
949 struct blk_trace *bt;
950
951 rcu_read_lock();
952 bt = rcu_dereference(q->blk_trace);
953 if (bt)
954 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
955 rcu_read_unlock();
956 }
957
958 static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
959 unsigned int depth, bool explicit)
960 {
961 struct blk_trace *bt;
962
963 rcu_read_lock();
964 bt = rcu_dereference(q->blk_trace);
965 if (bt) {
966 __be64 rpdu = cpu_to_be64(depth);
967 u32 what;
968
969 if (explicit)
970 what = BLK_TA_UNPLUG_IO;
971 else
972 what = BLK_TA_UNPLUG_TIMER;
973
974 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
975 }
976 rcu_read_unlock();
977 }
978
979 static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
980 {
981 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
982 struct blk_trace *bt;
983
984 rcu_read_lock();
985 bt = rcu_dereference(q->blk_trace);
986 if (bt) {
987 __be64 rpdu = cpu_to_be64(pdu);
988
989 __blk_add_trace(bt, bio->bi_iter.bi_sector,
990 bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
991 blk_status_to_errno(bio->bi_status),
992 sizeof(rpdu), &rpdu,
993 blk_trace_bio_get_cgid(q, bio));
994 }
995 rcu_read_unlock();
996 }
997
998 /**
999 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
1000 * @ignore: trace callback data parameter (not used)
1001 * @bio: the source bio
1002 * @dev: source device
1003 * @from: source sector
1004 *
1005 * Called after a bio is remapped to a different device and/or sector.
1006 **/
1007 static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
1008 sector_t from)
1009 {
1010 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1011 struct blk_trace *bt;
1012 struct blk_io_trace_remap r;
1013
1014 rcu_read_lock();
1015 bt = rcu_dereference(q->blk_trace);
1016 if (likely(!bt)) {
1017 rcu_read_unlock();
1018 return;
1019 }
1020
1021 r.device_from = cpu_to_be32(dev);
1022 r.device_to = cpu_to_be32(bio_dev(bio));
1023 r.sector_from = cpu_to_be64(from);
1024
1025 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
1026 bio->bi_opf, BLK_TA_REMAP,
1027 blk_status_to_errno(bio->bi_status),
1028 sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
1029 rcu_read_unlock();
1030 }
1031
1032 /**
1033 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
1034 * @ignore: trace callback data parameter (not used)
1035 * @rq: the source request
1036 * @dev: target device
1037 * @from: source sector
1038 *
1039 * Description:
1040 * Device mapper remaps request to other devices.
1041 * Add a trace for that action.
1042 *
1043 **/
1044 static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
1045 sector_t from)
1046 {
1047 struct blk_trace *bt;
1048 struct blk_io_trace_remap r;
1049
1050 rcu_read_lock();
1051 bt = rcu_dereference(rq->q->blk_trace);
1052 if (likely(!bt)) {
1053 rcu_read_unlock();
1054 return;
1055 }
1056
1057 r.device_from = cpu_to_be32(dev);
1058 r.device_to = cpu_to_be32(disk_devt(rq->q->disk));
1059 r.sector_from = cpu_to_be64(from);
1060
1061 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
1062 rq->cmd_flags, BLK_TA_REMAP, 0,
1063 sizeof(r), &r, blk_trace_request_get_cgid(rq));
1064 rcu_read_unlock();
1065 }
1066
1067 /**
1068 * blk_add_driver_data - Add binary message with driver-specific data
1069 * @rq: io request
1070 * @data: driver-specific data
1071 * @len: length of driver-specific data
1072 *
1073 * Description:
1074 * Some drivers might want to write driver-specific data per request.
1075 *
1076 **/
1077 void blk_add_driver_data(struct request *rq, void *data, size_t len)
1078 {
1079 struct blk_trace *bt;
1080
1081 rcu_read_lock();
1082 bt = rcu_dereference(rq->q->blk_trace);
1083 if (likely(!bt)) {
1084 rcu_read_unlock();
1085 return;
1086 }
1087
1088 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
1089 BLK_TA_DRV_DATA, 0, len, data,
1090 blk_trace_request_get_cgid(rq));
1091 rcu_read_unlock();
1092 }
1093 EXPORT_SYMBOL_GPL(blk_add_driver_data);
1094
1095 static void blk_register_tracepoints(void)
1096 {
1097 int ret;
1098
1099 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1100 WARN_ON(ret);
1101 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1102 WARN_ON(ret);
1103 ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1104 WARN_ON(ret);
1105 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1106 WARN_ON(ret);
1107 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1108 WARN_ON(ret);
1109 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1110 WARN_ON(ret);
1111 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1112 WARN_ON(ret);
1113 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1114 WARN_ON(ret);
1115 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1116 WARN_ON(ret);
1117 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1118 WARN_ON(ret);
1119 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
1120 WARN_ON(ret);
1121 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1122 WARN_ON(ret);
1123 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1124 WARN_ON(ret);
1125 ret = register_trace_block_split(blk_add_trace_split, NULL);
1126 WARN_ON(ret);
1127 ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1128 WARN_ON(ret);
1129 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1130 WARN_ON(ret);
1131 }
1132
1133 static void blk_unregister_tracepoints(void)
1134 {
1135 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1136 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1137 unregister_trace_block_split(blk_add_trace_split, NULL);
1138 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1139 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1140 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
1141 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
1142 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
1143 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
1144 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
1145 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
1146 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
1147 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
1148 unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
1149 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
1150 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
1151
1152 tracepoint_synchronize_unregister();
1153 }
1154
1155 /*
1156 * struct blk_io_tracer formatting routines
1157 */
1158
1159 static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
1160 {
1161 int i = 0;
1162 int tc = t->action >> BLK_TC_SHIFT;
1163
1164 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1165 rwbs[i++] = 'N';
1166 goto out;
1167 }
1168
1169 if (tc & BLK_TC_FLUSH)
1170 rwbs[i++] = 'F';
1171
1172 if (tc & BLK_TC_DISCARD)
1173 rwbs[i++] = 'D';
1174 else if (tc & BLK_TC_WRITE)
1175 rwbs[i++] = 'W';
1176 else if (t->bytes)
1177 rwbs[i++] = 'R';
1178 else
1179 rwbs[i++] = 'N';
1180
1181 if (tc & BLK_TC_FUA)
1182 rwbs[i++] = 'F';
1183 if (tc & BLK_TC_AHEAD)
1184 rwbs[i++] = 'A';
1185 if (tc & BLK_TC_SYNC)
1186 rwbs[i++] = 'S';
1187 if (tc & BLK_TC_META)
1188 rwbs[i++] = 'M';
1189 out:
1190 rwbs[i] = '\0';
1191 }
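
/*
 * Example: a synchronous metadata write renders as "WSM", a discard
 * with FUA as "DF", and a zero-byte event that is neither a write nor
 * a discard falls back to "N".
 */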
1192
1193 static inline
1194 const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
1195 {
1196 return (const struct blk_io_trace *)ent;
1197 }
1198
1199 static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
1200 {
1201 return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
1202 }
1203
1204 static inline u64 t_cgid(const struct trace_entry *ent)
1205 {
1206 return *(u64 *)(te_blk_io_trace(ent) + 1);
1207 }
1208
1209 static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
1210 {
1211 return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
1212 }
1213
1214 static inline u32 t_action(const struct trace_entry *ent)
1215 {
1216 return te_blk_io_trace(ent)->action;
1217 }
1218
1219 static inline u32 t_bytes(const struct trace_entry *ent)
1220 {
1221 return te_blk_io_trace(ent)->bytes;
1222 }
1223
1224 static inline u32 t_sec(const struct trace_entry *ent)
1225 {
1226 return te_blk_io_trace(ent)->bytes >> 9;
1227 }
1228
1229 static inline unsigned long long t_sector(const struct trace_entry *ent)
1230 {
1231 return te_blk_io_trace(ent)->sector;
1232 }
1233
1234 static inline __u16 t_error(const struct trace_entry *ent)
1235 {
1236 return te_blk_io_trace(ent)->error;
1237 }
1238
1239 static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
1240 {
1241 const __be64 *val = pdu_start(ent, has_cg);
1242 return be64_to_cpu(*val);
1243 }
1244
1245 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
1246 bool has_cg);
1247
1248 static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
1249 bool has_cg)
1250 {
1251 char rwbs[RWBS_LEN];
1252 unsigned long long ts = iter->ts;
1253 unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
1254 unsigned secs = (unsigned long)ts;
1255 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1256
1257 fill_rwbs(rwbs, t);
1258
1259 trace_seq_printf(&iter->seq,
1260 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
1261 MAJOR(t->device), MINOR(t->device), iter->cpu,
1262 secs, nsec_rem, iter->ent->pid, act, rwbs);
1263 }
1264
1265 static void blk_log_action(struct trace_iterator *iter, const char *act,
1266 bool has_cg)
1267 {
1268 char rwbs[RWBS_LEN];
1269 const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
1270
1271 fill_rwbs(rwbs, t);
1272 if (has_cg) {
1273 u64 id = t_cgid(iter->ent);
1274
1275 if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
1276 char blkcg_name_buf[NAME_MAX + 1] = "<...>";
1277
1278 cgroup_path_from_kernfs_id(id, blkcg_name_buf,
1279 sizeof(blkcg_name_buf));
1280 trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
1281 MAJOR(t->device), MINOR(t->device),
1282 blkcg_name_buf, act, rwbs);
1283 } else {
1284 /*
1285 * The cgid portion used to be "INO,GEN". Userland
1286 * builds a FILEID_INO32_GEN fid out of them and
1287 * opens the cgroup using open_by_handle_at(2).
1288 * While 32bit ino setups are still the same, 64bit
1289 * ones now use the 64bit ino as the whole ID and
1290 * no longer use generation.
1291 *
1292 * Regardless of the content, always output
1293 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
1294 * be mapped back to @id on both 64 and 32bit ino
1295 * setups. See __kernfs_fh_to_dentry().
1296 */
1297 trace_seq_printf(&iter->seq,
1298 "%3d,%-3d %llx,%-llx %2s %3s ",
1299 MAJOR(t->device), MINOR(t->device),
1300 id & U32_MAX, id >> 32, act, rwbs);
1301 }
1302 } else
1303 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
1304 MAJOR(t->device), MINOR(t->device), act, rwbs);
1305 }
1306
1307 static void blk_log_dump_pdu(struct trace_seq *s,
1308 const struct trace_entry *ent, bool has_cg)
1309 {
1310 const unsigned char *pdu_buf;
1311 int pdu_len;
1312 int i, end;
1313
1314 pdu_buf = pdu_start(ent, has_cg);
1315 pdu_len = pdu_real_len(ent, has_cg);
1316
1317 if (!pdu_len)
1318 return;
1319
1320 /* find the last zero that needs to be printed */
1321 for (end = pdu_len - 1; end >= 0; end--)
1322 if (pdu_buf[end])
1323 break;
1324 end++;
1325
1326 trace_seq_putc(s, '(');
1327
1328 for (i = 0; i < pdu_len; i++) {
1329
1330 trace_seq_printf(s, "%s%02x",
1331 i == 0 ? "" : " ", pdu_buf[i]);
1332
1333 /*
1334 * stop when the rest is just zeros and indicate so
1335 * with a ".." appended
1336 */
1337 if (i == end && end != pdu_len - 1) {
1338 trace_seq_puts(s, " ..) ");
1339 return;
1340 }
1341 }
1342
1343 trace_seq_puts(s, ") ");
1344 }
1345
1346 static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1347 {
1348 char cmd[TASK_COMM_LEN];
1349
1350 trace_find_cmdline(ent->pid, cmd);
1351
1352 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1353 trace_seq_printf(s, "%u ", t_bytes(ent));
1354 blk_log_dump_pdu(s, ent, has_cg);
1355 trace_seq_printf(s, "[%s]\n", cmd);
1356 } else {
1357 if (t_sec(ent))
1358 trace_seq_printf(s, "%llu + %u [%s]\n",
1359 t_sector(ent), t_sec(ent), cmd);
1360 else
1361 trace_seq_printf(s, "[%s]\n", cmd);
1362 }
1363 }
1364
1365 static void blk_log_with_error(struct trace_seq *s,
1366 const struct trace_entry *ent, bool has_cg)
1367 {
1368 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
1369 blk_log_dump_pdu(s, ent, has_cg);
1370 trace_seq_printf(s, "[%d]\n", t_error(ent));
1371 } else {
1372 if (t_sec(ent))
1373 trace_seq_printf(s, "%llu + %u [%d]\n",
1374 t_sector(ent),
1375 t_sec(ent), t_error(ent));
1376 else
1377 trace_seq_printf(s, "%llu [%d]\n",
1378 t_sector(ent), t_error(ent));
1379 }
1380 }
1381
1382 static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1383 {
1384 const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
1385
1386 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
1387 t_sector(ent), t_sec(ent),
1388 MAJOR(be32_to_cpu(__r->device_from)),
1389 MINOR(be32_to_cpu(__r->device_from)),
1390 be64_to_cpu(__r->sector_from));
1391 }
1392
1393 static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1394 {
1395 char cmd[TASK_COMM_LEN];
1396
1397 trace_find_cmdline(ent->pid, cmd);
1398
1399 trace_seq_printf(s, "[%s]\n", cmd);
1400 }
1401
1402 static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1403 {
1404 char cmd[TASK_COMM_LEN];
1405
1406 trace_find_cmdline(ent->pid, cmd);
1407
1408 trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
1409 }
1410
1411 static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
1412 {
1413 char cmd[TASK_COMM_LEN];
1414
1415 trace_find_cmdline(ent->pid, cmd);
1416
1417 trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
1418 get_pdu_int(ent, has_cg), cmd);
1419 }
1420
1421 static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
1422 bool has_cg)
1423 {
1424
1425 trace_seq_putmem(s, pdu_start(ent, has_cg),
1426 pdu_real_len(ent, has_cg));
1427 trace_seq_putc(s, '\n');
1428 }
1429
1430 /*
1431 * struct tracer operations
1432 */
1433
1434 static void blk_tracer_print_header(struct seq_file *m)
1435 {
1436 if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1437 return;
1438 seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
1439 "# | | | | | |\n");
1440 }
1441
1442 static void blk_tracer_start(struct trace_array *tr)
1443 {
1444 blk_tracer_enabled = true;
1445 }
1446
1447 static int blk_tracer_init(struct trace_array *tr)
1448 {
1449 blk_tr = tr;
1450 blk_tracer_start(tr);
1451 return 0;
1452 }
1453
1454 static void blk_tracer_stop(struct trace_array *tr)
1455 {
1456 blk_tracer_enabled = false;
1457 }
1458
1459 static void blk_tracer_reset(struct trace_array *tr)
1460 {
1461 blk_tracer_stop(tr);
1462 }
1463
1464 static const struct {
1465 const char *act[2];
1466 void (*print)(struct trace_seq *s, const struct trace_entry *ent,
1467 bool has_cg);
1468 } what2act[] = {
1469 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
1470 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
1471 [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
1472 [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
1473 [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
1474 [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
1475 [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
1476 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
1477 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
1478 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
1479 [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1480 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
1481 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
1482 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
1483 [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
1484 };
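
/*
 * Example: a BLK_TA_QUEUE event is printed with the short action
 * string "Q", or with "queue" when the verbose trace option is set
 * (the long_act index in print_one_line() below), followed by
 * blk_log_generic() output.
 */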
1485
1486 static enum print_line_t print_one_line(struct trace_iterator *iter,
1487 bool classic)
1488 {
1489 struct trace_array *tr = iter->tr;
1490 struct trace_seq *s = &iter->seq;
1491 const struct blk_io_trace *t;
1492 u16 what;
1493 bool long_act;
1494 blk_log_action_t *log_action;
1495 bool has_cg;
1496
1497 t = te_blk_io_trace(iter->ent);
1498 what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
1499 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
1500 log_action = classic ? &blk_log_action_classic : &blk_log_action;
1501 has_cg = t->action & __BLK_TA_CGROUP;
1502
1503 if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
1504 log_action(iter, long_act ? "message" : "m", has_cg);
1505 blk_log_msg(s, iter->ent, has_cg);
1506 return trace_handle_return(s);
1507 }
1508
1509 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
1510 trace_seq_printf(s, "Unknown action %x\n", what);
1511 else {
1512 log_action(iter, what2act[what].act[long_act], has_cg);
1513 what2act[what].print(s, iter->ent, has_cg);
1514 }
1515
1516 return trace_handle_return(s);
1517 }
1518
1519 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1520 int flags, struct trace_event *event)
1521 {
1522 return print_one_line(iter, false);
1523 }
1524
1525 static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1526 {
1527 struct trace_seq *s = &iter->seq;
1528 struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
1529 const int offset = offsetof(struct blk_io_trace, sector);
1530 struct blk_io_trace old = {
1531 .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
1532 .time = iter->ts,
1533 };
1534
1535 trace_seq_putmem(s, &old, offset);
1536 trace_seq_putmem(s, &t->sector,
1537 sizeof(old) - offset + t->pdu_len);
1538 }
1539
1540 static enum print_line_t
1541 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1542 struct trace_event *event)
1543 {
1544 blk_trace_synthesize_old_trace(iter);
1545
1546 return trace_handle_return(&iter->seq);
1547 }
1548
1549 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
1550 {
1551 if ((iter->ent->type != TRACE_BLK) ||
1552 !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
1553 return TRACE_TYPE_UNHANDLED;
1554
1555 return print_one_line(iter, true);
1556 }
1557
1558 static int
1559 blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1560 {
1561 /* don't output context-info for blk_classic output */
1562 if (bit == TRACE_BLK_OPT_CLASSIC) {
1563 if (set)
1564 tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
1565 else
1566 tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
1567 }
1568 return 0;
1569 }
1570
1571 static struct tracer blk_tracer __read_mostly = {
1572 .name = "blk",
1573 .init = blk_tracer_init,
1574 .reset = blk_tracer_reset,
1575 .start = blk_tracer_start,
1576 .stop = blk_tracer_stop,
1577 .print_header = blk_tracer_print_header,
1578 .print_line = blk_tracer_print_line,
1579 .flags = &blk_tracer_flags,
1580 .set_flag = blk_tracer_set_flag,
1581 };
1582
1583 static struct trace_event_functions trace_blk_event_funcs = {
1584 .trace = blk_trace_event_print,
1585 .binary = blk_trace_event_print_binary,
1586 };
1587
1588 static struct trace_event trace_blk_event = {
1589 .type = TRACE_BLK,
1590 .funcs = &trace_blk_event_funcs,
1591 };
1592
1593 static int __init init_blk_tracer(void)
1594 {
1595 if (!register_trace_event(&trace_blk_event)) {
1596 pr_warn("Warning: could not register block events\n");
1597 return 1;
1598 }
1599
1600 if (register_tracer(&blk_tracer) != 0) {
1601 pr_warn("Warning: could not register the block tracer\n");
1602 unregister_trace_event(&trace_blk_event);
1603 return 1;
1604 }
1605
1606 return 0;
1607 }
1608
1609 device_initcall(init_blk_tracer);
1610
1611 static int blk_trace_remove_queue(struct request_queue *q)
1612 {
1613 struct blk_trace *bt;
1614
1615 bt = rcu_replace_pointer(q->blk_trace, NULL,
1616 lockdep_is_held(&q->debugfs_mutex));
1617 if (bt == NULL)
1618 return -EINVAL;
1619
1620 blk_trace_stop(bt);
1621
1622 put_probe_ref();
1623 synchronize_rcu();
1624 blk_trace_free(q, bt);
1625 return 0;
1626 }
1627
1628 /*
1629 * Setup everything required to start tracing
1630 */
1631 static int blk_trace_setup_queue(struct request_queue *q,
1632 struct block_device *bdev)
1633 {
1634 struct blk_trace *bt = NULL;
1635 int ret = -ENOMEM;
1636
1637 bt = kzalloc(sizeof(*bt), GFP_KERNEL);
1638 if (!bt)
1639 return -ENOMEM;
1640
1641 bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
1642 if (!bt->msg_data)
1643 goto free_bt;
1644
1645 bt->dev = bdev->bd_dev;
1646 bt->act_mask = (u16)-1;
1647
1648 blk_trace_setup_lba(bt, bdev);
1649
1650 rcu_assign_pointer(q->blk_trace, bt);
1651 get_probe_ref();
1652 return 0;
1653
1654 free_bt:
1655 blk_trace_free(q, bt);
1656 return ret;
1657 }
1658
1659 /*
1660 * sysfs interface to enable and configure tracing
1661 */
1662
1663 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1664 struct device_attribute *attr,
1665 char *buf);
1666 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1667 struct device_attribute *attr,
1668 const char *buf, size_t count);
1669 #define BLK_TRACE_DEVICE_ATTR(_name) \
1670 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
1671 sysfs_blk_trace_attr_show, \
1672 sysfs_blk_trace_attr_store)
1673
1674 static BLK_TRACE_DEVICE_ATTR(enable);
1675 static BLK_TRACE_DEVICE_ATTR(act_mask);
1676 static BLK_TRACE_DEVICE_ATTR(pid);
1677 static BLK_TRACE_DEVICE_ATTR(start_lba);
1678 static BLK_TRACE_DEVICE_ATTR(end_lba);
1679
1680 static struct attribute *blk_trace_attrs[] = {
1681 &dev_attr_enable.attr,
1682 &dev_attr_act_mask.attr,
1683 &dev_attr_pid.attr,
1684 &dev_attr_start_lba.attr,
1685 &dev_attr_end_lba.attr,
1686 NULL
1687 };
1688
1689 struct attribute_group blk_trace_attr_group = {
1690 .name = "trace",
1691 .attrs = blk_trace_attrs,
1692 };
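
/*
 * Example usage (a sketch; the group shows up as a "trace" directory
 * under the block device's sysfs node):
 *
 *	# echo write > /sys/block/sda/trace/act_mask
 *	# echo 1 > /sys/block/sda/trace/enable
 *
 * Storing act_mask, pid, start_lba or end_lba first also works: the
 * store routine sets up a default trace on demand before applying the
 * attribute.
 */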
1693
1694 static const struct {
1695 int mask;
1696 const char *str;
1697 } mask_maps[] = {
1698 { BLK_TC_READ, "read" },
1699 { BLK_TC_WRITE, "write" },
1700 { BLK_TC_FLUSH, "flush" },
1701 { BLK_TC_SYNC, "sync" },
1702 { BLK_TC_QUEUE, "queue" },
1703 { BLK_TC_REQUEUE, "requeue" },
1704 { BLK_TC_ISSUE, "issue" },
1705 { BLK_TC_COMPLETE, "complete" },
1706 { BLK_TC_FS, "fs" },
1707 { BLK_TC_PC, "pc" },
1708 { BLK_TC_NOTIFY, "notify" },
1709 { BLK_TC_AHEAD, "ahead" },
1710 { BLK_TC_META, "meta" },
1711 { BLK_TC_DISCARD, "discard" },
1712 { BLK_TC_DRV_DATA, "drv_data" },
1713 { BLK_TC_FUA, "fua" },
1714 };
1715
1716 static int blk_trace_str2mask(const char *str)
1717 {
1718 int i;
1719 int mask = 0;
1720 char *buf, *s, *token;
1721
1722 buf = kstrdup(str, GFP_KERNEL);
1723 if (buf == NULL)
1724 return -ENOMEM;
1725 s = strstrip(buf);
1726
1727 while (1) {
1728 token = strsep(&s, ",");
1729 if (token == NULL)
1730 break;
1731
1732 if (*token == '\0')
1733 continue;
1734
1735 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1736 if (strcasecmp(token, mask_maps[i].str) == 0) {
1737 mask |= mask_maps[i].mask;
1738 break;
1739 }
1740 }
1741 if (i == ARRAY_SIZE(mask_maps)) {
1742 mask = -EINVAL;
1743 break;
1744 }
1745 }
1746 kfree(buf);
1747
1748 return mask;
1749 }
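
/*
 * Example: blk_trace_str2mask("read,write,sync") returns
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC, while an unknown token
 * such as "bogus" fails the whole parse with -EINVAL.
 */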
1750
1751 static ssize_t blk_trace_mask2str(char *buf, int mask)
1752 {
1753 int i;
1754 char *p = buf;
1755
1756 for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
1757 if (mask & mask_maps[i].mask) {
1758 p += sprintf(p, "%s%s",
1759 (p == buf) ? "" : ",", mask_maps[i].str);
1760 }
1761 }
1762 *p++ = '\n';
1763
1764 return p - buf;
1765 }
1766
1767 static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1768 struct device_attribute *attr,
1769 char *buf)
1770 {
1771 struct block_device *bdev = dev_to_bdev(dev);
1772 struct request_queue *q = bdev_get_queue(bdev);
1773 struct blk_trace *bt;
1774 ssize_t ret = -ENXIO;
1775
1776 mutex_lock(&q->debugfs_mutex);
1777
1778 bt = rcu_dereference_protected(q->blk_trace,
1779 lockdep_is_held(&q->debugfs_mutex));
1780 if (attr == &dev_attr_enable) {
1781 ret = sprintf(buf, "%u\n", !!bt);
1782 goto out_unlock_bdev;
1783 }
1784
1785 if (bt == NULL)
1786 ret = sprintf(buf, "disabled\n");
1787 else if (attr == &dev_attr_act_mask)
1788 ret = blk_trace_mask2str(buf, bt->act_mask);
1789 else if (attr == &dev_attr_pid)
1790 ret = sprintf(buf, "%u\n", bt->pid);
1791 else if (attr == &dev_attr_start_lba)
1792 ret = sprintf(buf, "%llu\n", bt->start_lba);
1793 else if (attr == &dev_attr_end_lba)
1794 ret = sprintf(buf, "%llu\n", bt->end_lba);
1795
1796 out_unlock_bdev:
1797 mutex_unlock(&q->debugfs_mutex);
1798 return ret;
1799 }
1800
1801 static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1802 struct device_attribute *attr,
1803 const char *buf, size_t count)
1804 {
1805 struct block_device *bdev = dev_to_bdev(dev);
1806 struct request_queue *q = bdev_get_queue(bdev);
1807 struct blk_trace *bt;
1808 u64 value;
1809 ssize_t ret = -EINVAL;
1810
1811 if (count == 0)
1812 goto out;
1813
1814 if (attr == &dev_attr_act_mask) {
1815 if (kstrtoull(buf, 0, &value)) {
1816 /* Assume it is a list of trace category names */
1817 ret = blk_trace_str2mask(buf);
1818 if (ret < 0)
1819 goto out;
1820 value = ret;
1821 }
1822 } else {
1823 if (kstrtoull(buf, 0, &value))
1824 goto out;
1825 }
1826
1827 mutex_lock(&q->debugfs_mutex);
1828
1829 bt = rcu_dereference_protected(q->blk_trace,
1830 lockdep_is_held(&q->debugfs_mutex));
1831 if (attr == &dev_attr_enable) {
1832 if (!!value == !!bt) {
1833 ret = 0;
1834 goto out_unlock_bdev;
1835 }
1836 if (value)
1837 ret = blk_trace_setup_queue(q, bdev);
1838 else
1839 ret = blk_trace_remove_queue(q);
1840 goto out_unlock_bdev;
1841 }
1842
1843 ret = 0;
1844 if (bt == NULL) {
1845 ret = blk_trace_setup_queue(q, bdev);
1846 bt = rcu_dereference_protected(q->blk_trace,
1847 lockdep_is_held(&q->debugfs_mutex));
1848 }
1849
1850 if (ret == 0) {
1851 if (attr == &dev_attr_act_mask)
1852 bt->act_mask = value;
1853 else if (attr == &dev_attr_pid)
1854 bt->pid = value;
1855 else if (attr == &dev_attr_start_lba)
1856 bt->start_lba = value;
1857 else if (attr == &dev_attr_end_lba)
1858 bt->end_lba = value;
1859 }
1860
1861 out_unlock_bdev:
1862 mutex_unlock(&q->debugfs_mutex);
1863 out:
1864 return ret ? ret : count;
1865 }
1866 #endif /* CONFIG_BLK_DEV_IO_TRACE */
1867
1868 #ifdef CONFIG_EVENT_TRACING
1869
1870 /**
1871 * blk_fill_rwbs - Fill the buffer @rwbs by mapping @opf to a character string.
1872 * @rwbs: buffer to be filled
1873 * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint
1874 *
1875 * Description:
1876 * Maps each request operation and flag to a single character and fills the
1877 * buffer provided by the caller with resulting string.
1878 *
1879 **/
1880 void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
1881 {
1882 int i = 0;
1883
1884 if (opf & REQ_PREFLUSH)
1885 rwbs[i++] = 'F';
1886
1887 switch (opf & REQ_OP_MASK) {
1888 case REQ_OP_WRITE:
1889 rwbs[i++] = 'W';
1890 break;
1891 case REQ_OP_DISCARD:
1892 rwbs[i++] = 'D';
1893 break;
1894 case REQ_OP_SECURE_ERASE:
1895 rwbs[i++] = 'D';
1896 rwbs[i++] = 'E';
1897 break;
1898 case REQ_OP_FLUSH:
1899 rwbs[i++] = 'F';
1900 break;
1901 case REQ_OP_READ:
1902 rwbs[i++] = 'R';
1903 break;
1904 default:
1905 rwbs[i++] = 'N';
1906 }
1907
1908 if (opf & REQ_FUA)
1909 rwbs[i++] = 'F';
1910 if (opf & REQ_RAHEAD)
1911 rwbs[i++] = 'A';
1912 if (opf & REQ_SYNC)
1913 rwbs[i++] = 'S';
1914 if (opf & REQ_META)
1915 rwbs[i++] = 'M';
1916
1917 rwbs[i] = '\0';
1918 }
1919 EXPORT_SYMBOL_GPL(blk_fill_rwbs);
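
/*
 * Example: blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA)
 * produces "WFS", and blk_fill_rwbs(rwbs, REQ_OP_SECURE_ERASE)
 * produces "DE".
 */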
1920
1921 #endif /* CONFIG_EVENT_TRACING */
1922
1923