// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/printk.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not. Added option to suppress kernel printk's
 * to the console. Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *	01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/crash_core.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_sem protects the console_drivers list, and also
 * provides serialisation for access to the entire console
 * driver system.
 */
static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);

/*
 * The system may need to suppress printk messages under certain
 * circumstances, e.g. after a kernel panic happens.
 */
int __read_mostly suppress_printk;

/*
 * During panic, heavy printk by other CPUs can delay the
 * panic and risk deadlock on console resources.
 */
static int __read_mostly suppress_panic_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};
#endif

enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,
	__DEVKMSG_LOG_BIT_OFF,
	__DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON	= BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF	= BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK	= BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
	size_t len;

	if (!str)
		return -EINVAL;

	len = str_has_prefix(str, "on");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_ON;
		return len;
	}

	len = str_has_prefix(str, "off");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
		return len;
	}

	len = str_has_prefix(str, "ratelimit");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
		return len;
	}

	return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0) {
		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
		return 1;
	}

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
		strcpy(devkmsg_log_str, "on");
	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
		strcpy(devkmsg_log_str, "off");
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout the
	 * runtime of the system. This is a precautionary measure against
	 * userspace trying to be a smarta** and attempting to change it up
	 * on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
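
/*
 * Usage sketch (illustrative, not from the original source): booting with
 * "printk.devkmsg=on" disables the /dev/kmsg ratelimit and sets the LOCK
 * bit, so a later attempt such as
 *
 *   echo ratelimit > /proc/sys/kernel/printk_devkmsg
 *
 * fails with -EINVAL for the rest of the system's runtime (see the LOCK
 * check in devkmsg_sysctl_set_loglvl() below).
 */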

char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		old = devkmsg_log;
		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);

			return -EINVAL;
		}
	}

	return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */

/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
	int lock_failed;
	unsigned long flags;

	/*
	 * Here and in __up_console_sem() we need to be in safe mode,
	 * because spindump/WARN/etc from under console ->lock will
	 * deadlock in printk()->down_trylock_console_sem() otherwise.
	 */
	printk_safe_enter_irqsave(flags);
	lock_failed = down_trylock(&console_sem);
	printk_safe_exit_irqrestore(flags);

	if (lock_failed)
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

static void __up_console_sem(unsigned long ip)
{
	unsigned long flags;

	mutex_release(&console_lock_dep_map, ip);

	printk_safe_enter_irqsave(flags);
	up(&console_sem);
	printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)

static bool panic_in_progress(void)
{
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}

/*
 * This is used for debugging the mess that is the VT code by
 * keeping track of whether we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
 * hold it and are racing, but it helps track those weird code
 * paths in the console code where we end up in places I want
 * locked without the console semaphore held).
 */
static int console_locked, console_suspended;

/*
 * Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;

enum con_msg_format_flags {
	MSG_FORMAT_DEFAULT	= 0,
	MSG_FORMAT_SYSLOG	= (1 << 0),
};

static int console_msg_format = MSG_FORMAT_DEFAULT;

/*
 * The printk log buffer consists of a sequenced collection of records, each
 * containing variable length message text. Every record also contains its
 * own meta-data (@info).
 *
 * Every record meta-data carries the timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual kernel
 * messages use LOG_KERN; userspace-injected messages always carry a matching
 * syslog facility, by default LOG_USER. The origin of every message can be
 * reliably determined that way.
 *
 * The human readable log message of a record is available in @text, the
 * length of the message text in @text_len. The stored message is not
 * terminated.
 *
 * Optionally, a record can carry a dictionary of properties (key/value
 * pairs), to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8         device identifier
 *                          b12:8         block dev_t
 *                          c127:3        char dev_t
 *                          n8            netdev ifindex
 *                          +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci        driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 * and values are terminated by a '\0' character.
 *
 * Example of record values:
 *   record.text_buf                = "it's a line" (unterminated)
 *   record.info.seq                = 56
 *   record.info.ts_nsec            = 36863
 *   record.info.text_len           = 11
 *   record.info.facility           = 0 (LOG_KERN)
 *   record.info.flags              = 0
 *   record.info.level              = 3 (LOG_ERR)
 *   record.info.caller_id          = 299 (task 299)
 *   record.info.dev_info.subsystem = "pci" (terminated)
 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 *
 * The 'struct printk_info' buffer must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 *
 * Users of the export format should ignore possible additional values
 * separated by ',', and find the message after the ';' character.
 *
 * The optional key/value pairs are attached as continuation lines starting
 * with a space character and terminated by a newline. All possible
 * non-printable characters are escaped in the "\xff" notation.
 */
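
/*
 * For illustration only (values invented, not from a real boot): a LOG_KERN
 * (facility 0) record at level 6 with seq 339, a 5140900 us timestamp and an
 * attached device dictionary would appear on /dev/kmsg roughly as:
 *
 *   6,339,5140900,-;NET: Registered protocol family 10
 *    SUBSYSTEM=acpi
 *    DEVICE=+acpi:PNP0A03:00
 *
 * where "6" is (facility << 3) | level and the dictionary lines begin with
 * a single space.
 */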

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

struct latched_seq {
	seqcount_latch_t	latch;
	u64			val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
	.val[0]		= 0,
	.val[1]		= 0,
};

#ifdef CONFIG_PRINTK_CALLER
#define PREFIX_MAX		48
#else
#define PREFIX_MAX		32
#endif

/* the maximum size of a formatted record (i.e. with prefix added per line) */
#define CONSOLE_LOG_MAX		1024

/* the maximum size for a dropped text message */
#define DROPPED_TEXT_MAX	64

/* the maximum size allowed to be reserved for a record */
#define LOG_LINE_MAX		(CONSOLE_LOG_MAX - PREFIX_MAX)

#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)		((v) >> 3 & 0xff)

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5	/* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
		 PRB_AVGBITS, &__log_buf[0]);

static struct printk_ringbuffer printk_rb_dynamic;

static struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __ro_after_init;

bool printk_percpu_data_ready(void)
{
	return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
	raw_write_seqcount_latch(&ls->latch);
	ls->val[0] = val;
	raw_write_seqcount_latch(&ls->latch);
	ls->val[1] = val;
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
	unsigned int seq;
	unsigned int idx;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&ls->latch);
		idx = seq & 0x1;
		val = ls->val[idx];
	} while (read_seqcount_latch_retry(&ls->latch, seq));

	return val;
}
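
/*
 * A sketch of why the latch works (explanatory, not from the original
 * source): the writer bumps the sequence count around each of the two
 * copies, and readers pick the copy selected by the count's low bit. So
 * while the writer updates val[0], concurrent readers are steered to the
 * stable val[1] (and vice versa), and the retry loop catches any reader
 * that straddled a full update. Readers thus never see a torn u64 and
 * never block.
 */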

/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

	if (*text_len > max_text_len)
		*text_len = max_text_len;

	/* enable the warning message (if there is room) */
	*trunc_msg_len = strlen(trunc_msg);
	if (*text_len >= *trunc_msg_len)
		*text_len -= *trunc_msg_len;
	else
		*trunc_msg_len = 0;
}
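
/*
 * Worked example (hypothetical numbers): with a 64 KiB log buffer,
 * max_text_len is 16 KiB. A 20 KiB message is first cut down to 16 KiB
 * and then shortened further by strlen("<truncated>") bytes, so the
 * "<truncated>" marker still fits within the reserved space.
 */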

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	/*
	 * Unless restricted, we allow "read all" and "get buffer size"
	 * for everybody.
	 */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		/*
		 * For historical reasons, accept CAP_SYS_ADMIN too, with
		 * a warning.
		 */
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				     current->comm, task_pid_nr(current));
			goto ok;
		}
		return -EPERM;
	}
ok:
	return security_syslog(type);
}

static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}

static ssize_t info_print_ext_header(char *buf, size_t size,
				     struct printk_info *info)
{
	u64 ts_usec = info->ts_nsec;
	char caller[20];
#ifdef CONFIG_PRINTK_CALLER
	u32 id = info->caller_id;

	snprintf(caller, sizeof(caller), ",caller=%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
	caller[0] = '\0';
#endif

	do_div(ts_usec, 1000);

	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
			 (info->facility << 3) | info->level, info->seq,
			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}
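
/*
 * For illustration (made-up values): a LOG_KERN (facility 0) record at
 * level 4 with seq 711 and a 94000 us timestamp, emitted by task 42,
 * yields the header "4,711,94000,-,caller=T42;" (the ",caller=" part is
 * only present with CONFIG_PRINTK_CALLER).
 */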

static ssize_t msg_add_ext_text(char *buf, size_t size,
				const char *text, size_t text_len,
				unsigned char endc)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, endc);

	return p - buf;
}

static ssize_t msg_add_dict_text(char *buf, size_t size,
				 const char *key, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t len;

	if (!val_len)
		return 0;

	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');

	return len;
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *text, size_t text_len,
				  struct dev_printk_info *dev_info)
{
	ssize_t len;

	len = msg_add_ext_text(buf, size, text, text_len, '\n');

	if (!dev_info)
		goto out;

	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
				 dev_info->subsystem);
	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
				 dev_info->device);
out:
	return len;
}

/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	atomic64_t seq;
	struct ratelimit_state rs;
	struct mutex lock;
	char buf[CONSOLE_EXT_LOG_MAX];

	struct printk_info info;
	char text_buf[CONSOLE_EXT_LOG_MAX];
	struct printk_record record;
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, NULL, fmt, args);
	va_end(args);

	return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (!user || len > LOG_LINE_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	buf[len] = '\0';
	if (!copy_from_iter_full(buf, len, from)) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents a 32-bit quantity; the lower 3 bits
	 * are the log level, the rest is the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			line = endp;
		}
	}

	devkmsg_emit(facility, level, "%s", line);
	kfree(buf);
	return ret;
}
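
/*
 * Example (illustrative): writing "<12>hello\n" to /dev/kmsg stores a
 * record with level 4 (12 & 7) and facility 1 (12 >> 3, i.e. LOG_USER),
 * while writing plain "hello\n" falls back to default_message_loglevel
 * and LOG_USER.
 */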

static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_record *r = &user->record;
	size_t len;
	ssize_t ret;

	if (!user)
		return -EBADF;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;

	if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		ret = wait_event_interruptible(log_wait,
				prb_read_valid(prb,
					atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */
		if (ret)
			goto out;
	}

	if (r->info->seq != atomic64_read(&user->seq)) {
		/* our last seen message is gone, return error and reset */
		atomic64_set(&user->seq, r->info->seq);
		ret = -EPIPE;
		goto out;
	}

	len = info_print_ext_header(user->buf, sizeof(user->buf), r->info);
	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
				  &r->text_buf[0], r->info->text_len,
				  &r->info->dev_info);

	atomic64_set(&user->seq, r->info->seq + 1);

	if (len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, user->buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = len;
out:
	mutex_unlock(&user->lock);
	return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases, and it has been this way for quite some
 * time. User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (!user)
		return -EBADF;
	if (offset)
		return -ESPIPE;

	switch (whence) {
	case SEEK_SET:
		/* the first record */
		atomic64_set(&user->seq, prb_first_valid_seq(prb));
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
		break;
	case SEEK_END:
		/* after the last record */
		atomic64_set(&user->seq, prb_next_seq(prb));
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_info info;
	__poll_t ret = 0;

	if (!user)
		return EPOLLERR|EPOLLNVAL;

	poll_wait(file, &log_wait, wait);

	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
		/* return error when data has vanished underneath us */
		if (info.seq != atomic64_read(&user->seq))
			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
		else
			ret = EPOLLIN|EPOLLRDNORM;
	}

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	prb_rec_init_rd(&user->record, &user->info,
			&user->text_buf[0], sizeof(user->text_buf));

	atomic64_set(&user->seq, prb_first_valid_seq(prb));

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	if (!user)
		return 0;

	ratelimit_state_exit(&user->rs);

	mutex_destroy(&user->lock);
	kvfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

#ifdef CONFIG_CRASH_CORE
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
	struct dev_printk_info *dev_info = NULL;

	VMCOREINFO_SYMBOL(prb);
	VMCOREINFO_SYMBOL(printk_rb_static);
	VMCOREINFO_SYMBOL(clear_seq);

	/*
	 * Export struct size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */

	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, fail);

	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
	VMCOREINFO_OFFSET(prb_desc_ring, descs);
	VMCOREINFO_OFFSET(prb_desc_ring, infos);
	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

	VMCOREINFO_STRUCT_SIZE(prb_desc);
	VMCOREINFO_OFFSET(prb_desc, state_var);
	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

	VMCOREINFO_STRUCT_SIZE(printk_info);
	VMCOREINFO_OFFSET(printk_info, seq);
	VMCOREINFO_OFFSET(printk_info, ts_nsec);
	VMCOREINFO_OFFSET(printk_info, text_len);
	VMCOREINFO_OFFSET(printk_info, caller_id);
	VMCOREINFO_OFFSET(printk_info, dev_info);

	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
	VMCOREINFO_OFFSET(dev_printk_info, device);
	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
	VMCOREINFO_OFFSET(prb_data_ring, data);
	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

	VMCOREINFO_SIZE(atomic_long_t);
	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

	VMCOREINFO_STRUCT_SIZE(latched_seq);
	VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
	if (size > (u64)LOG_BUF_LEN_MAX) {
		size = (u64)LOG_BUF_LEN_MAX;
		pr_err("log_buf over 2G is not supported.\n");
	}

	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len)
		new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
	u64 size;

	if (!str)
		return -EINVAL;

	size = memparse(str, &str);

	log_buf_len_update(size);

	return 0;
}
early_param("log_buf_len", log_buf_len_setup);
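
/*
 * For example (illustrative values): booting with "log_buf_len=3M" rounds
 * the request up to the next power of two, 4 MiB; memparse() accepts the
 * usual K/M/G suffixes. Requests smaller than the current log_buf_len are
 * ignored.
 */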

#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case let's ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large systems (> 64 CPUs) */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */

static void __init set_percpu_data_ready(void)
{
	__printk_percpu_data_ready = true;
}

static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
				     struct printk_record *r)
{
	struct prb_reserved_entry e;
	struct printk_record dest_r;

	prb_rec_init_wr(&dest_r, r->info->text_len);

	if (!prb_reserve(&e, rb, &dest_r))
		return 0;

	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
	dest_r.info->text_len = r->info->text_len;
	dest_r.info->facility = r->info->facility;
	dest_r.info->level = r->info->level;
	dest_r.info->flags = r->info->flags;
	dest_r.info->ts_nsec = r->info->ts_nsec;
	dest_r.info->caller_id = r->info->caller_id;
	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));

	prb_final_commit(&e);

	return prb_record_text_space(&e);
}

static char setup_text_buf[LOG_LINE_MAX] __initdata;

void __init setup_log_buf(int early)
{
	struct printk_info *new_infos;
	unsigned int new_descs_count;
	struct prb_desc *new_descs;
	struct printk_info info;
	struct printk_record r;
	unsigned int text_size;
	size_t new_descs_size;
	size_t new_infos_size;
	unsigned long flags;
	char *new_log_buf;
	unsigned int free;
	u64 seq;

	/*
	 * Some archs call setup_log_buf() multiple times - first is very
	 * early, e.g. from setup_arch(), and second - when percpu_areas
	 * are initialised.
	 */
	if (!early)
		set_percpu_data_ready();

	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len)
		return;

	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
	if (new_descs_count == 0) {
		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
		return;
	}

	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %lu text bytes not available\n",
		       new_log_buf_len);
		return;
	}

	new_descs_size = new_descs_count * sizeof(struct prb_desc);
	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
	if (unlikely(!new_descs)) {
		pr_err("log_buf_len: %zu desc bytes not available\n",
		       new_descs_size);
		goto err_free_log_buf;
	}

	new_infos_size = new_descs_count * sizeof(struct printk_info);
	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
	if (unlikely(!new_infos)) {
		pr_err("log_buf_len: %zu info bytes not available\n",
		       new_infos_size);
		goto err_free_descs;
	}

	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));

	prb_init(&printk_rb_dynamic,
		 new_log_buf, ilog2(new_log_buf_len),
		 new_descs, ilog2(new_descs_count),
		 new_infos);

	local_irq_save(flags);

	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;

	free = __LOG_BUF_LEN;
	prb_for_each_record(0, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	prb = &printk_rb_dynamic;

	local_irq_restore(flags);

	/*
	 * Copy any remaining messages that might have appeared from
	 * NMI context after copying but before switching to the
	 * dynamic buffer.
	 */
	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	if (seq != prb_next_seq(&printk_rb_static)) {
		pr_err("dropped %llu messages\n",
		       prb_next_seq(&printk_rb_static) - seq);
	}

	pr_info("log_buf_len: %u bytes\n", log_buf_len);
	pr_info("early log buf free: %u(%u%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
	return;

err_free_descs:
	memblock_free(new_descs, new_descs_size);
err_free_log_buf:
	memblock_free(new_log_buf, new_log_buf_len);
}

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
	ignore_loglevel = true;
	pr_info("debug: ignoring loglevel setting.\n");

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");

static bool suppress_message_printing(int level)
{
	return (level >= console_loglevel && !ignore_loglevel);
}

#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;	/* based on boot_delay */

static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		 "HZ: %d, loops_per_msec: %llu\n",
		 boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
early_param("boot_delay", boot_delay_setup);

static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;

	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
			|| suppress_message_printing(level)) {
		return;
	}

	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
	return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec = do_div(ts, 1000000000);

	return sprintf(buf, "[%5lu.%06lu]",
		       (unsigned long)ts, rem_nsec / 1000);
}

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
	char caller[12];

	snprintf(caller, sizeof(caller), "%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
	return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif

static size_t info_print_prefix(const struct printk_info *info, bool syslog,
				bool time, char *buf)
{
	size_t len = 0;

	if (syslog)
		len = print_syslog((info->facility << 3) | info->level, buf);

	if (time)
		len += print_time(info->ts_nsec, buf + len);

	len += print_caller(info->caller_id, buf + len);

	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
		buf[len++] = ' ';
		buf[len] = '\0';
	}

	return len;
}
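
/*
 * Putting the pieces together (made-up values): with syslog and time
 * enabled and CONFIG_PRINTK_CALLER set, a level 6 LOG_KERN record stamped
 * at 12.345678 s and printed from task 123 gets the prefix
 *
 *   "<6>[   12.345678][  T123] "
 *
 * i.e. print_syslog(), print_time() and print_caller() output
 * concatenated, followed by the separating space.
 */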

/*
 * Prepare the record for printing. The text is shifted within the given
 * buffer to avoid a need for another one. The following operations are
 * done:
 *
 *   - Add prefix for each line.
 *   - Drop truncated lines that no longer fit into the buffer.
 *   - Add the trailing newline that has been removed in vprintk_store().
 *   - Add a string terminator.
 *
 * Since the produced string is always terminated, the maximum possible
 * return value is @r->text_buf_size - 1.
 *
 * Return: The length of the updated/prepared text, including the added
 * prefixes and the newline. The terminator is not counted. The dropped
 * line(s) are not counted.
 */
static size_t record_print_text(struct printk_record *r, bool syslog,
				bool time)
{
	size_t text_len = r->info->text_len;
	size_t buf_size = r->text_buf_size;
	char *text = r->text_buf;
	char prefix[PREFIX_MAX];
	bool truncated = false;
	size_t prefix_len;
	size_t line_len;
	size_t len = 0;
	char *next;

	/*
	 * If the message was truncated because the buffer was not large
	 * enough, treat the available text as if it were the full text.
	 */
	if (text_len > buf_size)
		text_len = buf_size;

	prefix_len = info_print_prefix(r->info, syslog, time, prefix);

	/*
	 * @text_len: bytes of unprocessed text
	 * @line_len: bytes of current line _without_ newline
	 * @text:     pointer to beginning of current line
	 * @len:      number of bytes prepared in r->text_buf
	 */
	for (;;) {
		next = memchr(text, '\n', text_len);
		if (next) {
			line_len = next - text;
		} else {
			/* Drop truncated line(s). */
			if (truncated)
				break;
			line_len = text_len;
		}

		/*
		 * Truncate the text if there is not enough space to add the
		 * prefix and a trailing newline and a terminator.
		 */
		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
			/* Drop even the current line if no space. */
			if (len + prefix_len + line_len + 1 + 1 > buf_size)
				break;

			text_len = buf_size - len - prefix_len - 1 - 1;
			truncated = true;
		}

		memmove(text + prefix_len, text, text_len);
		memcpy(text, prefix, prefix_len);

		/*
		 * Increment the prepared length to include the text and
		 * prefix that were just moved+copied. Also increment for the
		 * newline at the end of this line. If this is the last line,
		 * there is no newline, but it will be added immediately below.
		 */
		len += prefix_len + line_len + 1;
		if (text_len == line_len) {
			/*
			 * This is the last line. Add the trailing newline
			 * removed in vprintk_store().
			 */
			text[prefix_len + line_len] = '\n';
			break;
		}

		/*
		 * Advance beyond the added prefix and the related line with
		 * its newline.
		 */
		text += prefix_len + line_len + 1;

		/*
		 * The remaining text has only decreased by the line with its
		 * newline.
		 *
		 * Note that @text_len can become zero. It happens when @text
		 * ended with a newline (either due to truncation or the
		 * original string ending with "\n\n"). The loop is correctly
		 * repeated and (if not truncated) an empty line with a prefix
		 * will be prepared.
		 */
		text_len -= line_len + 1;
	}

	/*
	 * If a buffer was provided, it will be terminated. Space for the
	 * string terminator is guaranteed to be available. The terminator is
	 * not counted in the return value.
	 */
	if (buf_size > 0)
		r->text_buf[len] = 0;

	return len;
}
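
/*
 * A sketch of the in-place transformation (hypothetical record): the
 * stored text "1st line\n2nd line" with the prefix "[    1.000000] "
 * becomes
 *
 *   "[    1.000000] 1st line\n[    1.000000] 2nd line\n"
 *
 * with the trailing newline restored, each line prefixed, and the
 * terminator written but not counted in the returned length.
 */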

static size_t get_record_print_text_size(struct printk_info *info,
					 unsigned int line_count,
					 bool syslog, bool time)
{
	char prefix[PREFIX_MAX];
	size_t prefix_len;

	prefix_len = info_print_prefix(info, syslog, time, prefix);

	/*
	 * Each line will be preceded with a prefix. The intermediate
	 * newlines are already within the text, but a final trailing
	 * newline will be added.
	 */
	return ((prefix_len * line_count) + info->text_len + 1);
}

/*
 * Beginning with @start_seq, find the first record where it and all following
 * records up to (but not including) @max_seq fit into @size.
 *
 * @max_seq is simply an upper bound and does not need to exist. If the caller
 * does not require an upper bound, -1 can be used for @max_seq.
 */
static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
				  bool syslog, bool time)
{
	struct printk_info info;
	unsigned int line_count;
	size_t len = 0;
	u64 seq;

	/* Determine the size of the records up to @max_seq. */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (info.seq >= max_seq)
			break;
		len += get_record_print_text_size(&info, line_count, syslog, time);
	}

	/*
	 * Adjust the upper bound for the next loop to avoid subtracting
	 * lengths that were never added.
	 */
	if (seq < max_seq)
		max_seq = seq;

	/*
	 * Move first record forward until length fits into the buffer. Ignore
	 * newest messages that were not counted in the above cycle. Messages
	 * might appear and get lost in the meantime. This is a best effort
	 * that prevents an infinite loop that could occur with a retry.
	 */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (len <= size || info.seq >= max_seq)
			break;
		len -= get_record_print_text_size(&info, line_count, syslog, time);
	}

	return seq;
}
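
/*
 * Worked example (invented sizes): three records of 100, 200 and 300
 * formatted bytes with a 450 byte buffer. The total (600) does not fit,
 * nor do the last two records (500), so the returned seq is that of the
 * 300 byte record - the first record from which everything up to
 * @max_seq fits.
 */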

/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
	struct printk_info info;
	struct printk_record r;
	char *text;
	int len = 0;
	u64 seq;

	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);

	mutex_lock(&syslog_lock);

	/*
	 * Wait for the @syslog_seq record to be available. @syslog_seq may
	 * change while waiting.
	 */
	do {
		seq = syslog_seq;

		mutex_unlock(&syslog_lock);
		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		len = wait_event_interruptible(log_wait,
				prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
		mutex_lock(&syslog_lock);

		if (len)
			goto out;
	} while (syslog_seq != seq);

	/*
	 * Copy records that fit into the buffer. The above cycle makes sure
	 * that the first record is always available.
	 */
	do {
		size_t n;
		size_t skip;
		int err;

		if (!prb_read_valid(prb, syslog_seq, &r))
			break;

		if (r.info->seq != syslog_seq) {
			/* message is gone, move to next valid one */
			syslog_seq = r.info->seq;
			syslog_partial = 0;
		}

		/*
		 * To keep reading/counting partial line consistent,
		 * use printk_time value as of the beginning of a line.
		 */
		if (!syslog_partial)
			syslog_time = printk_time;

		skip = syslog_partial;
		n = record_print_text(&r, true, syslog_time);
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
			syslog_seq = r.info->seq + 1;
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len) {
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
		} else
			n = 0;

		if (!n)
			break;

		mutex_unlock(&syslog_lock);
		err = copy_to_user(buf, text + skip, n);
		mutex_lock(&syslog_lock);

		if (err) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	} while (size);
out:
	mutex_unlock(&syslog_lock);
	kfree(text);
	return len;
}

static int syslog_print_all(char __user *buf, int size, bool clear)
{
	struct printk_info info;
	struct printk_record r;
	char *text;
	int len = 0;
	u64 seq;
	bool time;

	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	time = printk_time;
	/*
	 * Find first record that fits, including all following records,
	 * into the user-provided buffer for this dump.
	 */
	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
				     size, true, time);

	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);

	len = 0;
	prb_for_each_record(seq, prb, seq, &r) {
		int textlen;

		textlen = record_print_text(&r, true, time);

		if (len + textlen > size) {
			seq--;
			break;
		}

		if (copy_to_user(buf + len, text, textlen))
			len = -EFAULT;
		else
			len += textlen;

		if (len < 0)
			break;
	}

	if (clear) {
		mutex_lock(&syslog_lock);
		latched_seq_write(&clear_seq, seq);
		mutex_unlock(&syslog_lock);
	}

	kfree(text);
	return len;
}

static void syslog_clear(void)
{
	mutex_lock(&syslog_lock);
	latched_seq_write(&clear_seq, prb_next_seq(prb));
	mutex_unlock(&syslog_lock);
}

int do_syslog(int type, char __user *buf, int len, int source)
{
	struct printk_info info;
	bool clear = false;
	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
	int error;

	error = check_syslog_permissions(type, source);
	if (error)
		return error;

	switch (type) {
	case SYSLOG_ACTION_CLOSE:	/* Close log */
		break;
	case SYSLOG_ACTION_OPEN:	/* Open log */
		break;
	case SYSLOG_ACTION_READ:	/* Read from log */
		if (!buf || len < 0)
			return -EINVAL;
		if (!len)
			return 0;
		if (!access_ok(buf, len))
			return -EFAULT;
		error = syslog_print(buf, len);
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		clear = true;
		fallthrough;
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		if (!buf || len < 0)
			return -EINVAL;
		if (!len)
			return 0;
		if (!access_ok(buf, len))
			return -EFAULT;
		error = syslog_print_all(buf, len, clear);
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		syslog_clear();
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = LOGLEVEL_DEFAULT;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		if (len < 1 || len > 8)
			return -EINVAL;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = LOGLEVEL_DEFAULT;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		mutex_lock(&syslog_lock);
		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
			/* No unread messages. */
			mutex_unlock(&syslog_lock);
			return 0;
		}
		if (info.seq != syslog_seq) {
			/* messages are gone, move to first one */
			syslog_seq = info.seq;
			syslog_partial = 0;
		}
		if (source == SYSLOG_FROM_PROC) {
			/*
			 * Short-cut for poll(/proc/kmsg) which simply checks
			 * for pending data, not the size; return the count of
			 * records, not the length.
			 */
			error = prb_next_seq(prb) - syslog_seq;
		} else {
			bool time = syslog_partial ? syslog_time : printk_time;
			unsigned int line_count;
			u64 seq;

			prb_for_each_info(syslog_seq, prb, seq, &info,
					  &line_count) {
				error += get_record_print_text_size(&info, line_count,
								    true, time);
				time = printk_time;
			}
			error -= syslog_partial;
		}
		mutex_unlock(&syslog_lock);
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}

	return error;
}

SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}

/*
 * Special console_lock variants that help to reduce the risk of soft-lockups.
 * They allow passing console_lock to another printk() call using a busy wait.
 */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_owner_dep_map = {
	.name = "console_owner"
};
#endif

static DEFINE_RAW_SPINLOCK(console_owner_lock);
static struct task_struct *console_owner;
static bool console_waiter;

/**
 * console_lock_spinning_enable - mark beginning of code where another
 *	thread might safely busy wait
 *
 * This basically converts console_lock into a spinlock. This marks
 * the section where the console_lock owner can not sleep, because
 * there may be a waiter spinning (like a spinlock). Also it must be
 * ready to hand over the lock at the end of the section.
 */
static void console_lock_spinning_enable(void)
{
	raw_spin_lock(&console_owner_lock);
	console_owner = current;
	raw_spin_unlock(&console_owner_lock);

	/* The waiter may spin on us after setting console_owner */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}

/**
 * console_lock_spinning_disable_and_check - mark end of code where another
 *	thread was able to busy wait and check if there is a waiter
 *
 * This is called at the end of the section where spinning is allowed.
 * It has two functions. First, it is a signal that it is no longer
 * safe to start busy waiting for the lock. Second, it checks if
 * there is a busy waiter and passes the lock rights to the waiter.
 *
 * Important: Callers lose the lock if there was a busy waiter.
 *	They must not touch items synchronized by console_lock
 *	in this case.
 *
 * Return: 1 if the lock rights were passed, 0 otherwise.
 */
static int console_lock_spinning_disable_and_check(void)
{
	int waiter;

	raw_spin_lock(&console_owner_lock);
	waiter = READ_ONCE(console_waiter);
	console_owner = NULL;
	raw_spin_unlock(&console_owner_lock);

	if (!waiter) {
		spin_release(&console_owner_dep_map, _THIS_IP_);
		return 0;
	}

	/* The waiter is now free to continue */
	WRITE_ONCE(console_waiter, false);

	spin_release(&console_owner_dep_map, _THIS_IP_);

	/*
	 * Hand off console_lock to waiter. The waiter will perform
	 * the up(). After this, the waiter is the console_lock owner.
	 */
	mutex_release(&console_lock_dep_map, _THIS_IP_);
	return 1;
}

/**
 * console_trylock_spinning - try to get console_lock by busy waiting
 *
 * This allows busy waiting for the console_lock when the current
 * owner is running in specially marked sections. It means that
 * the current owner is running and cannot reschedule until it
 * is ready to lose the lock.
 *
 * Return: 1 if we got the lock, 0 otherwise
 */
static int console_trylock_spinning(void)
{
	struct task_struct *owner = NULL;
	bool waiter;
	bool spin = false;
	unsigned long flags;

	if (console_trylock())
		return 1;

	/*
	 * It's unsafe to spin once a panic has begun. If we are the
	 * panic CPU, we may have already halted the owner of the
	 * console_sem. If we are not the panic CPU, then we should
	 * avoid taking console_sem, so the panic CPU has a better
	 * chance of cleanly acquiring it later.
	 */
	if (panic_in_progress())
		return 0;

	printk_safe_enter_irqsave(flags);

	raw_spin_lock(&console_owner_lock);
	owner = READ_ONCE(console_owner);
	waiter = READ_ONCE(console_waiter);
	if (!waiter && owner && owner != current) {
		WRITE_ONCE(console_waiter, true);
		spin = true;
	}
	raw_spin_unlock(&console_owner_lock);

	/*
	 * If there is an active printk() writing to the
	 * consoles, instead of having it write our data too,
	 * see if we can offload that load from the active
	 * printer, and do some printing ourselves.
	 * Go into a spin only if there isn't already a waiter
	 * spinning, and there is an active printer, and
	 * that active printer isn't us (recursive printk?).
	 */
	if (!spin) {
		printk_safe_exit_irqrestore(flags);
		return 0;
	}

	/* We spin waiting for the owner to release us */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
	/* Owner will clear console_waiter on hand off */
	while (READ_ONCE(console_waiter))
		cpu_relax();
	spin_release(&console_owner_dep_map, _THIS_IP_);

	printk_safe_exit_irqrestore(flags);
	/*
	 * The owner passed the console lock to us.
	 * Since we did not spin on console lock, annotate
	 * this as a trylock. Otherwise lockdep will
	 * complain.
	 */
	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);

	return 1;
}
1923
1924 /*
1925 * Call the specified console driver, asking it to write out the specified
1926 * text and length. If @dropped_text is non-NULL and any records have been
1927 * dropped, a dropped message will be written out first.
1928 */
1929 static void call_console_driver(struct console *con, const char *text, size_t len,
1930 char *dropped_text)
1931 {
1932 size_t dropped_len;
1933
1934 if (con->dropped && dropped_text) {
1935 dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
1936 "** %lu printk messages dropped **\n",
1937 con->dropped);
1938 con->dropped = 0;
1939 con->write(con, dropped_text, dropped_len);
1940 }
1941
1942 con->write(con, text, len);
1943 }
1944
1945 /*
1946 * Recursion is tracked separately on each CPU. If NMIs are supported, an
1947 * additional NMI context per CPU is also separately tracked. Until
1948 * per-CPU data is ready, separate "early" counters are used instead.
1949 */
1950 static DEFINE_PER_CPU(u8, printk_count);
1951 static u8 printk_count_early;
1952 #ifdef CONFIG_HAVE_NMI
1953 static DEFINE_PER_CPU(u8, printk_count_nmi);
1954 static u8 printk_count_nmi_early;
1955 #endif
1956
1957 /*
1958 * Recursion is limited to keep the output sane. printk() should not require
1959 * more than 1 level of recursion (allowing, for example, printk() to trigger
1960 * a WARN), but a higher value is used in case some printk-internal errors
1961 * exist, such as the ringbuffer validation checks failing.
1962 */
1963 #define PRINTK_MAX_RECURSION 3
1964
1965 /*
1966 * Return a pointer to the dedicated counter for the CPU+context of the
1967 * caller.
1968 */
1969 static u8 *__printk_recursion_counter(void)
1970 {
1971 #ifdef CONFIG_HAVE_NMI
1972 if (in_nmi()) {
1973 if (printk_percpu_data_ready())
1974 return this_cpu_ptr(&printk_count_nmi);
1975 return &printk_count_nmi_early;
1976 }
1977 #endif
1978 if (printk_percpu_data_ready())
1979 return this_cpu_ptr(&printk_count);
1980 return &printk_count_early;
1981 }
1982
1983 /*
1984 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
1985 * The caller must check the boolean return value to see if the recursion is
1986 * allowed. On failure, interrupts are not disabled.
1987 *
1988 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
1989 * that is passed to printk_exit_irqrestore().
1990 */
1991 #define printk_enter_irqsave(recursion_ptr, flags) \
1992 ({ \
1993 bool success = true; \
1994 \
1995 typecheck(u8 *, recursion_ptr); \
1996 local_irq_save(flags); \
1997 (recursion_ptr) = __printk_recursion_counter(); \
1998 if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
1999 local_irq_restore(flags); \
2000 success = false; \
2001 } else { \
2002 (*(recursion_ptr))++; \
2003 } \
2004 success; \
2005 })
2006
2007 /* Exit recursion tracking, restoring interrupts. */
2008 #define printk_exit_irqrestore(recursion_ptr, flags) \
2009 do { \
2010 typecheck(u8 *, recursion_ptr); \
2011 (*(recursion_ptr))--; \
2012 local_irq_restore(flags); \
2013 } while (0)
2014
2015 int printk_delay_msec __read_mostly;
2016
2017 static inline void printk_delay(int level)
2018 {
2019 boot_delay_msec(level);
2020
2021 if (unlikely(printk_delay_msec)) {
2022 int m = printk_delay_msec;
2023
2024 while (m--) {
2025 mdelay(1);
2026 touch_nmi_watchdog();
2027 }
2028 }
2029 }
2030
2031 static inline u32 printk_caller_id(void)
2032 {
2033 return in_task() ? task_pid_nr(current) :
2034 0x80000000 + smp_processor_id();
2035 }
2036
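/*
 * Illustrative sketch (not part of this file): decoding the caller id
 * produced above. Values with the top bit set encode the CPU of a
 * non-task context; all other values are thread (PID) ids. This is the
 * same convention the "C%u"/"T%u" caller annotations are derived from.
 *
 *	static void decode_caller_id(u32 id, char *buf, size_t size)
 *	{
 *		if (id & 0x80000000)
 *			snprintf(buf, size, "C%u", id & ~0x80000000);
 *		else
 *			snprintf(buf, size, "T%u", id);
 *	}
 */
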
2037 /**
2038 * printk_parse_prefix - Parse level and control flags.
2039 *
2040 * @text: The terminated text message.
2041 * @level: A pointer to the current level value, will be updated.
2042 * @flags: A pointer to the current printk_info flags, will be updated.
2043 *
2044 * @level may be NULL if the caller is not interested in the parsed value.
2045 * Otherwise the variable pointed to by @level must be set to
2046 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2047 *
2048 * @flags may be NULL if the caller is not interested in the parsed value.
2049 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2050 * value.
2051 *
2052 * Return: The length of the parsed level and control flags.
2053 */
2054 u16 printk_parse_prefix(const char *text, int *level,
2055 enum printk_info_flags *flags)
2056 {
2057 u16 prefix_len = 0;
2058 int kern_level;
2059
2060 while (*text) {
2061 kern_level = printk_get_level(text);
2062 if (!kern_level)
2063 break;
2064
2065 switch (kern_level) {
2066 case '0' ... '7':
2067 if (level && *level == LOGLEVEL_DEFAULT)
2068 *level = kern_level - '0';
2069 break;
2070 case 'c': /* KERN_CONT */
2071 if (flags)
2072 *flags |= LOG_CONT;
2073 }
2074
2075 prefix_len += 2;
2076 text += 2;
2077 }
2078
2079 return prefix_len;
2080 }
2081
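/*
 * Illustrative sketch: for a kernel-generated message the level prefix
 * is the two-byte SOH encoding, e.g. KERN_ERR expands to "\0013". A
 * hypothetical caller would see:
 *
 *	int level = LOGLEVEL_DEFAULT;
 *	enum printk_info_flags flags = 0;
 *	u16 len;
 *
 *	len = printk_parse_prefix(KERN_ERR "disk failure", &level, &flags);
 *	// len == 2, level == 3, flags unchanged
 */
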
2082 __printf(5, 0)
2083 static u16 printk_sprint(char *text, u16 size, int facility,
2084 enum printk_info_flags *flags, const char *fmt,
2085 va_list args)
2086 {
2087 u16 text_len;
2088
2089 text_len = vscnprintf(text, size, fmt, args);
2090
2091 /* Mark and strip a trailing newline. */
2092 if (text_len && text[text_len - 1] == '\n') {
2093 text_len--;
2094 *flags |= LOG_NEWLINE;
2095 }
2096
2097 /* Strip log level and control flags. */
2098 if (facility == 0) {
2099 u16 prefix_len;
2100
2101 prefix_len = printk_parse_prefix(text, NULL, NULL);
2102 if (prefix_len) {
2103 text_len -= prefix_len;
2104 memmove(text, text + prefix_len, text_len);
2105 }
2106 }
2107
2108 trace_console_rcuidle(text, text_len);
2109
2110 return text_len;
2111 }
2112
2113 __printf(4, 0)
2114 int vprintk_store(int facility, int level,
2115 const struct dev_printk_info *dev_info,
2116 const char *fmt, va_list args)
2117 {
2118 struct prb_reserved_entry e;
2119 enum printk_info_flags flags = 0;
2120 struct printk_record r;
2121 unsigned long irqflags;
2122 u16 trunc_msg_len = 0;
2123 char prefix_buf[8];
2124 u8 *recursion_ptr;
2125 u16 reserve_size;
2126 va_list args2;
2127 u32 caller_id;
2128 u16 text_len;
2129 int ret = 0;
2130 u64 ts_nsec;
2131
2132 if (!printk_enter_irqsave(recursion_ptr, irqflags))
2133 return 0;
2134
2135 /*
2136 * Since the duration of printk() can vary depending on the message
2137 * and state of the ringbuffer, grab the timestamp now so that it is
2138 * close to the call of printk(). This provides a more deterministic
2139 * timestamp with respect to the caller.
2140 */
2141 ts_nsec = local_clock();
2142
2143 caller_id = printk_caller_id();
2144
2145 /*
2146 * The sprintf needs to come first since the syslog prefix might be
2147 * passed in as a parameter. An extra byte must be reserved so that
2148 * later the vscnprintf() into the reserved buffer has room for the
2149 * terminating '\0', which is not counted by vsnprintf().
2150 */
2151 va_copy(args2, args);
2152 reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2153 va_end(args2);
2154
2155 if (reserve_size > LOG_LINE_MAX)
2156 reserve_size = LOG_LINE_MAX;
2157
2158 /* Extract log level or control flags. */
2159 if (facility == 0)
2160 printk_parse_prefix(&prefix_buf[0], &level, &flags);
2161
2162 if (level == LOGLEVEL_DEFAULT)
2163 level = default_message_loglevel;
2164
2165 if (dev_info)
2166 flags |= LOG_NEWLINE;
2167
2168 if (flags & LOG_CONT) {
2169 prb_rec_init_wr(&r, reserve_size);
2170 if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
2171 text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2172 facility, &flags, fmt, args);
2173 r.info->text_len += text_len;
2174
2175 if (flags & LOG_NEWLINE) {
2176 r.info->flags |= LOG_NEWLINE;
2177 prb_final_commit(&e);
2178 } else {
2179 prb_commit(&e);
2180 }
2181
2182 ret = text_len;
2183 goto out;
2184 }
2185 }
2186
2187 /*
2188 * Explicitly initialize the record before every prb_reserve() call.
2189 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2190 * structure when they fail.
2191 */
2192 prb_rec_init_wr(&r, reserve_size);
2193 if (!prb_reserve(&e, prb, &r)) {
2194 /* truncate the message if it is too long for empty buffer */
2195 truncate_msg(&reserve_size, &trunc_msg_len);
2196
2197 prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2198 if (!prb_reserve(&e, prb, &r))
2199 goto out;
2200 }
2201
2202 /* fill message */
2203 text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2204 if (trunc_msg_len)
2205 memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2206 r.info->text_len = text_len + trunc_msg_len;
2207 r.info->facility = facility;
2208 r.info->level = level & 7;
2209 r.info->flags = flags & 0x1f;
2210 r.info->ts_nsec = ts_nsec;
2211 r.info->caller_id = caller_id;
2212 if (dev_info)
2213 memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2214
2215 /* A message without a trailing newline can be continued. */
2216 if (!(flags & LOG_NEWLINE))
2217 prb_commit(&e);
2218 else
2219 prb_final_commit(&e);
2220
2221 ret = text_len + trunc_msg_len;
2222 out:
2223 printk_exit_irqrestore(recursion_ptr, irqflags);
2224 return ret;
2225 }
2226
2227 asmlinkage int vprintk_emit(int facility, int level,
2228 const struct dev_printk_info *dev_info,
2229 const char *fmt, va_list args)
2230 {
2231 int printed_len;
2232 bool in_sched = false;
2233
2234 /* Suppress unimportant messages after panic happens */
2235 if (unlikely(suppress_printk))
2236 return 0;
2237
2238 if (unlikely(suppress_panic_printk) &&
2239 atomic_read(&panic_cpu) != raw_smp_processor_id())
2240 return 0;
2241
2242 if (level == LOGLEVEL_SCHED) {
2243 level = LOGLEVEL_DEFAULT;
2244 in_sched = true;
2245 }
2246
2247 printk_delay(level);
2248
2249 printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2250
2251 /* If called from the scheduler, we can not call up(). */
2252 if (!in_sched) {
2253 /*
2254 * The caller may be holding system-critical or
2255 * timing-sensitive locks. Disable preemption during
2256 * printing of all remaining records to all consoles so that
2257 * this context can return as soon as possible. Hopefully
2258 * another printk() caller will take over the printing.
2259 */
2260 preempt_disable();
2261 /*
2262 * Try to acquire and then immediately release the console
2263 * semaphore. The release will print out buffers. With the
2264 * spinning variant, this context tries to take over the
2265 * printing from another printing context.
2266 */
2267 if (console_trylock_spinning())
2268 console_unlock();
2269 preempt_enable();
2270 }
2271
2272 wake_up_klogd();
2273 return printed_len;
2274 }
2275 EXPORT_SYMBOL(vprintk_emit);
2276
2277 int vprintk_default(const char *fmt, va_list args)
2278 {
2279 return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2280 }
2281 EXPORT_SYMBOL_GPL(vprintk_default);
2282
2283 asmlinkage __visible int _printk(const char *fmt, ...)
2284 {
2285 va_list args;
2286 int r;
2287
2288 va_start(args, fmt);
2289 r = vprintk(fmt, args);
2290 va_end(args);
2291
2292 return r;
2293 }
2294 EXPORT_SYMBOL(_printk);
2295
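/*
 * Illustrative usage (normally reached through the printk() and
 * pr_*() wrapper macros rather than by calling _printk() directly):
 *
 *	printk(KERN_INFO "link up on port %d\n", port);
 *	pr_warn("firmware older than expected\n");
 *
 * Messages without an explicit KERN_<LEVEL> prefix are stored with
 * LOGLEVEL_DEFAULT and resolved to default_message_loglevel later.
 */
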
2296 static bool pr_flush(int timeout_ms, bool reset_on_progress);
2297 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2298
2299 #else /* CONFIG_PRINTK */
2300
2301 #define CONSOLE_LOG_MAX 0
2302 #define DROPPED_TEXT_MAX 0
2303 #define printk_time false
2304
2305 #define prb_read_valid(rb, seq, r) false
2306 #define prb_first_valid_seq(rb) 0
2307 #define prb_next_seq(rb) 0
2308
2309 static u64 syslog_seq;
2310
2311 static size_t record_print_text(const struct printk_record *r,
2312 bool syslog, bool time)
2313 {
2314 return 0;
2315 }
2316 static ssize_t info_print_ext_header(char *buf, size_t size,
2317 struct printk_info *info)
2318 {
2319 return 0;
2320 }
2321 static ssize_t msg_print_ext_body(char *buf, size_t size,
2322 char *text, size_t text_len,
2323 struct dev_printk_info *dev_info) { return 0; }
2324 static void console_lock_spinning_enable(void) { }
2325 static int console_lock_spinning_disable_and_check(void) { return 0; }
2326 static void call_console_driver(struct console *con, const char *text, size_t len,
2327 char *dropped_text)
2328 {
2329 }
2330 static bool suppress_message_printing(int level) { return false; }
2331 static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2332 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2333
2334 #endif /* CONFIG_PRINTK */
2335
2336 #ifdef CONFIG_EARLY_PRINTK
2337 struct console *early_console;
2338
2339 asmlinkage __visible void early_printk(const char *fmt, ...)
2340 {
2341 va_list ap;
2342 char buf[512];
2343 int n;
2344
2345 if (!early_console)
2346 return;
2347
2348 va_start(ap, fmt);
2349 n = vscnprintf(buf, sizeof(buf), fmt, ap);
2350 va_end(ap);
2351
2352 early_console->write(early_console, buf, n);
2353 }
2354 #endif
2355
2356 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2357 {
2358 if (!user_specified)
2359 return;
2360
2361 /*
2362 * @c console was defined by the user on the command line.
2363 * Do not clear the flag when the console is also added by SPCR or the device tree.
2364 */
2365 c->user_specified = true;
2366 /* At least one console defined by the user on the command line. */
2367 console_set_on_cmdline = 1;
2368 }
2369
2370 static int __add_preferred_console(char *name, int idx, char *options,
2371 char *brl_options, bool user_specified)
2372 {
2373 struct console_cmdline *c;
2374 int i;
2375
2376 /*
2377 * See if this tty is not yet registered, and
2378 * if we have a slot free.
2379 */
2380 for (i = 0, c = console_cmdline;
2381 i < MAX_CMDLINECONSOLES && c->name[0];
2382 i++, c++) {
2383 if (strcmp(c->name, name) == 0 && c->index == idx) {
2384 if (!brl_options)
2385 preferred_console = i;
2386 set_user_specified(c, user_specified);
2387 return 0;
2388 }
2389 }
2390 if (i == MAX_CMDLINECONSOLES)
2391 return -E2BIG;
2392 if (!brl_options)
2393 preferred_console = i;
2394 strlcpy(c->name, name, sizeof(c->name));
2395 c->options = options;
2396 set_user_specified(c, user_specified);
2397 braille_set_options(c, brl_options);
2398
2399 c->index = idx;
2400 return 0;
2401 }
2402
2403 static int __init console_msg_format_setup(char *str)
2404 {
2405 if (!strcmp(str, "syslog"))
2406 console_msg_format = MSG_FORMAT_SYSLOG;
2407 if (!strcmp(str, "default"))
2408 console_msg_format = MSG_FORMAT_DEFAULT;
2409 return 1;
2410 }
2411 __setup("console_msg_format=", console_msg_format_setup);
2412
2413 /*
2414 * Set up a console. Called via do_early_param() in init/main.c
2415 * for each "console=" parameter in the boot command line.
2416 */
2417 static int __init console_setup(char *str)
2418 {
2419 char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2420 char *s, *options, *brl_options = NULL;
2421 int idx;
2422
2423 /*
2424 * console="" or console=null have been suggested as a way to
2425 * disable console output. Use ttynull that has been created
2426 * for exactly this purpose.
2427 */
2428 if (str[0] == 0 || strcmp(str, "null") == 0) {
2429 __add_preferred_console("ttynull", 0, NULL, NULL, true);
2430 return 1;
2431 }
2432
2433 if (_braille_console_setup(&str, &brl_options))
2434 return 1;
2435
2436 /*
2437 * Decode str into name, index, options.
2438 */
2439 if (str[0] >= '0' && str[0] <= '9') {
2440 strcpy(buf, "ttyS");
2441 strncpy(buf + 4, str, sizeof(buf) - 5);
2442 } else {
2443 strncpy(buf, str, sizeof(buf) - 1);
2444 }
2445 buf[sizeof(buf) - 1] = 0;
2446 options = strchr(str, ',');
2447 if (options)
2448 *(options++) = 0;
2449 #ifdef __sparc__
2450 if (!strcmp(str, "ttya"))
2451 strcpy(buf, "ttyS0");
2452 if (!strcmp(str, "ttyb"))
2453 strcpy(buf, "ttyS1");
2454 #endif
2455 for (s = buf; *s; s++)
2456 if (isdigit(*s) || *s == ',')
2457 break;
2458 idx = simple_strtoul(s, NULL, 10);
2459 *s = 0;
2460
2461 __add_preferred_console(buf, idx, options, brl_options, true);
2462 return 1;
2463 }
2464 __setup("console=", console_setup);
2465
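/*
 * Worked examples of the decoding above (sketch):
 *
 *	console=ttyS0,115200n8  ->  name "ttyS", idx 0, options "115200n8"
 *	console=1,9600          ->  name "ttyS", idx 1, options "9600"
 *	                            (bare digits imply the "ttyS" prefix)
 */
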
2466 /**
2467 * add_preferred_console - add a device to the list of preferred consoles.
2468 * @name: device name
2469 * @idx: device index
2470 * @options: options for this console
2471 *
2472 * The last preferred console added will be used for kernel messages
2473 * and stdin/out/err for init. Normally this is used by console_setup
2474 * above to handle user-supplied console arguments; however it can also
2475 * be used by arch-specific code either to override the user or more
2476 * commonly to provide a default console (ie from PROM variables) when
2477 * the user has not supplied one.
2478 */
2479 int add_preferred_console(char *name, int idx, char *options)
2480 {
2481 return __add_preferred_console(name, idx, options, NULL, false);
2482 }
2483
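/*
 * Illustrative sketch: arch or firmware code supplying a fallback
 * console when the user passed no "console=" option. The device name
 * and options here are hypothetical.
 *
 *	if (!console_set_on_cmdline)
 *		add_preferred_console("ttyS", 0, "115200n8");
 */
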
2484 bool console_suspend_enabled = true;
2485 EXPORT_SYMBOL(console_suspend_enabled);
2486
2487 static int __init console_suspend_disable(char *str)
2488 {
2489 console_suspend_enabled = false;
2490 return 1;
2491 }
2492 __setup("no_console_suspend", console_suspend_disable);
2493 module_param_named(console_suspend, console_suspend_enabled,
2494 bool, S_IRUGO | S_IWUSR);
2495 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2496 " and hibernate operations");
2497
2498 static bool printk_console_no_auto_verbose;
2499
2500 void console_verbose(void)
2501 {
2502 if (console_loglevel && !printk_console_no_auto_verbose)
2503 console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2504 }
2505 EXPORT_SYMBOL_GPL(console_verbose);
2506
2507 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2508 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2509
2510 /**
2511 * suspend_console - suspend the console subsystem
2512 *
2513 * This disables printk() while we go into suspend states
2514 */
2515 void suspend_console(void)
2516 {
2517 if (!console_suspend_enabled)
2518 return;
2519 pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2520 pr_flush(1000, true);
2521 console_lock();
2522 console_suspended = 1;
2523 up_console_sem();
2524 }
2525
2526 void resume_console(void)
2527 {
2528 if (!console_suspend_enabled)
2529 return;
2530 down_console_sem();
2531 console_suspended = 0;
2532 console_unlock();
2533 pr_flush(1000, true);
2534 }
2535
2536 /**
2537 * console_cpu_notify - print deferred console messages after CPU hotplug
2538 * @cpu: unused
2539 *
2540 * If printk() is called from a CPU that is not online yet, the messages
2541 * will be printed on the console only if there are CON_ANYTIME consoles.
2542 * This function is called when a new CPU comes online (or fails to come
2543 * up) or goes offline.
2544 */
2545 static int console_cpu_notify(unsigned int cpu)
2546 {
2547 if (!cpuhp_tasks_frozen) {
2548 /* If trylock fails, someone else is doing the printing */
2549 if (console_trylock())
2550 console_unlock();
2551 }
2552 return 0;
2553 }
2554
2555 /**
2556 * console_lock - lock the console system for exclusive use.
2557 *
2558 * Acquires a lock which guarantees that the caller has
2559 * exclusive access to the console system and the console_drivers list.
2560 *
2561 * Can sleep, returns nothing.
2562 */
2563 void console_lock(void)
2564 {
2565 might_sleep();
2566
2567 down_console_sem();
2568 if (console_suspended)
2569 return;
2570 console_locked = 1;
2571 console_may_schedule = 1;
2572 }
2573 EXPORT_SYMBOL(console_lock);
2574
2575 /**
2576 * console_trylock - try to lock the console system for exclusive use.
2577 *
2578 * Try to acquire a lock which guarantees that the caller has exclusive
2579 * access to the console system and the console_drivers list.
2580 *
2581 * returns 1 on success, and 0 on failure to acquire the lock.
2582 */
2583 int console_trylock(void)
2584 {
2585 if (down_trylock_console_sem())
2586 return 0;
2587 if (console_suspended) {
2588 up_console_sem();
2589 return 0;
2590 }
2591 console_locked = 1;
2592 console_may_schedule = 0;
2593 return 1;
2594 }
2595 EXPORT_SYMBOL(console_trylock);
2596
2597 int is_console_locked(void)
2598 {
2599 return console_locked;
2600 }
2601 EXPORT_SYMBOL(is_console_locked);
2602
2603 /*
2604 * Return true when this CPU should unlock console_sem without pushing all
2605 * messages to the console. This reduces the chance that the console is
2606 * locked when the panic CPU tries to use it.
2607 */
2608 static bool abandon_console_lock_in_panic(void)
2609 {
2610 if (!panic_in_progress())
2611 return false;
2612
2613 /*
2614 * We can use raw_smp_processor_id() here because it is impossible for
2615 * the task to be migrated to the panic_cpu, or away from it. If
2616 * panic_cpu has already been set, and we're not currently executing on
2617 * that CPU, then we never will be.
2618 */
2619 return atomic_read(&panic_cpu) != raw_smp_processor_id();
2620 }
2621
2622 /*
2623 * Check if the given console is currently capable and allowed to print
2624 * records.
2625 *
2626 * Requires the console_lock.
2627 */
2628 static inline bool console_is_usable(struct console *con)
2629 {
2630 if (!(con->flags & CON_ENABLED))
2631 return false;
2632
2633 if (!con->write)
2634 return false;
2635
2636 /*
2637 * Console drivers may assume that per-cpu resources have been
2638 * allocated. So unless they're explicitly marked as being able to
2639 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2640 */
2641 if (!cpu_online(raw_smp_processor_id()) &&
2642 !(con->flags & CON_ANYTIME))
2643 return false;
2644
2645 return true;
2646 }
2647
2648 static void __console_unlock(void)
2649 {
2650 console_locked = 0;
2651 up_console_sem();
2652 }
2653
2654 /*
2655 * Print one record for the given console. The record printed is whatever
2656 * record is the next available record for the given console.
2657 *
2658 * @text is a buffer of size CONSOLE_LOG_MAX.
2659 *
2660 * If extended messages should be printed, @ext_text is a buffer of size
2661 * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
2662 *
2663 * If dropped messages should be printed, @dropped_text is a buffer of size
2664 * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
2665 *
2666 * @handover will be set to true if a printk waiter has taken over the
2667 * console_lock, in which case the caller is no longer holding the
2668 * console_lock. Otherwise it is set to false.
2669 *
2670 * Returns false if the given console has no next record to print, otherwise
2671 * true.
2672 *
2673 * Requires the console_lock.
2674 */
2675 static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
2676 char *dropped_text, bool *handover)
2677 {
2678 static int panic_console_dropped;
2679 struct printk_info info;
2680 struct printk_record r;
2681 unsigned long flags;
2682 char *write_text;
2683 size_t len;
2684
2685 prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
2686
2687 *handover = false;
2688
2689 if (!prb_read_valid(prb, con->seq, &r))
2690 return false;
2691
2692 if (con->seq != r.info->seq) {
2693 con->dropped += r.info->seq - con->seq;
2694 con->seq = r.info->seq;
2695 if (panic_in_progress() && panic_console_dropped++ > 10) {
2696 suppress_panic_printk = 1;
2697 pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
2698 }
2699 }
2700
2701 /* Skip record that has level above the console loglevel. */
2702 if (suppress_message_printing(r.info->level)) {
2703 con->seq++;
2704 goto skip;
2705 }
2706
2707 if (ext_text) {
2708 write_text = ext_text;
2709 len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
2710 len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
2711 &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2712 } else {
2713 write_text = text;
2714 len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2715 }
2716
2717 /*
2718 * While actively printing out messages, if another printk()
2719 * were to occur on another CPU, it may wait for this one to
2720 * finish. This task can not be preempted if there is a
2721 * waiter waiting to take over.
2722 *
2723 * Interrupts are disabled because the hand over to a waiter
2724 * must not be interrupted until the hand over is completed
2725 * (@console_waiter is cleared).
2726 */
2727 printk_safe_enter_irqsave(flags);
2728 console_lock_spinning_enable();
2729
2730 stop_critical_timings(); /* don't trace print latency */
2731 call_console_driver(con, write_text, len, dropped_text);
2732 start_critical_timings();
2733
2734 con->seq++;
2735
2736 *handover = console_lock_spinning_disable_and_check();
2737 printk_safe_exit_irqrestore(flags);
2738 skip:
2739 return true;
2740 }
2741
2742 /*
2743 * Print out all remaining records to all consoles.
2744 *
2745 * @do_cond_resched is set by the caller. It can be true only in schedulable
2746 * context.
2747 *
2748 * @next_seq is set to the sequence number after the last available record.
2749 * The value is valid only when this function returns true. It means that all
2750 * usable consoles are completely flushed.
2751 *
2752 * @handover will be set to true if a printk waiter has taken over the
2753 * console_lock, in which case the caller is no longer holding the
2754 * console_lock. Otherwise it is set to false.
2755 *
2756 * Returns true when there was at least one usable console and all messages
2757 * were flushed to all usable consoles. A returned false informs the caller
2758 * that not everything was flushed (either there were no usable consoles, or
2759 * another context has taken over printing, or it is a panic situation and
2760 * this is not the panic CPU). Regardless of the reason, the caller should
2761 * assume it is not useful to immediately try again.
2762 *
2763 * Requires the console_lock.
2764 */
2765 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
2766 {
2767 static char dropped_text[DROPPED_TEXT_MAX];
2768 static char ext_text[CONSOLE_EXT_LOG_MAX];
2769 static char text[CONSOLE_LOG_MAX];
2770 bool any_usable = false;
2771 struct console *con;
2772 bool any_progress;
2773
2774 *next_seq = 0;
2775 *handover = false;
2776
2777 do {
2778 any_progress = false;
2779
2780 for_each_console(con) {
2781 bool progress;
2782
2783 if (!console_is_usable(con))
2784 continue;
2785 any_usable = true;
2786
2787 if (con->flags & CON_EXTENDED) {
2788 /* Extended consoles do not print "dropped messages". */
2789 progress = console_emit_next_record(con, &text[0],
2790 &ext_text[0], NULL,
2791 handover);
2792 } else {
2793 progress = console_emit_next_record(con, &text[0],
2794 NULL, &dropped_text[0],
2795 handover);
2796 }
2797 if (*handover)
2798 return false;
2799
2800 /* Track the next of the highest seq flushed. */
2801 if (con->seq > *next_seq)
2802 *next_seq = con->seq;
2803
2804 if (!progress)
2805 continue;
2806 any_progress = true;
2807
2808 /* Allow panic_cpu to take over the consoles safely. */
2809 if (abandon_console_lock_in_panic())
2810 return false;
2811
2812 if (do_cond_resched)
2813 cond_resched();
2814 }
2815 } while (any_progress);
2816
2817 return any_usable;
2818 }
2819
2820 /**
2821 * console_unlock - unlock the console system
2822 *
2823 * Releases the console_lock which the caller holds on the console system
2824 * and the console driver list.
2825 *
2826 * While the console_lock was held, console output may have been buffered
2827 * by printk(). If this is the case, console_unlock() emits
2828 * the output prior to releasing the lock.
2829 *
2830 * console_unlock() may be called from any context.
2831 */
2832 void console_unlock(void)
2833 {
2834 bool do_cond_resched;
2835 bool handover;
2836 bool flushed;
2837 u64 next_seq;
2838
2839 if (console_suspended) {
2840 up_console_sem();
2841 return;
2842 }
2843
2844 /*
2845 * Console drivers are called with interrupts disabled, so
2846 * @console_may_schedule should be cleared before; however, we may
2847 * end up dumping a lot of lines, for example, if called from
2848 * console registration path, and should invoke cond_resched()
2849 * between lines if allowable. Not doing so can cause a very long
2850 * scheduling stall on a slow console leading to RCU stall and
2851 * softlockup warnings which exacerbate the issue with more
2852 * messages practically incapacitating the system. Therefore, create
2853 * a local to use for the printing loop.
2854 */
2855 do_cond_resched = console_may_schedule;
2856
2857 do {
2858 console_may_schedule = 0;
2859
2860 flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
2861 if (!handover)
2862 __console_unlock();
2863
2864 /*
2865 * Abort if there was a failure to flush all messages to all
2866 * usable consoles. Either it is not possible to flush (in
2867 * which case it would be an infinite loop of retrying) or
2868 * another context has taken over printing.
2869 */
2870 if (!flushed)
2871 break;
2872
2873 /*
2874 * Some context may have added new records after
2875 * console_flush_all() but before unlocking the console.
2876 * Re-check if there is a new record to flush. If the trylock
2877 * fails, another context is already handling the printing.
2878 */
2879 } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
2880 }
2881 EXPORT_SYMBOL(console_unlock);
2882
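/*
 * Illustrative sketch of the usual lock/unlock pattern for code that
 * inspects or modifies console state. Records stored by printk() while
 * the lock was held are flushed by console_unlock().
 *
 *	struct console *con;
 *
 *	console_lock();
 *	for_each_console(con)
 *		pr_debug("console %s%d flags=0x%x\n",
 *			 con->name, con->index, con->flags);
 *	console_unlock();
 */
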
2883 /**
2884 * console_conditional_schedule - yield the CPU if required
2885 *
2886 * If the console code is currently allowed to sleep, and
2887 * if this CPU should yield the CPU to another task, do
2888 * so here.
2889 *
2890 * Must be called with console_lock() held.
2891 */
2892 void __sched console_conditional_schedule(void)
2893 {
2894 if (console_may_schedule)
2895 cond_resched();
2896 }
2897 EXPORT_SYMBOL(console_conditional_schedule);
2898
2899 void console_unblank(void)
2900 {
2901 struct console *c;
2902
2903 /*
2904 * console_unblank can no longer be called in interrupt context unless
2905 * oops_in_progress is set to 1.
2906 */
2907 if (oops_in_progress) {
2908 if (down_trylock_console_sem() != 0)
2909 return;
2910 } else
2911 console_lock();
2912
2913 console_locked = 1;
2914 console_may_schedule = 0;
2915 for_each_console(c)
2916 if ((c->flags & CON_ENABLED) && c->unblank)
2917 c->unblank();
2918 console_unlock();
2919
2920 if (!oops_in_progress)
2921 pr_flush(1000, true);
2922 }
2923
2924 /**
2925 * console_flush_on_panic - flush console content on panic
2926 * @mode: flush all messages in buffer or just the pending ones
2927 *
2928 * Immediately output all pending messages no matter what.
2929 */
2930 void console_flush_on_panic(enum con_flush_mode mode)
2931 {
2932 /*
2933 * If someone else is holding the console lock, trylock will fail
2934 * and may_schedule may be set. Ignore and proceed to unlock so
2935 * that messages are flushed out. As this can be called from any
2936 * context and we don't want to get preempted while flushing,
2937 * ensure may_schedule is cleared.
2938 */
2939 console_trylock();
2940 console_may_schedule = 0;
2941
2942 if (mode == CONSOLE_REPLAY_ALL) {
2943 struct console *c;
2944 u64 seq;
2945
2946 seq = prb_first_valid_seq(prb);
2947 for_each_console(c)
2948 c->seq = seq;
2949 }
2950 console_unlock();
2951 }
2952
2953 /*
2954 * Return the console tty driver structure and its associated index
2955 */
2956 struct tty_driver *console_device(int *index)
2957 {
2958 struct console *c;
2959 struct tty_driver *driver = NULL;
2960
2961 console_lock();
2962 for_each_console(c) {
2963 if (!c->device)
2964 continue;
2965 driver = c->device(c, index);
2966 if (driver)
2967 break;
2968 }
2969 console_unlock();
2970 return driver;
2971 }
2972
2973 /*
2974 * Prevent further output on the passed console device so that (for example)
2975 * serial drivers can disable console output before suspending a port, and can
2976 * re-enable output afterwards.
2977 */
2978 void console_stop(struct console *console)
2979 {
2980 __pr_flush(console, 1000, true);
2981 console_lock();
2982 console->flags &= ~CON_ENABLED;
2983 console_unlock();
2984 }
2985 EXPORT_SYMBOL(console_stop);
2986
2987 void console_start(struct console *console)
2988 {
2989 console_lock();
2990 console->flags |= CON_ENABLED;
2991 console_unlock();
2992 __pr_flush(console, 1000, true);
2993 }
2994 EXPORT_SYMBOL(console_start);
2995
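/*
 * Illustrative sketch: a serial driver bracketing a port suspend so no
 * console output reaches the hardware while it is powered down. The
 * my_uart_* names are hypothetical.
 *
 *	console_stop(&my_uart_console);
 *	my_uart_hw_suspend(port);
 *	...
 *	my_uart_hw_resume(port);
 *	console_start(&my_uart_console);
 */
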
2996 static int __read_mostly keep_bootcon;
2997
2998 static int __init keep_bootcon_setup(char *str)
2999 {
3000 keep_bootcon = 1;
3001 pr_info("debug: skip boot console de-registration.\n");
3002
3003 return 0;
3004 }
3005
3006 early_param("keep_bootcon", keep_bootcon_setup);
3007
3008 /*
3009 * This is called by register_console() to try to match
3010 * the newly registered console with any of the ones selected
3011 * by either the command line or add_preferred_console() and
3012 * setup/enable it.
3013 *
3014 * Care needs to be taken with consoles that are statically
3015 * enabled, such as netconsole.
3016 */
3017 static int try_enable_preferred_console(struct console *newcon,
3018 bool user_specified)
3019 {
3020 struct console_cmdline *c;
3021 int i, err;
3022
3023 for (i = 0, c = console_cmdline;
3024 i < MAX_CMDLINECONSOLES && c->name[0];
3025 i++, c++) {
3026 if (c->user_specified != user_specified)
3027 continue;
3028 if (!newcon->match ||
3029 newcon->match(newcon, c->name, c->index, c->options) != 0) {
3030 /* default matching */
3031 BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3032 if (strcmp(c->name, newcon->name) != 0)
3033 continue;
3034 if (newcon->index >= 0 &&
3035 newcon->index != c->index)
3036 continue;
3037 if (newcon->index < 0)
3038 newcon->index = c->index;
3039
3040 if (_braille_register_console(newcon, c))
3041 return 0;
3042
3043 if (newcon->setup &&
3044 (err = newcon->setup(newcon, c->options)) != 0)
3045 return err;
3046 }
3047 newcon->flags |= CON_ENABLED;
3048 if (i == preferred_console)
3049 newcon->flags |= CON_CONSDEV;
3050 return 0;
3051 }
3052
3053 /*
3054 * Some consoles, such as pstore and netconsole, can be enabled even
3055 * without matching. Accept the pre-enabled consoles only when match()
3056 * and setup() have had a chance to be called.
3057 */
3058 if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3059 return 0;
3060
3061 return -ENOENT;
3062 }
3063
3064 /* Try to enable the console unconditionally */
3065 static void try_enable_default_console(struct console *newcon)
3066 {
3067 if (newcon->index < 0)
3068 newcon->index = 0;
3069
3070 if (newcon->setup && newcon->setup(newcon, NULL) != 0)
3071 return;
3072
3073 newcon->flags |= CON_ENABLED;
3074
3075 if (newcon->device)
3076 newcon->flags |= CON_CONSDEV;
3077 }
3078
3079 #define con_printk(lvl, con, fmt, ...) \
3080 printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
3081 (con->flags & CON_BOOT) ? "boot" : "", \
3082 con->name, con->index, ##__VA_ARGS__)
3083
3084 /*
3085 * The console driver calls this routine during kernel initialization
3086 * to register the console printing procedure with printk() and to
3087 * print any messages that were printed by the kernel before the
3088 * console driver was initialized.
3089 *
3090 * This can happen pretty early during the boot process (because of
3091 * early_printk) - sometimes before setup_arch() completes - be careful
3092 * of what kernel features are used - they may not be initialised yet.
3093 *
3094 * There are two types of consoles - bootconsoles (early_printk) and
3095 * "real" consoles (everything which is not a bootconsole) which are
3096 * handled differently.
3097 * - Any number of bootconsoles can be registered at any time.
3098 * - As soon as a "real" console is registered, all bootconsoles
3099 * will be unregistered automatically.
3100 * - Once a "real" console is registered, any attempt to register a
3101 * bootconsole will be rejected.
3102 */
3103 void register_console(struct console *newcon)
3104 {
3105 struct console *con;
3106 bool bootcon_enabled = false;
3107 bool realcon_enabled = false;
3108 int err;
3109
3110 for_each_console(con) {
3111 if (WARN(con == newcon, "console '%s%d' already registered\n",
3112 con->name, con->index))
3113 return;
3114 }
3115
3116 for_each_console(con) {
3117 if (con->flags & CON_BOOT)
3118 bootcon_enabled = true;
3119 else
3120 realcon_enabled = true;
3121 }
3122
3123 /* Do not register boot consoles when there already is a real one. */
3124 if (newcon->flags & CON_BOOT && realcon_enabled) {
3125 pr_info("Too late to register bootconsole %s%d\n",
3126 newcon->name, newcon->index);
3127 return;
3128 }
3129
3130 /*
3131 * See if we want to enable this console driver by default.
3132 *
3133 * Nope when a console is preferred by the command line, device
3134 * tree, or SPCR.
3135 *
3136 * The first real console with tty binding (driver) wins. More
3137 * consoles might get enabled before the right one is found.
3138 *
3139 * Note that a console with tty binding will have CON_CONSDEV
3140 * flag set and will be first in the list.
3141 */
3142 if (preferred_console < 0) {
3143 if (!console_drivers || !console_drivers->device ||
3144 console_drivers->flags & CON_BOOT) {
3145 try_enable_default_console(newcon);
3146 }
3147 }
3148
3149 /* See if this console matches one we selected on the command line */
3150 err = try_enable_preferred_console(newcon, true);
3151
3152 /* If not, try to match against the platform default(s) */
3153 if (err == -ENOENT)
3154 err = try_enable_preferred_console(newcon, false);
3155
3156 /* printk() messages are not printed to the Braille console. */
3157 if (err || newcon->flags & CON_BRL)
3158 return;
3159
3160 /*
3161 * If we have a bootconsole, and are switching to a real console,
3162 * don't print everything out again, since when the boot console and
3163 * the real console are the same physical device, it's annoying to
3164 * see the beginning boot messages twice.
3165 */
3166 if (bootcon_enabled &&
3167 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
3168 newcon->flags &= ~CON_PRINTBUFFER;
3169 }
3170
3171 /*
3172 * Put this console in the list - keep the
3173 * preferred driver at the head of the list.
3174 */
3175 console_lock();
3176 if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
3177 newcon->next = console_drivers;
3178 console_drivers = newcon;
3179 if (newcon->next)
3180 newcon->next->flags &= ~CON_CONSDEV;
3181 /* Ensure this flag is always set for the head of the list */
3182 newcon->flags |= CON_CONSDEV;
3183 } else {
3184 newcon->next = console_drivers->next;
3185 console_drivers->next = newcon;
3186 }
3187
3188 newcon->dropped = 0;
3189 if (newcon->flags & CON_PRINTBUFFER) {
3190 /* Get a consistent copy of @syslog_seq. */
3191 mutex_lock(&syslog_lock);
3192 newcon->seq = syslog_seq;
3193 mutex_unlock(&syslog_lock);
3194 } else {
3195 /* Begin with next message. */
3196 newcon->seq = prb_next_seq(prb);
3197 }
3198 console_unlock();
3199 console_sysfs_notify();
3200
3201 /*
3202 * By unregistering the bootconsoles after we enable the real console
3203 * we get the "console xxx enabled" message on all the consoles -
3204 * boot consoles, real consoles, etc - this is to ensure that end
3205 * users know there might be something in the kernel's log buffer that
3206 * went to the bootconsole (that they do not see on the real console)
3207 */
3208 con_printk(KERN_INFO, newcon, "enabled\n");
3209 if (bootcon_enabled &&
3210 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
3211 !keep_bootcon) {
3212 for_each_console(con)
3213 if (con->flags & CON_BOOT)
3214 unregister_console(con);
3215 }
3216 }
3217 EXPORT_SYMBOL(register_console);
3218
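/*
 * Illustrative sketch of a minimal console registration; the my_*
 * names are hypothetical. CON_PRINTBUFFER requests a replay of the
 * buffered log, and index -1 lets a "console=" match pick the index.
 *
 *	static void my_con_write(struct console *con, const char *s,
 *				 unsigned int n)
 *	{
 *		my_hw_emit(s, n);	// hypothetical output path
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_con_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_console);
 */
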
3219 int unregister_console(struct console *console)
3220 {
3221 struct console *con;
3222 int res;
3223
3224 con_printk(KERN_INFO, console, "disabled\n");
3225
3226 res = _braille_unregister_console(console);
3227 if (res < 0)
3228 return res;
3229 if (res > 0)
3230 return 0;
3231
3232 res = -ENODEV;
3233 console_lock();
3234 if (console_drivers == console) {
3235 console_drivers = console->next;
3236 res = 0;
3237 } else {
3238 for_each_console(con) {
3239 if (con->next == console) {
3240 con->next = console->next;
3241 res = 0;
3242 break;
3243 }
3244 }
3245 }
3246
3247 if (res)
3248 goto out_disable_unlock;
3249
3250 /*
3251 * If this isn't the last console and it has CON_CONSDEV set, we
3252 * need to set it on the next preferred console.
3253 */
3254 if (console_drivers != NULL && console->flags & CON_CONSDEV)
3255 console_drivers->flags |= CON_CONSDEV;
3256
3257 console->flags &= ~CON_ENABLED;
3258 console_unlock();
3259 console_sysfs_notify();
3260
3261 if (console->exit)
3262 res = console->exit(console);
3263
3264 return res;
3265
3266 out_disable_unlock:
3267 console->flags &= ~CON_ENABLED;
3268 console_unlock();
3269
3270 return res;
3271 }
3272 EXPORT_SYMBOL(unregister_console);
3273
3274 /*
3275 * Initialize the console device. This is called *early*, so
3276 * we can't necessarily depend on lots of kernel help here.
3277 * Just do some early initializations, and do the complex setup
3278 * later.
3279 */
3280 void __init console_init(void)
3281 {
3282 int ret;
3283 initcall_t call;
3284 initcall_entry_t *ce;
3285
3286 /* Setup the default TTY line discipline. */
3287 n_tty_init();
3288
3289 /*
3290 * set up the console device so that later boot sequences can
3291 * inform about problems etc..
3292 */
3293 ce = __con_initcall_start;
3294 trace_initcall_level("console");
3295 while (ce < __con_initcall_end) {
3296 call = initcall_from_entry(ce);
3297 trace_initcall_start(call);
3298 ret = call();
3299 trace_initcall_finish(call, ret);
3300 ce++;
3301 }
3302 }
3303
3304 /*
3305 * Some boot consoles access data that is in the init section and which will
3306 * be discarded after the initcalls have been run. To make sure that no code
3307 * will access this data, unregister the boot consoles in a late initcall.
3308 *
3309 * If for some reason, such as deferred probe or the driver being a loadable
3310 * module, the real console hasn't registered yet at this point, there will
3311 * be a brief interval in which no messages are logged to the console, which
3312 * makes it difficult to diagnose problems that occur during this time.
3313 *
3314 * To mitigate this problem somewhat, only unregister consoles whose memory
3315 * intersects with the init section. Note that all other boot consoles will
3316 * get unregistered when the real preferred console is registered.
3317 */
3318 static int __init printk_late_init(void)
3319 {
3320 struct console *con;
3321 int ret;
3322
3323 for_each_console(con) {
3324 if (!(con->flags & CON_BOOT))
3325 continue;
3326
3327 /* Check addresses that might be used for enabled consoles. */
3328 if (init_section_intersects(con, sizeof(*con)) ||
3329 init_section_contains(con->write, 0) ||
3330 init_section_contains(con->read, 0) ||
3331 init_section_contains(con->device, 0) ||
3332 init_section_contains(con->unblank, 0) ||
3333 init_section_contains(con->data, 0)) {
3334 /*
3335 * Please, consider moving the reported consoles out
3336 * of the init section.
3337 */
3338 pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
3339 con->name, con->index);
3340 unregister_console(con);
3341 }
3342 }
3343 ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
3344 console_cpu_notify);
3345 WARN_ON(ret < 0);
3346 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
3347 console_cpu_notify, NULL);
3348 WARN_ON(ret < 0);
3349 printk_sysctl_init();
3350 return 0;
3351 }
3352 late_initcall(printk_late_init);
3353
3354 #if defined CONFIG_PRINTK
3355 /* If @con is specified, only wait for that console. Otherwise wait for all. */
3356 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
3357 {
3358 int remaining = timeout_ms;
3359 struct console *c;
3360 u64 last_diff = 0;
3361 u64 printk_seq;
3362 u64 diff;
3363 u64 seq;
3364
3365 might_sleep();
3366
3367 seq = prb_next_seq(prb);
3368
3369 for (;;) {
3370 diff = 0;
3371
3372 console_lock();
3373
3374 for_each_console(c) {
3375 if (con && con != c)
3376 continue;
3377 if (!console_is_usable(c))
3378 continue;
3379 printk_seq = c->seq;
3380 if (printk_seq < seq)
3381 diff += seq - printk_seq;
3382 }
3383
3384 /*
3385 * If consoles are suspended, it cannot be expected that they
3386 * make forward progress, so timeout immediately. @diff is
3387 * still used to return a valid flush status.
3388 */
3389 if (console_suspended)
3390 remaining = 0;
3391 else if (diff != last_diff && reset_on_progress)
3392 remaining = timeout_ms;
3393
3394 console_unlock();
3395
3396 if (diff == 0 || remaining == 0)
3397 break;
3398
3399 if (remaining < 0) {
3400 /* no timeout limit */
3401 msleep(100);
3402 } else if (remaining < 100) {
3403 msleep(remaining);
3404 remaining = 0;
3405 } else {
3406 msleep(100);
3407 remaining -= 100;
3408 }
3409
3410 last_diff = diff;
3411 }
3412
3413 return (diff == 0);
3414 }
3415
3416 /**
3417 * pr_flush() - Wait for printing threads to catch up.
3418 *
3419 * @timeout_ms: The maximum time (in ms) to wait.
3420 * @reset_on_progress: Reset the timeout if forward progress is seen.
3421 *
3422 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
3423 * represents infinite waiting.
3424 *
3425 * If @reset_on_progress is true, the timeout will be reset whenever any
3426 * printer has been seen to make some forward progress.
3427 *
3428 * Context: Process context. May sleep while acquiring console lock.
3429 * Return: true if all enabled printers are caught up.
3430 */
3431 static bool pr_flush(int timeout_ms, bool reset_on_progress)
3432 {
3433 return __pr_flush(NULL, timeout_ms, reset_on_progress);
3434 }
3435
3436 /*
3437 * Delayed printk version, for scheduler-internal messages:
3438 */
3439 #define PRINTK_PENDING_WAKEUP 0x01
3440 #define PRINTK_PENDING_OUTPUT 0x02
3441
3442 static DEFINE_PER_CPU(int, printk_pending);
3443
3444 static void wake_up_klogd_work_func(struct irq_work *irq_work)
3445 {
3446 int pending = this_cpu_xchg(printk_pending, 0);
3447
3448 if (pending & PRINTK_PENDING_OUTPUT) {
3449 /* If trylock fails, someone else is doing the printing */
3450 if (console_trylock())
3451 console_unlock();
3452 }
3453
3454 if (pending & PRINTK_PENDING_WAKEUP)
3455 wake_up_interruptible(&log_wait);
3456 }
3457
3458 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
3459 IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
3460
3461 static void __wake_up_klogd(int val)
3462 {
3463 if (!printk_percpu_data_ready())
3464 return;
3465
3466 preempt_disable();
3467 /*
3468 * Guarantee any new records can be seen by tasks preparing to wait
3469 * before this context checks if the wait queue is empty.
3470 *
3471 * The full memory barrier within wq_has_sleeper() pairs with the full
3472 * memory barrier within set_current_state() of
3473 * prepare_to_wait_event(), which is called after ___wait_event() adds
3474 * the waiter but before it has checked the wait condition.
3475 *
3476 * This pairs with devkmsg_read:A and syslog_print:A.
3477 */
3478 if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
3479 (val & PRINTK_PENDING_OUTPUT)) {
3480 this_cpu_or(printk_pending, val);
3481 irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
3482 }
3483 preempt_enable();
3484 }
3485
3486 void wake_up_klogd(void)
3487 {
3488 __wake_up_klogd(PRINTK_PENDING_WAKEUP);
3489 }
3490
3491 void defer_console_output(void)
3492 {
3493 /*
3494 * New messages may have been added directly to the ringbuffer
3495 * using vprintk_store(), so wake any waiters as well.
3496 */
3497 __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
3498 }
3499
3500 void printk_trigger_flush(void)
3501 {
3502 defer_console_output();
3503 }
3504
3505 int vprintk_deferred(const char *fmt, va_list args)
3506 {
3507 int r;
3508
3509 r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
3510 defer_console_output();
3511
3512 return r;
3513 }
3514
3515 int _printk_deferred(const char *fmt, ...)
3516 {
3517 va_list args;
3518 int r;
3519
3520 va_start(args, fmt);
3521 r = vprintk_deferred(fmt, args);
3522 va_end(args);
3523
3524 return r;
3525 }
3526
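/*
 * Illustrative usage (via the printk_deferred() wrapper macro): from
 * scheduler or timekeeping internals where taking console_sem through
 * a regular printk() could deadlock.
 *
 *	printk_deferred(KERN_WARNING "clocksource watchdog fired\n");
 *
 * The record is stored immediately; console output is kicked off from
 * irq_work instead of the calling context.
 */
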
3527 /*
3528 * printk rate limiting, lifted from the networking subsystem.
3529 *
3530 * This enforces a rate limit: not more than 10 kernel messages
3531 * every 5s to make a denial-of-service attack impossible.
3532 */
3533 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3534
3535 int __printk_ratelimit(const char *func)
3536 {
3537 return ___ratelimit(&printk_ratelimit_state, func);
3538 }
3539 EXPORT_SYMBOL(__printk_ratelimit);
3540
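/*
 * Illustrative usage, via the printk_ratelimit() macro (which passes
 * __func__ to __printk_ratelimit()):
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping malformed request\n");
 */
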
3541 /**
3542 * printk_timed_ratelimit - caller-controlled printk ratelimiting
3543 * @caller_jiffies: pointer to caller's state
3544 * @interval_msecs: minimum interval between prints
3545 *
3546 * printk_timed_ratelimit() returns true if more than @interval_msecs
3547 * milliseconds have elapsed since the last time printk_timed_ratelimit()
3548 * returned true.
3549 */
3550 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
3551 unsigned int interval_msecs)
3552 {
3553 unsigned long elapsed = jiffies - *caller_jiffies;
3554
3555 if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
3556 return false;
3557
3558 *caller_jiffies = jiffies;
3559 return true;
3560 }
3561 EXPORT_SYMBOL(printk_timed_ratelimit);
3562
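/*
 * Illustrative usage with caller-owned state (my_last_warn is a
 * hypothetical name); the warning fires at most every 5 seconds:
 *
 *	static unsigned long my_last_warn;
 *
 *	if (printk_timed_ratelimit(&my_last_warn, 5000))
 *		pr_warn("device still not responding\n");
 */
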
3563 static DEFINE_SPINLOCK(dump_list_lock);
3564 static LIST_HEAD(dump_list);
3565
3566 /**
3567 * kmsg_dump_register - register a kernel log dumper.
3568 * @dumper: pointer to the kmsg_dumper structure
3569 *
3570 * Adds a kernel log dumper to the system. The dump callback in the
3571 * structure will be called when the kernel oopses or panics and must be
3572 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
3573 */
3574 int kmsg_dump_register(struct kmsg_dumper *dumper)
3575 {
3576 unsigned long flags;
3577 int err = -EBUSY;
3578
3579 /* The dump callback needs to be set */
3580 if (!dumper->dump)
3581 return -EINVAL;
3582
3583 spin_lock_irqsave(&dump_list_lock, flags);
3584 /* Don't allow registering multiple times */
3585 if (!dumper->registered) {
3586 dumper->registered = 1;
3587 list_add_tail_rcu(&dumper->list, &dump_list);
3588 err = 0;
3589 }
3590 spin_unlock_irqrestore(&dump_list_lock, flags);
3591
3592 return err;
3593 }
3594 EXPORT_SYMBOL_GPL(kmsg_dump_register);
3595
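/*
 * Illustrative sketch: registering a dumper that only acts on panics.
 * my_dump_cb is hypothetical; a typical callback body is sketched
 * after kmsg_dump_get_line() below.
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump		= my_dump_cb,
 *		.max_reason	= KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */
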
3596 /**
3597 * kmsg_dump_unregister - unregister a kmsg dumper.
3598 * @dumper: pointer to the kmsg_dumper structure
3599 *
3600 * Removes a dump device from the system. Returns zero on success and
3601 * %-EINVAL otherwise.
3602 */
3603 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
3604 {
3605 unsigned long flags;
3606 int err = -EINVAL;
3607
3608 spin_lock_irqsave(&dump_list_lock, flags);
3609 if (dumper->registered) {
3610 dumper->registered = 0;
3611 list_del_rcu(&dumper->list);
3612 err = 0;
3613 }
3614 spin_unlock_irqrestore(&dump_list_lock, flags);
3615 synchronize_rcu();
3616
3617 return err;
3618 }
3619 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
3620
3621 static bool always_kmsg_dump;
3622 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3623
3624 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
3625 {
3626 switch (reason) {
3627 case KMSG_DUMP_PANIC:
3628 return "Panic";
3629 case KMSG_DUMP_OOPS:
3630 return "Oops";
3631 case KMSG_DUMP_EMERG:
3632 return "Emergency";
3633 case KMSG_DUMP_SHUTDOWN:
3634 return "Shutdown";
3635 default:
3636 return "Unknown";
3637 }
3638 }
3639 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
3640
3641 /**
3642 * kmsg_dump - dump kernel log to kernel message dumpers.
3643 * @reason: the reason (oops, panic etc) for dumping
3644 *
3645 * Call each of the registered dumper's dump() callback, which can
3646 * retrieve the kmsg records with kmsg_dump_get_line() or
3647 * kmsg_dump_get_buffer().
3648 */
3649 void kmsg_dump(enum kmsg_dump_reason reason)
3650 {
3651 struct kmsg_dumper *dumper;
3652
3653 rcu_read_lock();
3654 list_for_each_entry_rcu(dumper, &dump_list, list) {
3655 enum kmsg_dump_reason max_reason = dumper->max_reason;
3656
3657 /*
3658 * If client has not provided a specific max_reason, default
3659 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
3660 */
3661 if (max_reason == KMSG_DUMP_UNDEF) {
3662 max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
3663 KMSG_DUMP_OOPS;
3664 }
3665 if (reason > max_reason)
3666 continue;
3667
3668 /* invoke dumper which will iterate over records */
3669 dumper->dump(dumper, reason);
3670 }
3671 rcu_read_unlock();
3672 }
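
/*
 * Illustrative sketch of a call site (hypothetical name, modeled on the
 * reboot path): shutdown-style code passes the matching reason so that
 * only dumpers whose max_reason covers it are invoked.
 */
static void example_shutdown_path(void)
{
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
}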
3673
3674 /**
3675 * kmsg_dump_get_line - retrieve one kmsg log line
3676 * @iter: kmsg dump iterator
3677  * @syslog: include the syslog prefixes (e.g. "<4>")
3678 * @line: buffer to copy the line to
3679 * @size: maximum size of the buffer
3680  * @len: length of the line placed into the buffer
3681 *
3682 * Start at the beginning of the kmsg buffer, with the oldest kmsg
3683 * record, and copy one record into the provided buffer.
3684 *
3685  * Consecutive calls will return the next available record, moving
3686  * towards the end of the buffer, where the youngest messages are.
3687 *
3688 * A return value of FALSE indicates that there are no more records to
3689 * read.
3690 */
3691 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
3692 char *line, size_t size, size_t *len)
3693 {
3694 u64 min_seq = latched_seq_read_nolock(&clear_seq);
3695 struct printk_info info;
3696 unsigned int line_count;
3697 struct printk_record r;
3698 size_t l = 0;
3699 bool ret = false;
3700
3701 if (iter->cur_seq < min_seq)
3702 iter->cur_seq = min_seq;
3703
3704 prb_rec_init_rd(&r, &info, line, size);
3705
3706 /* Read text or count text lines? */
3707 if (line) {
3708 if (!prb_read_valid(prb, iter->cur_seq, &r))
3709 goto out;
3710 l = record_print_text(&r, syslog, printk_time);
3711 } else {
3712 if (!prb_read_valid_info(prb, iter->cur_seq,
3713 &info, &line_count)) {
3714 goto out;
3715 }
3716 l = get_record_print_text_size(&info, line_count, syslog,
3717 printk_time);
3719 }
3720
3721 iter->cur_seq = r.info->seq + 1;
3722 ret = true;
3723 out:
3724 if (len)
3725 *len = l;
3726 return ret;
3727 }
3728 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
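
/*
 * Illustrative sketch (hypothetical name example_dump_lines): a dump()
 * callback walking the log one record at a time, oldest first. The
 * iterator must be initialized with kmsg_dump_rewind() before the first
 * call; a real dumper would persist each line instead of counting bytes.
 */
static void example_dump_lines(struct kmsg_dumper *dumper,
			       enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	static char line[1024];
	size_t len, total = 0;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		total += len;	/* persist line here in a real dumper */
}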
3729
3730 /**
3731 * kmsg_dump_get_buffer - copy kmsg log lines
3732 * @iter: kmsg dump iterator
3733  * @syslog: include the syslog prefixes (e.g. "<4>")
3734  * @buf: buffer to copy the lines to
3735  * @size: maximum size of the buffer
3736  * @len_out: length of the data placed into the buffer
3737 *
3738 * Start at the end of the kmsg buffer and fill the provided buffer
3739  * with as many of the *youngest* kmsg records as will fit into it.
3740 * If the buffer is large enough, all available kmsg records will be
3741 * copied with a single call.
3742 *
3743 * Consecutive calls will fill the buffer with the next block of
3744  * available older records, excluding those already retrieved.
3745 *
3746 * A return value of FALSE indicates that there are no more records to
3747 * read.
3748 */
3749 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
3750 char *buf, size_t size, size_t *len_out)
3751 {
3752 u64 min_seq = latched_seq_read_nolock(&clear_seq);
3753 struct printk_info info;
3754 struct printk_record r;
3755 u64 seq;
3756 u64 next_seq;
3757 size_t len = 0;
3758 bool ret = false;
3759 bool time = printk_time;
3760
3761 if (!buf || !size)
3762 goto out;
3763
3764 if (iter->cur_seq < min_seq)
3765 iter->cur_seq = min_seq;
3766
3767 if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
3768 if (info.seq != iter->cur_seq) {
3769 /* messages are gone, move to first available one */
3770 iter->cur_seq = info.seq;
3771 }
3772 }
3773
3774 /* last entry */
3775 if (iter->cur_seq >= iter->next_seq)
3776 goto out;
3777
3778 /*
3779 * Find first record that fits, including all following records,
3780 * into the user-provided buffer for this dump. Pass in size-1
3781 * because this function (by way of record_print_text()) will
3782 * not write more than size-1 bytes of text into @buf.
3783 */
3784 seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
3785 size - 1, syslog, time);
3786
3787 /*
3788 * Next kmsg_dump_get_buffer() invocation will dump block of
3789 * older records stored right before this one.
3790 */
3791 next_seq = seq;
3792
3793 prb_rec_init_rd(&r, &info, buf, size);
3794
3795 len = 0;
3796 prb_for_each_record(seq, prb, seq, &r) {
3797 if (r.info->seq >= iter->next_seq)
3798 break;
3799
3800 len += record_print_text(&r, syslog, time);
3801
3802 /* Adjust record to store to remaining buffer space. */
3803 prb_rec_init_rd(&r, &info, buf + len, size - len);
3804 }
3805
3806 iter->next_seq = next_seq;
3807 ret = true;
3808 out:
3809 if (len_out)
3810 *len_out = len;
3811 return ret;
3812 }
3813 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
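
/*
 * Illustrative sketch (hypothetical name example_dump_buffer): filling a
 * fixed buffer with the youngest records in a single call, the pattern
 * used by pstore-like dumpers. Repeated calls would return progressively
 * older blocks of records.
 */
static void example_dump_buffer(struct kmsg_dumper *dumper,
				enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	static char buf[PAGE_SIZE];
	size_t len;

	kmsg_dump_rewind(&iter);
	if (kmsg_dump_get_buffer(&iter, true, buf, sizeof(buf), &len)) {
		/* buf now holds up to len bytes of the newest records */
	}
}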
3814
3815 /**
3816 * kmsg_dump_rewind - reset the iterator
3817 * @iter: kmsg dump iterator
3818 *
3819 * Reset the dumper's iterator so that kmsg_dump_get_line() and
3820 * kmsg_dump_get_buffer() can be called again and used multiple
3821 * times within the same dumper.dump() callback.
3822 */
3823 void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
3824 {
3825 iter->cur_seq = latched_seq_read_nolock(&clear_seq);
3826 iter->next_seq = prb_next_seq(prb);
3827 }
3828 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
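
/*
 * Illustrative sketch (hypothetical name example_two_pass_dump): because
 * kmsg_dump_rewind() only resets the iterator, a dump() callback can make
 * several passes over the same records, e.g. one sizing pass (passing a
 * NULL line buffer makes kmsg_dump_get_line() only count text sizes) and
 * one pass that actually reads the text.
 */
static void example_two_pass_dump(struct kmsg_dump_iter *iter,
				  char *line, size_t size)
{
	size_t len, total = 0;

	kmsg_dump_rewind(iter);
	while (kmsg_dump_get_line(iter, false, NULL, 0, &len))
		total += len;		/* pass 1: measure */

	kmsg_dump_rewind(iter);
	while (kmsg_dump_get_line(iter, false, line, size, &len))
		;			/* pass 2: read the text */
}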
3829
3830 #endif
3831
3832 #ifdef CONFIG_SMP
3833 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
3834 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
3835
3836 /**
3837 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
3838 * spinning lock is not owned by any CPU.
3839 *
3840 * Context: Any context.
3841 */
3842 void __printk_cpu_sync_wait(void)
3843 {
3844 do {
3845 cpu_relax();
3846 } while (atomic_read(&printk_cpu_sync_owner) != -1);
3847 }
3848 EXPORT_SYMBOL(__printk_cpu_sync_wait);
3849
3850 /**
3851 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
3852 * spinning lock.
3853 *
3854 * If no processor has the lock, the calling processor takes the lock and
3855 * becomes the owner. If the calling processor is already the owner of the
3856 * lock, this function succeeds immediately.
3857 *
3858 * Context: Any context. Expects interrupts to be disabled.
3859 * Return: 1 on success, otherwise 0.
3860 */
3861 int __printk_cpu_sync_try_get(void)
3862 {
3863 int cpu;
3864 int old;
3865
3866 cpu = smp_processor_id();
3867
3868 /*
3869 * Guarantee loads and stores from this CPU when it is the lock owner
3870 * are _not_ visible to the previous lock owner. This pairs with
3871 * __printk_cpu_sync_put:B.
3872 *
3873 * Memory barrier involvement:
3874 *
3875 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3876 * then __printk_cpu_sync_put:A can never read from
3877 * __printk_cpu_sync_try_get:B.
3878 *
3879 * Relies on:
3880 *
3881 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
3882 * of the previous CPU
3883 * matching
3884 * ACQUIRE from __printk_cpu_sync_try_get:A to
3885 * __printk_cpu_sync_try_get:B of this CPU
3886 */
3887 old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
3888 cpu); /* LMM(__printk_cpu_sync_try_get:A) */
3889 if (old == -1) {
3890 /*
3891 * This CPU is now the owner and begins loading/storing
3892 * data: LMM(__printk_cpu_sync_try_get:B)
3893 */
3894 return 1;
3895
3896 } else if (old == cpu) {
3897 /* This CPU is already the owner. */
3898 atomic_inc(&printk_cpu_sync_nested);
3899 return 1;
3900 }
3901
3902 return 0;
3903 }
3904 EXPORT_SYMBOL(__printk_cpu_sync_try_get);
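
/*
 * Sketch of how the public printk_cpu_sync_get_irqsave() wrapper in
 * <linux/printk.h> combines these primitives (paraphrased here, not
 * copied verbatim): retry the trylock with interrupts disabled, and
 * busy-wait with interrupts enabled between attempts so the current
 * owner can still take interrupts while holding the lock.
 */
#define example_cpu_sync_get_irqsave(flags)		\
	for (;;) {					\
		local_irq_save(flags);			\
		if (__printk_cpu_sync_try_get())	\
			break;				\
		local_irq_restore(flags);		\
		__printk_cpu_sync_wait();		\
	}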
3905
3906 /**
3907 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
3908 *
3909 * The calling processor must be the owner of the lock.
3910 *
3911 * Context: Any context. Expects interrupts to be disabled.
3912 */
3913 void __printk_cpu_sync_put(void)
3914 {
3915 if (atomic_read(&printk_cpu_sync_nested)) {
3916 atomic_dec(&printk_cpu_sync_nested);
3917 return;
3918 }
3919
3920 /*
3921 * This CPU is finished loading/storing data:
3922 * LMM(__printk_cpu_sync_put:A)
3923 */
3924
3925 /*
3926 * Guarantee loads and stores from this CPU when it was the
3927 * lock owner are visible to the next lock owner. This pairs
3928 * with __printk_cpu_sync_try_get:A.
3929 *
3930 * Memory barrier involvement:
3931 *
3932 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
3933 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
3934 *
3935 * Relies on:
3936 *
3937 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
3938 * of this CPU
3939 * matching
3940 * ACQUIRE from __printk_cpu_sync_try_get:A to
3941 * __printk_cpu_sync_try_get:B of the next CPU
3942 */
3943 atomic_set_release(&printk_cpu_sync_owner,
3944 -1); /* LMM(__printk_cpu_sync_put:B) */
3945 }
3946 EXPORT_SYMBOL(__printk_cpu_sync_put);
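
/*
 * Illustrative usage sketch (hypothetical name example_dump_report): the
 * printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() wrappers
 * built on the primitives above keep a multi-line report from interleaving
 * with output from other CPUs, the same pattern dump_stack() uses. The
 * lock is reentrant on the owning CPU, so nested users cannot deadlock.
 */
static void example_dump_report(void)
{
	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	pr_info("first line of an atomic block\n");
	pr_info("second line of an atomic block\n");
	printk_cpu_sync_put_irqrestore(flags);
}
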
3947 #endif /* CONFIG_SMP */
3948