blob: b67e84cfd085c8cee1e7fd352ef9df404f3180e4
1 | /* |
2 | * linux/kernel/printk.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * |
6 | * Modified to make sys_syslog() more flexible: added commands to |
7 | * return the last 4k of kernel messages, regardless of whether |
8 | * they've been read or not. Added option to suppress kernel printk's |
9 | * to the console. Added hook for sending the console messages |
10 | * elsewhere, in preparation for a serial line console (someday). |
11 | * Ted Ts'o, 2/11/93. |
12 | * Modified for sysctl support, 1/8/97, Chris Horn. |
13 | * Fixed SMP synchronization, 08/08/99, Manfred Spraul |
14 | * manfred@colorfullife.com |
15 | * Rewrote bits to get rid of console_lock |
16 | * 01Mar01 Andrew Morton |
17 | */ |
18 | |
19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> |
21 | #include <linux/tty.h> |
22 | #include <linux/tty_driver.h> |
23 | #include <linux/console.h> |
24 | #include <linux/init.h> |
25 | #include <linux/jiffies.h> |
26 | #include <linux/nmi.h> |
27 | #include <linux/module.h> |
28 | #include <linux/moduleparam.h> |
29 | #include <linux/delay.h> |
30 | #include <linux/smp.h> |
31 | #include <linux/security.h> |
32 | #include <linux/bootmem.h> |
33 | #include <linux/memblock.h> |
34 | #include <linux/syscalls.h> |
35 | #include <linux/kexec.h> |
36 | #include <linux/kdb.h> |
37 | #include <linux/ratelimit.h> |
38 | #include <linux/kmsg_dump.h> |
39 | #include <linux/syslog.h> |
40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> |
42 | #include <linux/rculist.h> |
43 | #include <linux/poll.h> |
44 | #include <linux/irq_work.h> |
45 | #include <linux/utsname.h> |
46 | #include <linux/ctype.h> |
47 | #include <linux/uio.h> |
48 | |
49 | #include <asm/uaccess.h> |
50 | #include <asm/sections.h> |
51 | |
52 | #define CREATE_TRACE_POINTS |
53 | #include <trace/events/printk.h> |
54 | |
55 | #include "console_cmdline.h" |
56 | #include "braille.h" |
57 | #include "internal.h" |
58 | |
59 | #ifdef CONFIG_EARLY_PRINTK_DIRECT |
60 | extern void printascii(char *); |
61 | #endif |
62 | |
63 | int console_printk[4] = { |
64 | CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */ |
65 | MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */ |
66 | CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */ |
67 | CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */ |
68 | }; |
69 | |
70 | /* |
71 | * Low level drivers may need that to know if they can schedule in |
72 | * their unblank() callback or not. So let's export it. |
73 | */ |
74 | int oops_in_progress; |
75 | EXPORT_SYMBOL(oops_in_progress); |
76 | |
77 | /* |
78 | * console_sem protects the console_drivers list, and also |
79 | * provides serialisation for access to the entire console |
80 | * driver system. |
81 | */ |
82 | static DEFINE_SEMAPHORE(console_sem); |
83 | struct console *console_drivers; |
84 | EXPORT_SYMBOL_GPL(console_drivers); |
85 | |
86 | #ifdef CONFIG_LOCKDEP |
87 | static struct lockdep_map console_lock_dep_map = { |
88 | .name = "console_lock" |
89 | }; |
90 | #endif |
91 | |
92 | enum devkmsg_log_bits { |
93 | __DEVKMSG_LOG_BIT_ON = 0, |
94 | __DEVKMSG_LOG_BIT_OFF, |
95 | __DEVKMSG_LOG_BIT_LOCK, |
96 | }; |
97 | |
98 | enum devkmsg_log_masks { |
99 | DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON), |
100 | DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF), |
101 | DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK), |
102 | }; |
103 | |
104 | /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */ |
105 | #define DEVKMSG_LOG_MASK_DEFAULT 0 |
106 | |
107 | static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; |
108 | |
109 | static int __control_devkmsg(char *str) |
110 | { |
111 | if (!str) |
112 | return -EINVAL; |
113 | |
114 | if (!strncmp(str, "on", 2)) { |
115 | devkmsg_log = DEVKMSG_LOG_MASK_ON; |
116 | return 2; |
117 | } else if (!strncmp(str, "off", 3)) { |
118 | devkmsg_log = DEVKMSG_LOG_MASK_OFF; |
119 | return 3; |
120 | } else if (!strncmp(str, "ratelimit", 9)) { |
121 | devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; |
122 | return 9; |
123 | } |
124 | return -EINVAL; |
125 | } |
126 | |
/*
 * Early handler for the "printk.devkmsg=" kernel command line parameter.
 * Applies the requested mode and then locks it so that the matching
 * sysctl can no longer change it at runtime.
 *
 * Returns 0 on success, 1 (i.e. "unhandled") on a malformed value.
 */
static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0)
		return 1;

	/*
	 * Mirror the chosen mode into the sysctl string so that reading
	 * the sysctl reflects the command line setting:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON) {
		memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
		strncpy(devkmsg_log_str, "on", 2);
	} else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) {
		memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
		strncpy(devkmsg_log_str, "off", 3);
	}
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout the
	 * runtime of the system. This is a precaution measure against userspace
	 * trying to be a smarta** and attempting to change it up on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 0;
}
154 | __setup("printk.devkmsg=", control_devkmsg); |
155 | |
156 | char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; |
157 | |
/*
 * Sysctl handler for kernel.printk_devkmsg.
 *
 * On write: refuses changes when the setting was locked on the command
 * line, otherwise snapshots the current state, lets proc_dostring()
 * update devkmsg_log_str, and re-parses it.  An unknown string, or a
 * known one followed by trailing characters, restores the snapshot and
 * returns -EINVAL.  Reads just pass through to proc_dostring().
 */
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		/* Command-line setting is permanent; see control_devkmsg(). */
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		/* Snapshot for rollback on invalid input. */
		old = devkmsg_log;
		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 * (err is the matched length; *lenp includes the newline,
		 * hence the +1.)
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);

			return -EINVAL;
		}
	}

	return 0;
}
196 | |
197 | /* |
198 | * Number of registered extended console drivers. |
199 | * |
200 | * If extended consoles are present, in-kernel cont reassembly is disabled |
201 | * and each fragment is stored as a separate log entry with proper |
202 | * continuation flag so that every emitted message has full metadata. This |
203 | * doesn't change the result for regular consoles or /proc/kmsg. For |
204 | * /dev/kmsg, as long as the reader concatenates messages according to |
205 | * consecutive continuation flags, the end result should be the same too. |
206 | */ |
207 | static int nr_ext_console_drivers; |
208 | |
209 | /* |
210 | * Helper macros to handle lockdep when locking/unlocking console_sem. We use |
211 | * macros instead of functions so that _RET_IP_ contains useful information. |
212 | */ |
213 | #define down_console_sem() do { \ |
214 | down(&console_sem);\ |
215 | mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\ |
216 | } while (0) |
217 | |
/*
 * Try to acquire console_sem without sleeping.
 *
 * Returns 1 when the semaphore is contended (mirroring down_trylock()),
 * 0 on success.  On success the acquisition is reported to lockdep via
 * console_lock_dep_map; the caller passes _RET_IP_ as @ip so lockdep
 * reports point at the real call site (see down_trylock_console_sem()).
 */
static int __down_trylock_console_sem(unsigned long ip)
{
	if (down_trylock(&console_sem))
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
225 | #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) |
226 | |
227 | #define up_console_sem() do { \ |
228 | mutex_release(&console_lock_dep_map, 1, _RET_IP_);\ |
229 | up(&console_sem);\ |
230 | } while (0) |
231 | |
232 | /* |
233 | * This is used for debugging the mess that is the VT code by |
234 | * keeping track if we have the console semaphore held. It's |
235 | * definitely not the perfect debug tool (we don't know if _WE_ |
236 | * hold it and are racing, but it helps tracking those weird code |
237 | * paths in the console code where we end up in places I want |
238 | * locked without the console sempahore held). |
239 | */ |
240 | static int console_locked, console_suspended; |
241 | |
242 | /* |
243 | * If exclusive_console is non-NULL then only this console is to be printed to. |
244 | */ |
245 | static struct console *exclusive_console; |
246 | |
247 | /* |
248 | * Array of consoles built from command line options (console=) |
249 | */ |
250 | |
251 | #define MAX_CMDLINECONSOLES 8 |
252 | |
253 | static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; |
254 | |
255 | static int selected_console = -1; |
256 | static int preferred_console = -1; |
257 | int console_set_on_cmdline; |
258 | EXPORT_SYMBOL(console_set_on_cmdline); |
259 | |
260 | /* Flag: console code may call schedule() */ |
261 | static int console_may_schedule; |
262 | |
263 | /* |
264 | * The printk log buffer consists of a chain of concatenated variable |
265 | * length records. Every record starts with a record header, containing |
266 | * the overall length of the record. |
267 | * |
268 | * The heads to the first and last entry in the buffer, as well as the |
269 | * sequence numbers of these entries are maintained when messages are |
270 | * stored. |
271 | * |
272 | * If the heads indicate available messages, the length in the header |
273 | * tells the start next message. A length == 0 for the next message |
274 | * indicates a wrap-around to the beginning of the buffer. |
275 | * |
276 | * Every record carries the monotonic timestamp in microseconds, as well as |
277 | * the standard userspace syslog level and syslog facility. The usual |
278 | * kernel messages use LOG_KERN; userspace-injected messages always carry |
279 | * a matching syslog facility, by default LOG_USER. The origin of every |
280 | * message can be reliably determined that way. |
281 | * |
282 | * The human readable log message directly follows the message header. The |
283 | * length of the message text is stored in the header, the stored message |
284 | * is not terminated. |
285 | * |
286 | * Optionally, a message can carry a dictionary of properties (key/value pairs), |
287 | * to provide userspace with a machine-readable message context. |
288 | * |
289 | * Examples for well-defined, commonly used property names are: |
290 | * DEVICE=b12:8 device identifier |
291 | * b12:8 block dev_t |
292 | * c127:3 char dev_t |
293 | * n8 netdev ifindex |
294 | * +sound:card0 subsystem:devname |
295 | * SUBSYSTEM=pci driver-core subsystem name |
296 | * |
297 | * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value |
298 | * follows directly after a '=' character. Every property is terminated by |
299 | * a '\0' character. The last property is not terminated. |
300 | * |
301 | * Example of a message structure: |
302 | * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec |
303 | * 0008 34 00 record is 52 bytes long |
304 | * 000a 0b 00 text is 11 bytes long |
305 | * 000c 1f 00 dictionary is 23 bytes long |
306 | * 000e 03 00 LOG_KERN (facility) LOG_ERR (level) |
307 | * 0010 69 74 27 73 20 61 20 6c "it's a l" |
308 | * 69 6e 65 "ine" |
309 | * 001b 44 45 56 49 43 "DEVIC" |
310 | * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D" |
311 | * 52 49 56 45 52 3d 62 75 "RIVER=bu" |
312 | * 67 "g" |
313 | * 0032 00 00 00 padding to next message header |
314 | * |
315 | * The 'struct printk_log' buffer header must never be directly exported to |
316 | * userspace, it is a kernel-private implementation detail that might |
317 | * need to be changed in the future, when the requirements change. |
318 | * |
319 | * /dev/kmsg exports the structured data in the following line format: |
320 | * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" |
321 | * |
322 | * Users of the export format should ignore possible additional values |
323 | * separated by ',', and find the message after the ';' character. |
324 | * |
325 | * The optional key/value pairs are attached as continuation lines starting |
326 | * with a space character and terminated by a newline. All possible |
327 | * non-prinatable characters are escaped in the "\xff" notation. |
328 | */ |
329 | |
330 | enum log_flags { |
331 | LOG_NOCONS = 1, /* already flushed, do not print to console */ |
332 | LOG_NEWLINE = 2, /* text ended with a newline */ |
333 | LOG_PREFIX = 4, /* text started with a prefix */ |
334 | LOG_CONT = 8, /* text is a fragment of a continuation line */ |
335 | }; |
336 | |
/*
 * On-disk... rather, in-ring-buffer record header.  The text and the
 * optional dictionary immediately follow this header; @len includes the
 * header, both payloads and the alignment padding up to the next record.
 * A header with len == 0 marks the wrap-around point of the buffer.
 * Never export this layout to userspace directly (see comment above).
 */
struct printk_log {
	u64 ts_nsec;		/* timestamp in nanoseconds */
	u16 len;		/* length of entire record */
	u16 text_len;		/* length of text buffer */
	u16 dict_len;		/* length of dictionary buffer */
	u8 facility;		/* syslog facility */
	u8 flags:5;		/* internal record flags */
	u8 level:3;		/* syslog level */
#ifdef CONFIG_AMLOGIC_DRIVER
	int cpu;		/* vendor extension: CPU that stored the record */
#endif
}
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
__packed __aligned(4)
#endif
;
353 | |
354 | /* |
355 | * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken |
356 | * within the scheduler's rq lock. It must be released before calling |
357 | * console_unlock() or anything else that might wake up a process. |
358 | */ |
359 | DEFINE_RAW_SPINLOCK(logbuf_lock); |
360 | |
361 | #ifdef CONFIG_PRINTK |
362 | DECLARE_WAIT_QUEUE_HEAD(log_wait); |
363 | #ifdef CONFIG_AMLOGIC_DRIVER |
364 | static int current_cpu; |
365 | #endif |
366 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ |
367 | static u64 syslog_seq; |
368 | static u32 syslog_idx; |
369 | static enum log_flags syslog_prev; |
370 | static size_t syslog_partial; |
371 | |
372 | /* index and sequence number of the first record stored in the buffer */ |
373 | static u64 log_first_seq; |
374 | static u32 log_first_idx; |
375 | |
376 | /* index and sequence number of the next record to store in the buffer */ |
377 | static u64 log_next_seq; |
378 | static u32 log_next_idx; |
379 | |
380 | /* the next printk record to write to the console */ |
381 | static u64 console_seq; |
382 | static u32 console_idx; |
383 | static enum log_flags console_prev; |
384 | |
385 | /* the next printk record to read after the last 'clear' command */ |
386 | static u64 clear_seq; |
387 | static u32 clear_idx; |
388 | |
389 | #define PREFIX_MAX 32 |
390 | #define LOG_LINE_MAX (1024 - PREFIX_MAX) |
391 | |
392 | #define LOG_LEVEL(v) ((v) & 0x07) |
393 | #define LOG_FACILITY(v) ((v) >> 3 & 0xff) |
394 | |
395 | /* record buffer */ |
396 | #define LOG_ALIGN __alignof__(struct printk_log) |
397 | #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) |
398 | static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); |
399 | static char *log_buf = __log_buf; |
400 | static u32 log_buf_len = __LOG_BUF_LEN; |
401 | |
402 | /* Return log buffer address */ |
403 | char *log_buf_addr_get(void) |
404 | { |
405 | return log_buf; |
406 | } |
407 | |
408 | /* Return log buffer size */ |
409 | u32 log_buf_len_get(void) |
410 | { |
411 | return log_buf_len; |
412 | } |
413 | |
414 | /* human readable text of the record */ |
415 | static char *log_text(const struct printk_log *msg) |
416 | { |
417 | return (char *)msg + sizeof(struct printk_log); |
418 | } |
419 | |
420 | /* optional key/value pair dictionary attached to the record */ |
421 | static char *log_dict(const struct printk_log *msg) |
422 | { |
423 | return (char *)msg + sizeof(struct printk_log) + msg->text_len; |
424 | } |
425 | |
426 | /* get record by index; idx must point to valid msg */ |
427 | static struct printk_log *log_from_idx(u32 idx) |
428 | { |
429 | struct printk_log *msg = (struct printk_log *)(log_buf + idx); |
430 | |
431 | /* |
432 | * A length == 0 record is the end of buffer marker. Wrap around and |
433 | * read the message at the start of the buffer. |
434 | */ |
435 | if (!msg->len) |
436 | return (struct printk_log *)log_buf; |
437 | return msg; |
438 | } |
439 | |
440 | /* get next record; idx must point to valid msg */ |
441 | static u32 log_next(u32 idx) |
442 | { |
443 | struct printk_log *msg = (struct printk_log *)(log_buf + idx); |
444 | |
445 | /* length == 0 indicates the end of the buffer; wrap */ |
446 | /* |
447 | * A length == 0 record is the end of buffer marker. Wrap around and |
448 | * read the message at the start of the buffer as *this* one, and |
449 | * return the one after that. |
450 | */ |
451 | if (!msg->len) { |
452 | msg = (struct printk_log *)log_buf; |
453 | return msg->len; |
454 | } |
455 | return idx + msg->len; |
456 | } |
457 | |
458 | /* |
459 | * Check whether there is enough free space for the given message. |
460 | * |
461 | * The same values of first_idx and next_idx mean that the buffer |
462 | * is either empty or full. |
463 | * |
464 | * If the buffer is empty, we must respect the position of the indexes. |
465 | * They cannot be reset to the beginning of the buffer. |
466 | */ |
467 | static int logbuf_has_space(u32 msg_size, bool empty) |
468 | { |
469 | u32 free; |
470 | |
471 | if (log_next_idx > log_first_idx || empty) |
472 | free = max(log_buf_len - log_next_idx, log_first_idx); |
473 | else |
474 | free = log_first_idx - log_next_idx; |
475 | |
476 | /* |
477 | * We need space also for an empty header that signalizes wrapping |
478 | * of the buffer. |
479 | */ |
480 | return free >= msg_size + sizeof(struct printk_log); |
481 | } |
482 | |
/*
 * Drop the oldest records until @msg_size bytes fit into the buffer.
 *
 * Keeps clear_seq/clear_idx valid if the records they referenced were
 * discarded.  Returns 0 when enough contiguous space is available,
 * -ENOMEM when even an empty buffer cannot hold the message.
 */
static int log_make_free_space(u32 msg_size)
{
	while (log_first_seq < log_next_seq &&
	       !logbuf_has_space(msg_size, false)) {
		/* drop old messages until we have enough contiguous space */
		log_first_idx = log_next(log_first_idx);
		log_first_seq++;
	}

	/* The "clear" point cannot refer to records that no longer exist. */
	if (clear_seq < log_first_seq) {
		clear_seq = log_first_seq;
		clear_idx = log_first_idx;
	}

	/* sequence numbers are equal, so the log buffer is empty */
	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
		return 0;

	return -ENOMEM;
}
503 | |
504 | /* compute the message size including the padding bytes */ |
505 | static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len) |
506 | { |
507 | u32 size; |
508 | |
509 | size = sizeof(struct printk_log) + text_len + dict_len; |
510 | *pad_len = (-size) & (LOG_ALIGN - 1); |
511 | size += *pad_len; |
512 | |
513 | return size; |
514 | } |
515 | |
516 | /* |
517 | * Define how much of the log buffer we could take at maximum. The value |
518 | * must be greater than two. Note that only half of the buffer is available |
519 | * when the index points to the middle. |
520 | */ |
521 | #define MAX_LOG_TAKE_PART 4 |
522 | static const char trunc_msg[] = "<truncated>"; |
523 | |
/*
 * Shrink an oversized message so it can ever fit in the ring buffer:
 * cap the text at a quarter of the buffer, drop the dictionary
 * entirely, and reserve room for the "<truncated>" marker.  Returns
 * the recomputed record size (see msg_used_size()).
 */
static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
			u16 *dict_len, u32 *pad_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
	if (*text_len > max_text_len)
		*text_len = max_text_len;
	/* enable the warning message */
	*trunc_msg_len = strlen(trunc_msg);
	/* disable the "dict" completely */
	*dict_len = 0;
	/* compute the size again, count also the warning message */
	return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
}
541 | |
542 | /* insert record into the buffer, discard old ones, update heads */ |
/*
 * Store one record in the ring buffer, evicting old records as needed.
 *
 * @facility/@level/@flags/@ts_nsec populate the header (a ts_nsec of 0
 * means "stamp with local_clock() now").  @dict may be NULL with
 * @dict_len == 0.  Returns the number of text bytes actually stored
 * (possibly truncated), or 0 when the message could not be stored at
 * all.  Caller must hold logbuf_lock.
 */
static int log_store(int facility, int level,
		     enum log_flags flags, u64 ts_nsec,
		     const char *dict, u16 dict_len,
		     const char *text, u16 text_len)
{
	struct printk_log *msg;
	u32 size, pad_len;
	u16 trunc_msg_len = 0;

	/* number of '\0' padding bytes to next message */
	size = msg_used_size(text_len, dict_len, &pad_len);

	if (log_make_free_space(size)) {
		/* truncate the message if it is too long for empty buffer */
		size = truncate_msg(&text_len, &trunc_msg_len,
				    &dict_len, &pad_len);
		/* survive when the log buffer is too small for trunc_msg */
		if (log_make_free_space(size))
			return 0;
	}

	if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
		/*
		 * This message + an additional empty header does not fit
		 * at the end of the buffer. Add an empty header with len == 0
		 * to signify a wrap around.
		 */
		memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
		log_next_idx = 0;
	}

	/* fill message */
	msg = (struct printk_log *)(log_buf + log_next_idx);
	memcpy(log_text(msg), text, text_len);
	msg->text_len = text_len;
	if (trunc_msg_len) {
		/* append the "<truncated>" marker right after the text */
		memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
		msg->text_len += trunc_msg_len;
	}
	memcpy(log_dict(msg), dict, dict_len);
	msg->dict_len = dict_len;
	msg->facility = facility;
	msg->level = level & 7;		/* header has a 3-bit level field */
	msg->flags = flags & 0x1f;	/* header has a 5-bit flags field */
#ifdef CONFIG_AMLOGIC_DRIVER
	msg->cpu = smp_processor_id();
#endif
	if (ts_nsec > 0)
		msg->ts_nsec = ts_nsec;
	else
		msg->ts_nsec = local_clock();
	memset(log_dict(msg) + dict_len, 0, pad_len);
	msg->len = size;

	/* insert message */
	log_next_idx += msg->len;
	log_next_seq++;

	return msg->text_len;
}
603 | |
604 | int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); |
605 | |
606 | static int syslog_action_restricted(int type) |
607 | { |
608 | if (dmesg_restrict) |
609 | return 1; |
610 | /* |
611 | * Unless restricted, we allow "read all" and "get buffer size" |
612 | * for everybody. |
613 | */ |
614 | return type != SYSLOG_ACTION_READ_ALL && |
615 | type != SYSLOG_ACTION_SIZE_BUFFER; |
616 | } |
617 | |
/*
 * Permission check for syslog(2)-style access from @source.
 *
 * Returns 0 when access is allowed (subject to security_syslog()),
 * -EPERM when the action is restricted and the caller lacks both
 * CAP_SYSLOG and the deprecated CAP_SYS_ADMIN fallback.
 */
static int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		/*
		 * For historical reasons, accept CAP_SYS_ADMIN too, with
		 * a warning.
		 */
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				     current->comm, task_pid_nr(current));
			goto ok;
		}
		return -EPERM;
	}
ok:
	/* LSMs get the final say even for otherwise-allowed actions. */
	return security_syslog(type);
}
646 | |
/*
 * Append character @c at *@pp and advance the cursor, unless the
 * cursor already reached the end pointer @e (silent truncation).
 */
static void append_char(char **pp, char *e, char c)
{
	char *p = *pp;

	if (p >= e)
		return;

	*p = c;
	*pp = p + 1;
}
652 | |
/*
 * Format the /dev/kmsg record header "<prio>,<seq>,<ts_usec>,<cont>;"
 * into @buf.  Returns the number of characters written (scnprintf
 * semantics, always NUL-terminated within @size).
 */
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags)
{
	u64 ts_usec = msg->ts_nsec;
	char cont = '-';

	/* convert the nanosecond timestamp to microseconds */
	do_div(ts_usec, 1000);

	/*
	 * If we couldn't merge continuation line fragments during the print,
	 * export the stored flags to allow an optional external merge of the
	 * records. Merging the records isn't always neccessarily correct, like
	 * when we hit a race during printing. In most cases though, it produces
	 * better readable output. 'c' in the record flags mark the first
	 * fragment of a line, '+' the following.
	 */
	if (msg->flags & LOG_CONT)
		cont = (prev_flags & LOG_CONT) ? '+' : 'c';

	return scnprintf(buf, size, "%u,%llu,%llu,%c;",
		       (msg->facility << 3) | msg->level, seq, ts_usec, cont);
}
676 | |
/*
 * Format the record body for /dev/kmsg: the escaped text followed by
 * '\n', then each dictionary property on its own space-indented line.
 * Non-printable bytes and '\\' are emitted as "\xNN"; output beyond
 * @size is silently truncated.  Returns the number of bytes written.
 */
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, '\n');

	if (dict_len) {
		bool line = true;

		for (i = 0; i < dict_len; i++) {
			unsigned char c = dict[i];

			/* continuation lines start with a space character */
			if (line) {
				append_char(&p, e, ' ');
				line = false;
			}

			/* '\0' separates properties; start a fresh line */
			if (c == '\0') {
				append_char(&p, e, '\n');
				line = true;
				continue;
			}

			if (c < ' ' || c >= 127 || c == '\\') {
				p += scnprintf(p, e - p, "\\x%02x", c);
				continue;
			}

			append_char(&p, e, c);
		}
		append_char(&p, e, '\n');
	}

	return p - buf;
}
724 | |
725 | /* /dev/kmsg - userspace message inject/listen interface */ |
struct devkmsg_user {
	u64 seq;		/* sequence number of the next record to read */
	u32 idx;		/* byte offset of that record in the log buffer */
	enum log_flags prev;	/* flags of the last record handed out */
	struct ratelimit_state rs;	/* per-open write ratelimit state */
	struct mutex lock;	/* serializes readers sharing this open file */
	char buf[CONSOLE_EXT_LOG_MAX];	/* formatting buffer for one record */
};
734 | |
/*
 * write() path of /dev/kmsg: inject a userspace message into the log.
 *
 * The optional "<N>" prefix selects level and facility; without a
 * userspace facility the message is forced to LOG_USER so kernel and
 * userspace records stay distinguishable.  Writes are dropped (but
 * reported as successful) when user logging is off, and ratelimited
 * unless explicitly enabled.
 */
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (!user || len > LOG_LINE_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	buf[len] = '\0';
	if (copy_from_iter(buf, len, from) != len) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents 32bit, the lower 3 bit are the log
	 * level, the rest are the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			len -= endp - line;
			line = endp;
		}
	}

	printk_emit(facility, level, NULL, 0, "%s", line);
	kfree(buf);
	return ret;
}
797 | |
/*
 * read() path of /dev/kmsg: hand out exactly one formatted record.
 *
 * Blocks (unless O_NONBLOCK) until a record past user->seq exists.
 * Returns -EPIPE and resets the cursor when the reader fell behind and
 * its record was overwritten, -EINVAL when the record does not fit in
 * @count, the record length otherwise.  user->lock serializes
 * concurrent readers of the same open file; logbuf_lock is dropped
 * before any copy_to_user().
 */
static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_log *msg;
	size_t len;
	ssize_t ret;

	if (!user)
		return -EBADF;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;
	raw_spin_lock_irq(&logbuf_lock);
	while (user->seq == log_next_seq) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			raw_spin_unlock_irq(&logbuf_lock);
			goto out;
		}

		/* must not sleep while holding logbuf_lock */
		raw_spin_unlock_irq(&logbuf_lock);
		ret = wait_event_interruptible(log_wait,
					       user->seq != log_next_seq);
		if (ret)
			goto out;
		raw_spin_lock_irq(&logbuf_lock);
	}

	if (user->seq < log_first_seq) {
		/* our last seen message is gone, return error and reset */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		ret = -EPIPE;
		raw_spin_unlock_irq(&logbuf_lock);
		goto out;
	}

	/* format header + body into the per-open buffer */
	msg = log_from_idx(user->idx);
	len = msg_print_ext_header(user->buf, sizeof(user->buf),
				   msg, user->seq, user->prev);
	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
				  log_dict(msg), msg->dict_len,
				  log_text(msg), msg->text_len);

	/* advance the cursor before dropping the lock */
	user->prev = msg->flags;
	user->idx = log_next(user->idx);
	user->seq++;
	raw_spin_unlock_irq(&logbuf_lock);

	if (len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, user->buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = len;
out:
	mutex_unlock(&user->lock);
	return ret;
}
863 | |
/*
 * llseek on /dev/kmsg repositions the reader's cursor, it does not
 * seek bytes: SEEK_SET -> oldest record, SEEK_DATA -> first record
 * after the last clear, SEEK_END -> after the newest record.  Only a
 * zero offset is accepted (-ESPIPE otherwise).
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (!user)
		return -EBADF;
	if (offset)
		return -ESPIPE;

	raw_spin_lock_irq(&logbuf_lock);
	switch (whence) {
	case SEEK_SET:
		/* the first record */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		user->idx = clear_idx;
		user->seq = clear_seq;
		break;
	case SEEK_END:
		/* after the last record */
		user->idx = log_next_idx;
		user->seq = log_next_seq;
		break;
	default:
		ret = -EINVAL;
	}
	raw_spin_unlock_irq(&logbuf_lock);
	return ret;
}
901 | |
/*
 * poll() on /dev/kmsg: readable when records newer than the reader's
 * cursor exist; additionally flags POLLERR|POLLPRI when the reader's
 * next record was already overwritten (the next read returns -EPIPE).
 */
static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	int ret = 0;

	if (!user)
		return POLLERR|POLLNVAL;

	poll_wait(file, &log_wait, wait);

	raw_spin_lock_irq(&logbuf_lock);
	if (user->seq < log_next_seq) {
		/* return error when data has vanished underneath us */
		if (user->seq < log_first_seq)
			ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
		else
			ret = POLLIN|POLLRDNORM;
	}
	raw_spin_unlock_irq(&logbuf_lock);

	return ret;
}
924 | |
925 | static int devkmsg_open(struct inode *inode, struct file *file) |
926 | { |
927 | struct devkmsg_user *user; |
928 | int err; |
929 | |
930 | if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) |
931 | return -EPERM; |
932 | |
933 | /* write-only does not need any file context */ |
934 | if ((file->f_flags & O_ACCMODE) != O_WRONLY) { |
935 | err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, |
936 | SYSLOG_FROM_READER); |
937 | if (err) |
938 | return err; |
939 | } |
940 | |
941 | user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); |
942 | if (!user) |
943 | return -ENOMEM; |
944 | |
945 | ratelimit_default_init(&user->rs); |
946 | ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE); |
947 | |
948 | mutex_init(&user->lock); |
949 | |
950 | raw_spin_lock_irq(&logbuf_lock); |
951 | user->idx = log_first_idx; |
952 | user->seq = log_first_seq; |
953 | raw_spin_unlock_irq(&logbuf_lock); |
954 | |
955 | file->private_data = user; |
956 | return 0; |
957 | } |
958 | |
959 | static int devkmsg_release(struct inode *inode, struct file *file) |
960 | { |
961 | struct devkmsg_user *user = file->private_data; |
962 | |
963 | if (!user) |
964 | return 0; |
965 | |
966 | ratelimit_state_exit(&user->rs); |
967 | |
968 | mutex_destroy(&user->lock); |
969 | kfree(user); |
970 | return 0; |
971 | } |
972 | |
/* File operations backing the /dev/kmsg character device. */
const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};
981 | |
982 | #ifdef CONFIG_KEXEC_CORE |
983 | /* |
984 | * This appends the listed symbols to /proc/vmcore |
985 | * |
986 | * /proc/vmcore is used by various utilities, like crash and makedumpfile to |
987 | * obtain access to symbols that are otherwise very difficult to locate. These |
988 | * symbols are specifically used so that utilities can access and extract the |
989 | * dmesg log from a vmcore file after a crash. |
990 | */ |
void log_buf_kexec_setup(void)
{
	/* Addresses of the ring buffer and its cursors, for dump tools. */
	VMCOREINFO_SYMBOL(log_buf);
	VMCOREINFO_SYMBOL(log_buf_len);
	VMCOREINFO_SYMBOL(log_first_idx);
	VMCOREINFO_SYMBOL(clear_idx);
	VMCOREINFO_SYMBOL(log_next_idx);
	/*
	 * Export struct printk_log size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */
	VMCOREINFO_STRUCT_SIZE(printk_log);
	VMCOREINFO_OFFSET(printk_log, ts_nsec);
	VMCOREINFO_OFFSET(printk_log, len);
	VMCOREINFO_OFFSET(printk_log, text_len);
	VMCOREINFO_OFFSET(printk_log, dict_len);
}
1008 | #endif |
1009 | |
1010 | /* requested log_buf_len from kernel cmdline */ |
1011 | static unsigned long __initdata new_log_buf_len; |
1012 | |
1013 | /* we practice scaling the ring buffer by powers of 2 */ |
1014 | static void __init log_buf_len_update(unsigned size) |
1015 | { |
1016 | if (size) |
1017 | size = roundup_pow_of_two(size); |
1018 | if (size > log_buf_len) |
1019 | new_log_buf_len = size; |
1020 | } |
1021 | |
1022 | /* save requested log_buf_len since it's too early to process it */ |
1023 | static int __init log_buf_len_setup(char *str) |
1024 | { |
1025 | unsigned size = memparse(str, &str); |
1026 | |
1027 | log_buf_len_update(size); |
1028 | |
1029 | return 0; |
1030 | } |
1031 | early_param("log_buf_len", log_buf_len_setup); |
1032 | |
1033 | #ifdef CONFIG_SMP |
1034 | #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT) |
1035 | |
1036 | static void __init log_buf_add_cpu(void) |
1037 | { |
1038 | unsigned int cpu_extra; |
1039 | |
1040 | /* |
1041 | * archs should set up cpu_possible_bits properly with |
1042 | * set_cpu_possible() after setup_arch() but just in |
1043 | * case lets ensure this is valid. |
1044 | */ |
1045 | if (num_possible_cpus() == 1) |
1046 | return; |
1047 | |
1048 | cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN; |
1049 | |
1050 | /* by default this will only continue through for large > 64 CPUs */ |
1051 | if (cpu_extra <= __LOG_BUF_LEN / 2) |
1052 | return; |
1053 | |
1054 | pr_info("log_buf_len individual max cpu contribution: %d bytes\n", |
1055 | __LOG_CPU_MAX_BUF_LEN); |
1056 | pr_info("log_buf_len total cpu_extra contributions: %d bytes\n", |
1057 | cpu_extra); |
1058 | pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN); |
1059 | |
1060 | log_buf_len_update(cpu_extra + __LOG_BUF_LEN); |
1061 | } |
1062 | #else /* !CONFIG_SMP */ |
1063 | static inline void log_buf_add_cpu(void) {} |
1064 | #endif /* CONFIG_SMP */ |
1065 | |
/*
 * Replace the static __log_buf with a dynamically allocated buffer of the
 * size requested via log_buf_len= and/or log_buf_add_cpu().  May be called
 * twice during boot (@early selects the panicking vs. non-panicking
 * memblock allocator); the log_buf != __log_buf check makes the second
 * call a no-op once the switch has happened.
 */
void __init setup_log_buf(int early)
{
	unsigned long flags;
	char *new_log_buf;
	int free;

	/* already switched to a dynamic buffer - nothing to do */
	if (log_buf != __log_buf)
		return;

	/* per-CPU sizing only matters for the late (non-early) call */
	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len)
		return;

	if (early) {
		new_log_buf =
			memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
	} else {
		new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
							  LOG_ALIGN);
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

	/* swap buffers and copy the already-recorded messages under the lock */
	raw_spin_lock_irqsave(&logbuf_lock, flags);
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
	/* free space left in the old static buffer, for the stats below */
	free = __LOG_BUF_LEN - log_next_idx;
	memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	pr_info("log_buf_len: %d bytes\n", log_buf_len);
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
1107 | |
1108 | static bool __read_mostly ignore_loglevel; |
1109 | |
1110 | static int __init ignore_loglevel_setup(char *str) |
1111 | { |
1112 | ignore_loglevel = true; |
1113 | pr_info("debug: ignoring loglevel setting.\n"); |
1114 | |
1115 | return 0; |
1116 | } |
1117 | |
1118 | early_param("ignore_loglevel", ignore_loglevel_setup); |
1119 | module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); |
1120 | MODULE_PARM_DESC(ignore_loglevel, |
1121 | "ignore loglevel setting (prints all kernel messages to the console)"); |
1122 | |
1123 | static bool suppress_message_printing(int level) |
1124 | { |
1125 | return (level >= console_loglevel && !ignore_loglevel); |
1126 | } |
1127 | |
1128 | #ifdef CONFIG_BOOT_PRINTK_DELAY |
1129 | |
1130 | static int boot_delay; /* msecs delay after each printk during bootup */ |
1131 | static unsigned long long loops_per_msec; /* based on boot_delay */ |
1132 | |
/*
 * Parse the "boot_delay=" parameter (msecs to spin after each printk
 * during boot) and precompute loops_per_msec for the busy-wait in
 * boot_delay_msec().
 */
static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	/* anything over 10 seconds per message is treated as nonsense */
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		"HZ: %d, loops_per_msec: %llu\n",
		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
1149 | early_param("boot_delay", boot_delay_setup); |
1150 | |
/*
 * Busy-wait for boot_delay milliseconds after a printk, so slow consoles
 * can be read during boot.  Skipped entirely once boot finishes or when
 * the message would not be printed anyway.
 */
static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;

	if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
		|| suppress_message_printing(level)) {
		return;
	}

	/* iteration budget derived from lpj in boot_delay_setup() */
	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
1177 | #else |
1178 | static inline void boot_delay_msec(int level) |
1179 | { |
1180 | } |
1181 | #endif |
1182 | |
1183 | static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME); |
1184 | module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); |
1185 | |
/*
 * Format the "[seconds.micros] " timestamp prefix into @buf and return its
 * length.  With @buf == NULL only the length is computed (for sizing).
 * Returns 0 when timestamps are disabled via printk.time.
 */
static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec;

	if (!printk_time)
		return 0;

	rem_nsec = do_div(ts, 1000000000);

	if (!buf)
		return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);

#if defined(CONFIG_SMP) && defined(CONFIG_AMLOGIC_DRIVER)
	/* Amlogic: also print the CPU recorded by print_prefix() */
	return sprintf(buf, "[%5lu.%06lu@%d] ",
		       (unsigned long)ts, rem_nsec / 1000, current_cpu);
#else
	return sprintf(buf, "[%5lu.%06lu] ",
		       (unsigned long)ts, rem_nsec / 1000);
#endif
}
1206 | |
/*
 * Format the record prefix (optional "<facility|level>" syslog tag plus
 * timestamp) into @buf, or compute its length when @buf is NULL.
 */
static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
	size_t len = 0;
	unsigned int prefix = (msg->facility << 3) | msg->level;

	if (syslog) {
		if (buf) {
			len += sprintf(buf, "<%u>", prefix);
		} else {
			/* "<n>" is at least 3 chars; widen for larger values */
			len += 3;
			if (prefix > 999)
				len += 3;
			else if (prefix > 99)
				len += 2;
			else if (prefix > 9)
				len++;
		}
	}
#ifdef CONFIG_AMLOGIC_DRIVER
	/*
	 * Amlogic: stash the record's CPU in a global for print_time().
	 * NOTE(review): side effect on every prefix computation, including
	 * the length-only (@buf == NULL) passes.
	 */
	current_cpu = msg->cpu;
#endif
	len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
	return len;
}
1231 | |
/*
 * Render one record as human-readable text into @buf (or just compute the
 * size when @buf is NULL).  @prev carries the flags of the previously
 * printed record so continuation lines suppress the duplicate prefix and
 * only complete lines get a trailing newline.  Multi-line records get the
 * prefix repeated for every embedded line.
 */
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size)
{
	const char *text = log_text(msg);
	size_t text_size = msg->text_len;
	bool prefix = true;
	bool newline = true;
	size_t len = 0;

	/* we continue the previous record unless it asked for a fresh prefix */
	if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
		prefix = false;

	if (msg->flags & LOG_CONT) {
		if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
			prefix = false;

		if (!(msg->flags & LOG_NEWLINE))
			newline = false;
	}

	do {
		const char *next = memchr(text, '\n', text_size);
		size_t text_len;

		if (next) {
			text_len = next - text;
			next++;
			text_size -= next - text;
		} else {
			text_len = text_size;
		}

		if (buf) {
			/* stop if prefix + line + '\n' can no longer fit */
			if (print_prefix(msg, syslog, NULL) +
			    text_len + 1 >= size - len)
				break;

			if (prefix)
				len += print_prefix(msg, syslog, buf + len);
			memcpy(buf + len, text, text_len);
			len += text_len;
			if (next || newline)
				buf[len++] = '\n';
		} else {
			/* SYSLOG_ACTION_* buffer size only calculation */
			if (prefix)
				len += print_prefix(msg, syslog, NULL);
			len += text_len;
			if (next || newline)
				len++;
		}

		/* every embedded line after the first gets its own prefix */
		prefix = true;
		text = next;
	} while (text);

	return len;
}
1290 | |
/*
 * SYSLOG_ACTION_READ: copy unread records to userspace, advancing the
 * global syslog_* cursor.  A record larger than the remaining user buffer
 * is copied piecewise; syslog_partial remembers how far into the current
 * record earlier read()s got.  Returns bytes copied or -EFAULT/-ENOMEM.
 */
static int syslog_print(char __user *buf, int size)
{
	char *text;
	struct printk_log *msg;
	int len = 0;

	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	while (size > 0) {
		size_t n;
		size_t skip;

		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
			syslog_prev = 0;
			syslog_partial = 0;
		}
		if (syslog_seq == log_next_seq) {
			raw_spin_unlock_irq(&logbuf_lock);
			break;
		}

		/* bytes of this record already handed out by earlier reads */
		skip = syslog_partial;
		msg = log_from_idx(syslog_idx);
		n = msg_print_text(msg, syslog_prev, true, text,
				   LOG_LINE_MAX + PREFIX_MAX);
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
			syslog_idx = log_next(syslog_idx);
			syslog_seq++;
			syslog_prev = msg->flags;
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len){
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
		} else
			n = 0;
		raw_spin_unlock_irq(&logbuf_lock);

		if (!n)
			break;

		/* copy outside the spinlock; text[] is a private snapshot */
		if (copy_to_user(buf, text + skip, n)) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	}

	kfree(text);
	return len;
}
1354 | |
/*
 * SYSLOG_ACTION_READ_ALL/READ_CLEAR/CLEAR: dump the newest records that
 * fit into @size bytes of @buf, starting no earlier than the clear_* mark.
 * With @buf == NULL (SYSLOG_ACTION_CLEAR) only the clear mark is moved.
 * Returns bytes copied or -EFAULT/-ENOMEM.
 */
static int syslog_print_all(char __user *buf, int size, bool clear)
{
	char *text;
	int len = 0;

	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	raw_spin_lock_irq(&logbuf_lock);
	if (buf) {
		u64 next_seq;
		u64 seq;
		u32 idx;
		enum log_flags prev;

		/*
		 * Find first record that fits, including all following records,
		 * into the user-provided buffer for this dump.
		 */
		seq = clear_seq;
		idx = clear_idx;
		prev = 0;
		while (seq < log_next_seq) {
			struct printk_log *msg = log_from_idx(idx);

			len += msg_print_text(msg, prev, true, NULL, 0);
			prev = msg->flags;
			idx = log_next(idx);
			seq++;
		}

		/* move first record forward until length fits into the buffer */
		seq = clear_seq;
		idx = clear_idx;
		prev = 0;
		while (len > size && seq < log_next_seq) {
			struct printk_log *msg = log_from_idx(idx);

			len -= msg_print_text(msg, prev, true, NULL, 0);
			prev = msg->flags;
			idx = log_next(idx);
			seq++;
		}

		/* last message fitting into this dump */
		next_seq = log_next_seq;

		len = 0;
		while (len >= 0 && seq < next_seq) {
			struct printk_log *msg = log_from_idx(idx);
			int textlen;

			textlen = msg_print_text(msg, prev, true, text,
						 LOG_LINE_MAX + PREFIX_MAX);
			if (textlen < 0) {
				len = textlen;
				break;
			}
			idx = log_next(idx);
			seq++;
			prev = msg->flags;

			/* drop the lock around the userspace copy */
			raw_spin_unlock_irq(&logbuf_lock);
			if (copy_to_user(buf + len, text, textlen))
				len = -EFAULT;
			else
				len += textlen;
			raw_spin_lock_irq(&logbuf_lock);

			/* the ring may have wrapped while the lock was dropped */
			if (seq < log_first_seq) {
				/* messages are gone, move to next one */
				seq = log_first_seq;
				idx = log_first_idx;
				prev = 0;
			}
		}
	}

	if (clear) {
		clear_seq = log_next_seq;
		clear_idx = log_next_idx;
	}
	raw_spin_unlock_irq(&logbuf_lock);

	kfree(text);
	return len;
}
1443 | |
/*
 * Backend for syslog(2), /proc/kmsg and the kdb "dmesg" command.
 * @source identifies the caller (SYSLOG_FROM_READER vs. SYSLOG_FROM_PROC)
 * for permission checks and SIZE_UNREAD semantics.  Returns bytes/level
 * counts per action, or a negative errno.
 */
int do_syslog(int type, char __user *buf, int len, int source)
{
	bool clear = false;
	/* loglevel saved by CONSOLE_OFF, restored by CONSOLE_ON */
	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
	int error;

	error = check_syslog_permissions(type, source);
	if (error)
		goto out;

	switch (type) {
	case SYSLOG_ACTION_CLOSE:	/* Close log */
		break;
	case SYSLOG_ACTION_OPEN:	/* Open log */
		break;
	case SYSLOG_ACTION_READ:	/* Read from log */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		/* block until at least one unread record is available */
		error = wait_event_interruptible(log_wait,
						 syslog_seq != log_next_seq);
		if (error)
			goto out;
		error = syslog_print(buf, len);
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		clear = true;
		/* FALL THRU */
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		error = syslog_print_all(buf, len, clear);
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		syslog_print_all(NULL, 0, true);
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = LOGLEVEL_DEFAULT;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = LOGLEVEL_DEFAULT;
		error = 0;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
			syslog_prev = 0;
			syslog_partial = 0;
		}
		if (source == SYSLOG_FROM_PROC) {
			/*
			 * Short-cut for poll(/"proc/kmsg") which simply checks
			 * for pending data, not the size; return the count of
			 * records, not the length.
			 */
			error = log_next_seq - syslog_seq;
		} else {
			u64 seq = syslog_seq;
			u32 idx = syslog_idx;
			enum log_flags prev = syslog_prev;

			/* sum the rendered size of every unread record */
			error = 0;
			while (seq < log_next_seq) {
				struct printk_log *msg = log_from_idx(idx);

				error += msg_print_text(msg, prev, true, NULL, 0);
				idx = log_next(idx);
				seq++;
				prev = msg->flags;
			}
			error -= syslog_partial;
		}
		raw_spin_unlock_irq(&logbuf_lock);
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}
1569 | |
/* syslog(2) entry point; callers are always treated as ordinary readers. */
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
1574 | |
1575 | /* |
1576 | * Call the console drivers, asking them to write out |
1577 | * log_buf[start] to log_buf[end - 1]. |
1578 | * The console_lock must be held. |
1579 | */ |
1580 | static void call_console_drivers(int level, |
1581 | const char *ext_text, size_t ext_len, |
1582 | const char *text, size_t len) |
1583 | { |
1584 | struct console *con; |
1585 | |
1586 | trace_console_rcuidle(text, len); |
1587 | |
1588 | if (!console_drivers) |
1589 | return; |
1590 | |
1591 | for_each_console(con) { |
1592 | if (exclusive_console && con != exclusive_console) |
1593 | continue; |
1594 | if (!(con->flags & CON_ENABLED)) |
1595 | continue; |
1596 | if (!con->write) |
1597 | continue; |
1598 | if (!cpu_online(smp_processor_id()) && |
1599 | !(con->flags & CON_ANYTIME)) |
1600 | continue; |
1601 | if (con->flags & CON_EXTENDED) |
1602 | con->write(con, ext_text, ext_len); |
1603 | else |
1604 | con->write(con, text, len); |
1605 | } |
1606 | } |
1607 | |
1608 | /* |
1609 | * Zap console related locks when oopsing. |
1610 | * To leave time for slow consoles to print a full oops, |
1611 | * only zap at most once every 30 seconds. |
1612 | */ |
static void zap_locks(void)
{
	static unsigned long oops_timestamp;

	/* rate-limit: zap at most once per 30 seconds (see comment above) */
	if (time_after_eq(jiffies, oops_timestamp) &&
	    !time_after(jiffies, oops_timestamp + 30 * HZ))
		return;

	oops_timestamp = jiffies;

	debug_locks_off();
	/* If a crash is occurring, make sure we can't deadlock */
	raw_spin_lock_init(&logbuf_lock);
	/* And make sure that we print immediately */
	sema_init(&console_sem, 1);
}
1629 | |
1630 | int printk_delay_msec __read_mostly; |
1631 | |
1632 | static inline void printk_delay(void) |
1633 | { |
1634 | if (unlikely(printk_delay_msec)) { |
1635 | int m = printk_delay_msec; |
1636 | |
1637 | while (m--) { |
1638 | mdelay(1); |
1639 | touch_nmi_watchdog(); |
1640 | } |
1641 | } |
1642 | } |
1643 | |
1644 | /* |
1645 | * Continuation lines are buffered, and not committed to the record buffer |
1646 | * until the line is complete, or a race forces it. The line fragments |
1647 | * though, are printed immediately to the consoles to ensure everything has |
1648 | * reached the console in case of a kernel crash. |
1649 | */ |
static struct cont {
	char buf[LOG_LINE_MAX];
	size_t len;			/* length == 0 means unused buffer */
	size_t cons;			/* bytes written to console */
	struct task_struct *owner;	/* task of first print*/
	u64 ts_nsec;			/* time of first print */
	u8 level;			/* log level of first message */
	u8 facility;			/* log facility of first message */
	enum log_flags flags;		/* prefix, newline flags */
	bool flushed:1;			/* buffer sealed and committed */
} cont;					/* the single in-flight continuation line */
1661 | |
/*
 * Commit the buffered continuation line to the record log.  Note the
 * asymmetry: when part of the line already reached the console, cont.len
 * is deliberately kept non-zero (and 'flushed' set) so cont_print_text()
 * can emit the remainder before releasing the buffer.
 */
static void cont_flush(void)
{
	if (cont.flushed)
		return;
	if (cont.len == 0)
		return;
	if (cont.cons) {
		/*
		 * If a fragment of this line was directly flushed to the
		 * console; wait for the console to pick up the rest of the
		 * line. LOG_NOCONS suppresses a duplicated output.
		 */
		log_store(cont.facility, cont.level, cont.flags | LOG_NOCONS,
			  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
		cont.flushed = true;
	} else {
		/*
		 * If no fragment of this line ever reached the console,
		 * just submit it to the store and free the buffer.
		 */
		log_store(cont.facility, cont.level, cont.flags, 0,
			  NULL, 0, cont.buf, cont.len);
		cont.len = 0;
	}
}
1687 | |
/*
 * Try to append @text to the continuation buffer.  Returns false when the
 * fragment cannot be merged (sealed buffer, extended consoles present, or
 * no room) - the caller must then store it as its own record.
 */
static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
{
	/* sealed: the console still owes output for the previous line */
	if (cont.len && cont.flushed)
		return false;

	/*
	 * If ext consoles are present, flush and skip in-kernel
	 * continuation. See nr_ext_console_drivers definition. Also, if
	 * the line gets too long, split it up in separate records.
	 */
	if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
		cont_flush();
		return false;
	}

	if (!cont.len) {
		/* first fragment: record the owning task and metadata */
		cont.facility = facility;
		cont.level = level;
		cont.owner = current;
		cont.ts_nsec = local_clock();
		cont.flags = flags;
		cont.cons = 0;
		cont.flushed = false;
	}

	memcpy(cont.buf + cont.len, text, len);
	cont.len += len;

	/*
	 * The original flags come from the first line,
	 * but later continuations can add a newline.
	 */
	if (flags & LOG_NEWLINE) {
		cont.flags |= LOG_NEWLINE;
		cont_flush();
	}

	/* flush early once the buffer is more than 80% full */
	if (cont.len > (sizeof(cont.buf) * 80) / 100)
		cont_flush();

	return true;
}
1728 | |
/*
 * Copy the not-yet-printed part of the continuation line into @text for
 * the console, prefixing a timestamp when this is the start of a fresh
 * console line.  Releases the buffer once a sealed line is fully emitted.
 */
static size_t cont_print_text(char *text, size_t size)
{
	size_t textlen = 0;
	size_t len;

	if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
		textlen += print_time(cont.ts_nsec, text);
		size -= textlen;
	}

	len = cont.len - cont.cons;
	if (len > 0) {
		/* truncate to the console buffer, keeping room for '\n' */
		if (len+1 > size)
			len = size-1;
		memcpy(text + textlen, cont.buf + cont.cons, len);
		textlen += len;
		cont.cons = cont.len;
	}

	if (cont.flushed) {
		if (cont.flags & LOG_NEWLINE)
			text[textlen++] = '\n';
		/* got everything, release buffer */
		cont.len = 0;
	}
	return textlen;
}
1756 | |
/*
 * Amlogic knob: when 1, a same-task fragment without LOG_PREFIX is merged
 * into the pending continuation line (upstream merges on LOG_CONT instead).
 */
#define AML_LOSE_CONTLINE_DEF 1

/*
 * Route a formatted message either into the continuation buffer or
 * straight into the record log.  Returns the number of bytes consumed.
 */
static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
{
	/*
	 * If an earlier line was buffered, and we're a continuation
	 * write from the same process, try to add it to the buffer.
	 */
	if (cont.len) {
#if (AML_LOSE_CONTLINE_DEF == 1)
		if (cont.owner == current && !(lflags & LOG_PREFIX)) {
#else
		if (cont.owner == current && (lflags & LOG_CONT)) {
#endif
			if (cont_add(facility, level, lflags, text, text_len))
				return text_len;
		}
		/* Otherwise, make sure it's flushed */
		cont_flush();
	}

	/* Skip empty continuation lines that couldn't be added - they just flush */
	if (!text_len && (lflags & LOG_CONT))
		return 0;

	/* If it doesn't end in a newline, try to buffer the current line */
	if (!(lflags & LOG_NEWLINE)) {
		if (cont_add(facility, level, lflags, text, text_len))
			return text_len;
	}

	/* Store it in the record log */
	return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
}
1791 | |
/*
 * Core of printk(): format the message, classify it (level, continuation
 * flags), store it in the ring buffer under logbuf_lock with IRQs off,
 * then try to flush the consoles.  Handles its own recursion and, when
 * oopsing, forcibly re-initializes the locks via zap_locks().
 */
asmlinkage int vprintk_emit(int facility, int level,
			    const char *dict, size_t dictlen,
			    const char *fmt, va_list args)
{
	static bool recursion_bug;
	static char textbuf[LOG_LINE_MAX];
	char *text = textbuf;
	size_t text_len = 0;
	enum log_flags lflags = 0;
	unsigned long flags;
	int this_cpu;
	int printed_len = 0;
	int nmi_message_lost;
	bool in_sched = false;
	/* cpu currently holding logbuf_lock in this function */
	static unsigned int logbuf_cpu = UINT_MAX;

	/* LOGLEVEL_SCHED marks callers that must not take console_sem */
	if (level == LOGLEVEL_SCHED) {
		level = LOGLEVEL_DEFAULT;
		in_sched = true;
	}

	boot_delay_msec(level);
	printk_delay();

	local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(logbuf_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress && !lockdep_recursing(current)) {
			recursion_bug = true;
			local_irq_restore(flags);
			return 0;
		}
		zap_locks();
	}

	lockdep_off();
	/* This stops the holder of console_sem just where we want him */
	raw_spin_lock(&logbuf_lock);
	logbuf_cpu = this_cpu;

	/* report a recursion flagged by an earlier aborted call */
	if (unlikely(recursion_bug)) {
		static const char recursion_msg[] =
			"BUG: recent printk recursion!";

		recursion_bug = false;
		/* emit KERN_CRIT message */
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
					 NULL, 0, recursion_msg,
					 strlen(recursion_msg));
	}

	nmi_message_lost = get_nmi_message_lost();
	if (unlikely(nmi_message_lost)) {
		text_len = scnprintf(textbuf, sizeof(textbuf),
				     "BAD LUCK: lost %d message(s) from NMI context!",
				     nmi_message_lost);
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
					 NULL, 0, textbuf, text_len);
	}

	/*
	 * The printf needs to come first; we need the syslog
	 * prefix which might be passed-in as a parameter.
	 */
	text_len = vscnprintf(text, sizeof(textbuf), fmt, args);

	/* mark and strip a trailing newline */
	if (text_len && text[text_len-1] == '\n') {
		text_len--;
		lflags |= LOG_NEWLINE;
	}

	/* strip kernel syslog prefix and extract log level or control flags */
	if (facility == 0) {
		int kern_level;

		while ((kern_level = printk_get_level(text)) != 0) {
			switch (kern_level) {
			case '0' ... '7':
				if (level == LOGLEVEL_DEFAULT)
					level = kern_level - '0';
				/* fallthrough */
			case 'd':	/* KERN_DEFAULT */
				lflags |= LOG_PREFIX;
				break;
			case 'c':	/* KERN_CONT */
				lflags |= LOG_CONT;
			}

			/* each "<x>" marker is two bytes */
			text_len -= 2;
			text += 2;
		}
	}

#ifdef CONFIG_EARLY_PRINTK_DIRECT
	printascii(text);
#endif

	if (level == LOGLEVEL_DEFAULT)
		level = default_message_loglevel;

	if (dict)
		lflags |= LOG_PREFIX|LOG_NEWLINE;

	printed_len += log_output(facility, level, lflags, dict, dictlen, text, text_len);

	logbuf_cpu = UINT_MAX;
	raw_spin_unlock(&logbuf_lock);
	lockdep_on();
	local_irq_restore(flags);

	/* If called from the scheduler, we can not call up(). */
	if (!in_sched) {
		lockdep_off();
		/*
		 * Try to acquire and then immediately release the console
		 * semaphore. The release will print out buffers and wake up
		 * /dev/kmsg and syslog() users.
		 */
		if (console_trylock())
			console_unlock();
		lockdep_on();
	}

	return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
1931 | |
/* va_list variant of printk() with kernel facility and default level. */
asmlinkage int vprintk(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
}
EXPORT_SYMBOL(vprintk);
1937 | |
1938 | asmlinkage int printk_emit(int facility, int level, |
1939 | const char *dict, size_t dictlen, |
1940 | const char *fmt, ...) |
1941 | { |
1942 | va_list args; |
1943 | int r; |
1944 | |
1945 | va_start(args, fmt); |
1946 | r = vprintk_emit(facility, level, dict, dictlen, fmt, args); |
1947 | va_end(args); |
1948 | |
1949 | return r; |
1950 | } |
1951 | EXPORT_SYMBOL(printk_emit); |
1952 | |
1953 | int vprintk_default(const char *fmt, va_list args) |
1954 | { |
1955 | int r; |
1956 | |
1957 | #ifdef CONFIG_KGDB_KDB |
1958 | if (unlikely(kdb_trap_printk)) { |
1959 | r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); |
1960 | return r; |
1961 | } |
1962 | #endif |
1963 | r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); |
1964 | |
1965 | return r; |
1966 | } |
1967 | EXPORT_SYMBOL_GPL(vprintk_default); |
1968 | |
1969 | /** |
1970 | * printk - print a kernel message |
1971 | * @fmt: format string |
1972 | * |
1973 | * This is printk(). It can be called from any context. We want it to work. |
1974 | * |
1975 | * We try to grab the console_lock. If we succeed, it's easy - we log the |
1976 | * output and call the console drivers. If we fail to get the semaphore, we |
1977 | * place the output into the log buffer and return. The current holder of |
1978 | * the console_sem will notice the new output in console_unlock(); and will |
1979 | * send it to the consoles before releasing the lock. |
1980 | * |
1981 | * One effect of this deferred printing is that code which calls printk() and |
1982 | * then changes console_loglevel may break. This is because console_loglevel |
1983 | * is inspected when the actual printing occurs. |
1984 | * |
1985 | * See also: |
1986 | * printf(3) |
1987 | * |
1988 | * See the vsnprintf() documentation for format string extensions over C99. |
1989 | */ |
1990 | asmlinkage __visible int printk(const char *fmt, ...) |
1991 | { |
1992 | va_list args; |
1993 | int r; |
1994 | |
1995 | va_start(args, fmt); |
1996 | r = vprintk_func(fmt, args); |
1997 | va_end(args); |
1998 | |
1999 | return r; |
2000 | } |
2001 | EXPORT_SYMBOL(printk); |
2002 | |
#else /* CONFIG_PRINTK */

/* With printk compiled out, records have no text and no prefix. */
#define LOG_LINE_MAX 0
#define PREFIX_MAX 0

/* Dummy ring-buffer state so the console code below still compiles. */
static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
static enum log_flags syslog_prev;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
static enum log_flags console_prev;
static struct cont {
	size_t len;
	size_t cons;
	u8 level;
	bool flushed:1;
} cont;
/* No-op stand-ins for the CONFIG_PRINTK record accessors and formatters. */
static char *log_text(const struct printk_log *msg) { return NULL; }
static char *log_dict(const struct printk_log *msg) { return NULL; }
static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags) { return 0; }
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len) { return 0; }
static void call_console_drivers(int level,
				 const char *ext_text, size_t ext_len,
				 const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }

/* Still needs to be defined for users */
DEFINE_PER_CPU(printk_func_t, printk_func);

#endif /* CONFIG_PRINTK */
2045 | |
#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;

/*
 * early_printk - write a message directly through the early console
 *
 * Silently does nothing until an early_console has been set up.  The
 * formatted message is truncated to a fixed 512-byte stack buffer.
 */
asmlinkage __visible void early_printk(const char *fmt, ...)
{
	char buf[512];
	int len;
	va_list args;

	if (!early_console)
		return;

	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	early_console->write(early_console, buf, len);
}
#endif
2065 | |
/*
 * Record a console= selection in console_cmdline[].  Returns 0 on success
 * (including when an identical entry already exists), -E2BIG when the
 * table is full.  A non-NULL @brl_options marks a braille console and
 * does not update the selected (preferred) console index.
 */
static int __add_preferred_console(char *name, int idx, char *options,
				   char *brl_options)
{
	struct console_cmdline *c;
	int i;

	/*
	 * See if this tty is not yet registered, and
	 * if we have a slot free.
	 */
	for (i = 0, c = console_cmdline;
	     i < MAX_CMDLINECONSOLES && c->name[0];
	     i++, c++) {
		if (strcmp(c->name, name) == 0 && c->index == idx) {
			if (!brl_options)
				selected_console = i;
			return 0;
		}
	}
	if (i == MAX_CMDLINECONSOLES)
		return -E2BIG;
	if (!brl_options)
		selected_console = i;
	/* The loop above left 'c' pointing at the first free slot. */
	strlcpy(c->name, name, sizeof(c->name));
	c->options = options;
	braille_set_options(c, brl_options);

	c->index = idx;
	return 0;
}
2096 | /* |
2097 | * Set up a console. Called via do_early_param() in init/main.c |
2098 | * for each "console=" parameter in the boot command line. |
2099 | */ |
2100 | static int __init console_setup(char *str) |
2101 | { |
2102 | char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */ |
2103 | char *s, *options, *brl_options = NULL; |
2104 | int idx; |
2105 | |
2106 | if (_braille_console_setup(&str, &brl_options)) |
2107 | return 1; |
2108 | |
2109 | /* |
2110 | * Decode str into name, index, options. |
2111 | */ |
2112 | if (str[0] >= '0' && str[0] <= '9') { |
2113 | strcpy(buf, "ttyS"); |
2114 | strncpy(buf + 4, str, sizeof(buf) - 5); |
2115 | } else { |
2116 | strncpy(buf, str, sizeof(buf) - 1); |
2117 | } |
2118 | buf[sizeof(buf) - 1] = 0; |
2119 | options = strchr(str, ','); |
2120 | if (options) |
2121 | *(options++) = 0; |
2122 | #ifdef __sparc__ |
2123 | if (!strcmp(str, "ttya")) |
2124 | strcpy(buf, "ttyS0"); |
2125 | if (!strcmp(str, "ttyb")) |
2126 | strcpy(buf, "ttyS1"); |
2127 | #endif |
2128 | for (s = buf; *s; s++) |
2129 | if (isdigit(*s) || *s == ',') |
2130 | break; |
2131 | idx = simple_strtoul(s, NULL, 10); |
2132 | *s = 0; |
2133 | |
2134 | __add_preferred_console(buf, idx, options, brl_options); |
2135 | console_set_on_cmdline = 1; |
2136 | return 1; |
2137 | } |
2138 | __setup("console=", console_setup); |
2139 | |
2140 | /** |
2141 | * add_preferred_console - add a device to the list of preferred consoles. |
2142 | * @name: device name |
2143 | * @idx: device index |
2144 | * @options: options for this console |
2145 | * |
2146 | * The last preferred console added will be used for kernel messages |
2147 | * and stdin/out/err for init. Normally this is used by console_setup |
2148 | * above to handle user-supplied console arguments; however it can also |
2149 | * be used by arch-specific code either to override the user or more |
2150 | * commonly to provide a default console (ie from PROM variables) when |
2151 | * the user has not supplied one. |
2152 | */ |
2153 | int add_preferred_console(char *name, int idx, char *options) |
2154 | { |
2155 | return __add_preferred_console(name, idx, options, NULL); |
2156 | } |
2157 | |
/* Consoles are suspended along with the system unless overridden below. */
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);

/* "no_console_suspend" on the command line keeps consoles alive in suspend. */
static int __init console_suspend_disable(char *str)
{
	console_suspend_enabled = false;
	return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
		bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
	" and hibernate operations");
2171 | |
2172 | /** |
2173 | * suspend_console - suspend the console subsystem |
2174 | * |
2175 | * This disables printk() while we go into suspend states |
2176 | */ |
2177 | void suspend_console(void) |
2178 | { |
2179 | if (!console_suspend_enabled) |
2180 | return; |
2181 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); |
2182 | console_lock(); |
2183 | console_suspended = 1; |
2184 | up_console_sem(); |
2185 | } |
2186 | |
/**
 * resume_console - resume printing to the consoles
 *
 * Re-acquires console_sem (released by suspend_console()), clears the
 * suspended flag and flushes any messages logged while suspended.
 */
void resume_console(void)
{
	if (!console_suspend_enabled)
		return;
	down_console_sem();
	console_suspended = 0;
	console_unlock();
}
2195 | |
2196 | /** |
2197 | * console_cpu_notify - print deferred console messages after CPU hotplug |
2198 | * @self: notifier struct |
2199 | * @action: CPU hotplug event |
2200 | * @hcpu: unused |
2201 | * |
2202 | * If printk() is called from a CPU that is not online yet, the messages |
2203 | * will be spooled but will not show up on the console. This function is |
2204 | * called when a new CPU comes online (or fails to come up), and ensures |
2205 | * that any such output gets printed. |
2206 | */ |
2207 | static int console_cpu_notify(struct notifier_block *self, |
2208 | unsigned long action, void *hcpu) |
2209 | { |
2210 | switch (action) { |
2211 | case CPU_ONLINE: |
2212 | case CPU_DEAD: |
2213 | case CPU_DOWN_FAILED: |
2214 | case CPU_UP_CANCELED: |
2215 | console_lock(); |
2216 | console_unlock(); |
2217 | } |
2218 | return NOTIFY_OK; |
2219 | } |
2220 | |
2221 | /** |
2222 | * console_lock - lock the console system for exclusive use. |
2223 | * |
2224 | * Acquires a lock which guarantees that the caller has |
2225 | * exclusive access to the console system and the console_drivers list. |
2226 | * |
2227 | * Can sleep, returns nothing. |
2228 | */ |
2229 | void console_lock(void) |
2230 | { |
2231 | might_sleep(); |
2232 | |
2233 | down_console_sem(); |
2234 | if (console_suspended) |
2235 | return; |
2236 | console_locked = 1; |
2237 | console_may_schedule = 1; |
2238 | } |
2239 | EXPORT_SYMBOL(console_lock); |
2240 | |
2241 | /** |
2242 | * console_trylock - try to lock the console system for exclusive use. |
2243 | * |
2244 | * Try to acquire a lock which guarantees that the caller has exclusive |
2245 | * access to the console system and the console_drivers list. |
2246 | * |
2247 | * returns 1 on success, and 0 on failure to acquire the lock. |
2248 | */ |
2249 | int console_trylock(void) |
2250 | { |
2251 | if (down_trylock_console_sem()) |
2252 | return 0; |
2253 | if (console_suspended) { |
2254 | up_console_sem(); |
2255 | return 0; |
2256 | } |
2257 | console_locked = 1; |
2258 | /* |
2259 | * When PREEMPT_COUNT disabled we can't reliably detect if it's |
2260 | * safe to schedule (e.g. calling printk while holding a spin_lock), |
2261 | * because preempt_disable()/preempt_enable() are just barriers there |
2262 | * and preempt_count() is always 0. |
2263 | * |
2264 | * RCU read sections have a separate preemption counter when |
2265 | * PREEMPT_RCU enabled thus we must take extra care and check |
2266 | * rcu_preempt_depth(), otherwise RCU read sections modify |
2267 | * preempt_count(). |
2268 | */ |
2269 | console_may_schedule = !oops_in_progress && |
2270 | preemptible() && |
2271 | !rcu_preempt_depth(); |
2272 | return 1; |
2273 | } |
2274 | EXPORT_SYMBOL(console_trylock); |
2275 | |
/* Report whether the console system is currently locked (racy snapshot). */
int is_console_locked(void)
{
	return console_locked;
}
2280 | |
2281 | /* |
2282 | * Check if we have any console that is capable of printing while cpu is |
2283 | * booting or shutting down. Requires console_sem. |
2284 | */ |
2285 | static int have_callable_console(void) |
2286 | { |
2287 | struct console *con; |
2288 | |
2289 | for_each_console(con) |
2290 | if ((con->flags & CON_ENABLED) && |
2291 | (con->flags & CON_ANYTIME)) |
2292 | return 1; |
2293 | |
2294 | return 0; |
2295 | } |
2296 | |
2297 | /* |
2298 | * Can we actually use the console at this time on this cpu? |
2299 | * |
2300 | * Console drivers may assume that per-cpu resources have been allocated. So |
2301 | * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't |
2302 | * call them until this CPU is officially up. |
2303 | */ |
2304 | static inline int can_use_console(void) |
2305 | { |
2306 | return cpu_online(raw_smp_processor_id()) || have_callable_console(); |
2307 | } |
2308 | |
/*
 * Flush the pending continuation-line fragment (struct cont) straight to
 * the console drivers, formatting it into @text (of @size bytes).
 */
static void console_cont_flush(char *text, size_t size)
{
	unsigned long flags;
	size_t len;

	raw_spin_lock_irqsave(&logbuf_lock, flags);

	if (!cont.len)
		goto out;

	if (suppress_message_printing(cont.level)) {
		/* Filtered by loglevel: mark consumed instead of printing. */
		cont.cons = cont.len;
		if (cont.flushed)
			cont.len = 0;
		goto out;
	}

	/*
	 * We still queue earlier records, likely because the console was
	 * busy. The earlier ones need to be printed before this one, we
	 * did not flush any fragment so far, so just let it queue up.
	 */
	if (console_seq < log_next_seq && !cont.cons)
		goto out;

	len = cont_print_text(text, size);
	/*
	 * Drop only the spinlock here: interrupts stay disabled across the
	 * driver calls and are restored via local_irq_restore() below.
	 */
	raw_spin_unlock(&logbuf_lock);
	stop_critical_timings();
	call_console_drivers(cont.level, NULL, 0, text, len);
	start_critical_timings();
	local_irq_restore(flags);
	return;
out:
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
2344 | |
2345 | /** |
2346 | * console_unlock - unlock the console system |
2347 | * |
2348 | * Releases the console_lock which the caller holds on the console system |
2349 | * and the console driver list. |
2350 | * |
2351 | * While the console_lock was held, console output may have been buffered |
2352 | * by printk(). If this is the case, console_unlock(); emits |
2353 | * the output prior to releasing the lock. |
2354 | * |
2355 | * If there is output waiting, we wake /dev/kmsg and syslog() users. |
2356 | * |
2357 | * console_unlock(); may be called from any context. |
2358 | */ |
2359 | void console_unlock(void) |
2360 | { |
2361 | static char ext_text[CONSOLE_EXT_LOG_MAX]; |
2362 | static char text[LOG_LINE_MAX + PREFIX_MAX]; |
2363 | static u64 seen_seq; |
2364 | unsigned long flags; |
2365 | bool wake_klogd = false; |
2366 | bool do_cond_resched, retry; |
2367 | |
2368 | if (console_suspended) { |
2369 | up_console_sem(); |
2370 | return; |
2371 | } |
2372 | |
2373 | /* |
2374 | * Console drivers are called with interrupts disabled, so |
2375 | * @console_may_schedule should be cleared before; however, we may |
2376 | * end up dumping a lot of lines, for example, if called from |
2377 | * console registration path, and should invoke cond_resched() |
2378 | * between lines if allowable. Not doing so can cause a very long |
2379 | * scheduling stall on a slow console leading to RCU stall and |
2380 | * softlockup warnings which exacerbate the issue with more |
2381 | * messages practically incapacitating the system. |
2382 | * |
2383 | * console_trylock() is not able to detect the preemptive |
2384 | * context reliably. Therefore the value must be stored before |
2385 | * and cleared after the the "again" goto label. |
2386 | */ |
2387 | do_cond_resched = console_may_schedule; |
2388 | again: |
2389 | console_may_schedule = 0; |
2390 | |
2391 | /* |
2392 | * We released the console_sem lock, so we need to recheck if |
2393 | * cpu is online and (if not) is there at least one CON_ANYTIME |
2394 | * console. |
2395 | */ |
2396 | if (!can_use_console()) { |
2397 | console_locked = 0; |
2398 | up_console_sem(); |
2399 | return; |
2400 | } |
2401 | |
2402 | /* flush buffered message fragment immediately to console */ |
2403 | console_cont_flush(text, sizeof(text)); |
2404 | |
2405 | for (;;) { |
2406 | struct printk_log *msg; |
2407 | size_t ext_len = 0; |
2408 | size_t len; |
2409 | int level; |
2410 | |
2411 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
2412 | if (seen_seq != log_next_seq) { |
2413 | wake_klogd = true; |
2414 | seen_seq = log_next_seq; |
2415 | } |
2416 | |
2417 | if (console_seq < log_first_seq) { |
2418 | len = sprintf(text, "** %u printk messages dropped ** ", |
2419 | (unsigned)(log_first_seq - console_seq)); |
2420 | |
2421 | /* messages are gone, move to first one */ |
2422 | console_seq = log_first_seq; |
2423 | console_idx = log_first_idx; |
2424 | console_prev = 0; |
2425 | } else { |
2426 | len = 0; |
2427 | } |
2428 | skip: |
2429 | if (console_seq == log_next_seq) |
2430 | break; |
2431 | |
2432 | msg = log_from_idx(console_idx); |
2433 | level = msg->level; |
2434 | if ((msg->flags & LOG_NOCONS) || |
2435 | suppress_message_printing(level)) { |
2436 | /* |
2437 | * Skip record we have buffered and already printed |
2438 | * directly to the console when we received it, and |
2439 | * record that has level above the console loglevel. |
2440 | */ |
2441 | console_idx = log_next(console_idx); |
2442 | console_seq++; |
2443 | /* |
2444 | * We will get here again when we register a new |
2445 | * CON_PRINTBUFFER console. Clear the flag so we |
2446 | * will properly dump everything later. |
2447 | */ |
2448 | msg->flags &= ~LOG_NOCONS; |
2449 | console_prev = msg->flags; |
2450 | goto skip; |
2451 | } |
2452 | |
2453 | len += msg_print_text(msg, console_prev, false, |
2454 | text + len, sizeof(text) - len); |
2455 | if (nr_ext_console_drivers) { |
2456 | ext_len = msg_print_ext_header(ext_text, |
2457 | sizeof(ext_text), |
2458 | msg, console_seq, console_prev); |
2459 | ext_len += msg_print_ext_body(ext_text + ext_len, |
2460 | sizeof(ext_text) - ext_len, |
2461 | log_dict(msg), msg->dict_len, |
2462 | log_text(msg), msg->text_len); |
2463 | } |
2464 | console_idx = log_next(console_idx); |
2465 | console_seq++; |
2466 | console_prev = msg->flags; |
2467 | raw_spin_unlock(&logbuf_lock); |
2468 | |
2469 | stop_critical_timings(); /* don't trace print latency */ |
2470 | call_console_drivers(level, ext_text, ext_len, text, len); |
2471 | start_critical_timings(); |
2472 | local_irq_restore(flags); |
2473 | |
2474 | if (do_cond_resched) |
2475 | cond_resched(); |
2476 | } |
2477 | console_locked = 0; |
2478 | |
2479 | /* Release the exclusive_console once it is used */ |
2480 | if (unlikely(exclusive_console)) |
2481 | exclusive_console = NULL; |
2482 | |
2483 | raw_spin_unlock(&logbuf_lock); |
2484 | |
2485 | up_console_sem(); |
2486 | |
2487 | /* |
2488 | * Someone could have filled up the buffer again, so re-check if there's |
2489 | * something to flush. In case we cannot trylock the console_sem again, |
2490 | * there's a new owner and the console_unlock() from them will do the |
2491 | * flush, no worries. |
2492 | */ |
2493 | raw_spin_lock(&logbuf_lock); |
2494 | retry = console_seq != log_next_seq; |
2495 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2496 | |
2497 | if (retry && console_trylock()) |
2498 | goto again; |
2499 | |
2500 | if (wake_klogd) |
2501 | wake_up_klogd(); |
2502 | } |
2503 | EXPORT_SYMBOL(console_unlock); |
2504 | |
2505 | /** |
2506 | * console_conditional_schedule - yield the CPU if required |
2507 | * |
2508 | * If the console code is currently allowed to sleep, and |
2509 | * if this CPU should yield the CPU to another task, do |
2510 | * so here. |
2511 | * |
2512 | * Must be called within console_lock();. |
2513 | */ |
2514 | void __sched console_conditional_schedule(void) |
2515 | { |
2516 | if (console_may_schedule) |
2517 | cond_resched(); |
2518 | } |
2519 | EXPORT_SYMBOL(console_conditional_schedule); |
2520 | |
/*
 * Invoke every enabled console's ->unblank() hook (e.g. to bring a
 * blanked screen back for an oops).  In oops context the console
 * semaphore is only trylocked, to avoid deadlocking on a semaphore the
 * crashed context may already hold.
 */
void console_unblank(void)
{
	struct console *c;

	/*
	 * console_unblank can no longer be called in interrupt context unless
	 * oops_in_progress is set to 1..
	 */
	if (oops_in_progress) {
		if (down_trylock_console_sem() != 0)
			return;
	} else
		console_lock();

	/* Set by hand: on the trylock path console_lock() was not used. */
	console_locked = 1;
	console_may_schedule = 0;
	for_each_console(c)
		if ((c->flags & CON_ENABLED) && c->unblank)
			c->unblank();
	console_unlock();
}
2542 | |
2543 | /** |
2544 | * console_flush_on_panic - flush console content on panic |
2545 | * |
2546 | * Immediately output all pending messages no matter what. |
2547 | */ |
2548 | void console_flush_on_panic(void) |
2549 | { |
2550 | /* |
2551 | * If someone else is holding the console lock, trylock will fail |
2552 | * and may_schedule may be set. Ignore and proceed to unlock so |
2553 | * that messages are flushed out. As this can be called from any |
2554 | * context and we don't want to get preempted while flushing, |
2555 | * ensure may_schedule is cleared. |
2556 | */ |
2557 | console_trylock(); |
2558 | console_may_schedule = 0; |
2559 | console_unlock(); |
2560 | } |
2561 | |
2562 | /* |
2563 | * Return the console tty driver structure and its associated index |
2564 | */ |
2565 | struct tty_driver *console_device(int *index) |
2566 | { |
2567 | struct console *c; |
2568 | struct tty_driver *driver = NULL; |
2569 | |
2570 | console_lock(); |
2571 | for_each_console(c) { |
2572 | if (!c->device) |
2573 | continue; |
2574 | driver = c->device(c, index); |
2575 | if (driver) |
2576 | break; |
2577 | } |
2578 | console_unlock(); |
2579 | return driver; |
2580 | } |
2581 | |
2582 | /* |
2583 | * Prevent further output on the passed console device so that (for example) |
2584 | * serial drivers can disable console output before suspending a port, and can |
2585 | * re-enable output afterwards. |
2586 | */ |
2587 | void console_stop(struct console *console) |
2588 | { |
2589 | console_lock(); |
2590 | console->flags &= ~CON_ENABLED; |
2591 | console_unlock(); |
2592 | } |
2593 | EXPORT_SYMBOL(console_stop); |
2594 | |
/* Re-enable output on a console previously disabled by console_stop(). */
void console_start(struct console *console)
{
	console_lock();
	console->flags |= CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_start);
2602 | |
/* When set, boot consoles are never auto-unregistered (debug aid). */
static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);
2614 | |
2615 | /* |
2616 | * The console driver calls this routine during kernel initialization |
2617 | * to register the console printing procedure with printk() and to |
2618 | * print any messages that were printed by the kernel before the |
2619 | * console driver was initialized. |
2620 | * |
2621 | * This can happen pretty early during the boot process (because of |
2622 | * early_printk) - sometimes before setup_arch() completes - be careful |
2623 | * of what kernel features are used - they may not be initialised yet. |
2624 | * |
2625 | * There are two types of consoles - bootconsoles (early_printk) and |
2626 | * "real" consoles (everything which is not a bootconsole) which are |
2627 | * handled differently. |
2628 | * - Any number of bootconsoles can be registered at any time. |
2629 | * - As soon as a "real" console is registered, all bootconsoles |
2630 | * will be unregistered automatically. |
2631 | * - Once a "real" console is registered, any attempt to register a |
2632 | * bootconsoles will be rejected |
2633 | */ |
2634 | void register_console(struct console *newcon) |
2635 | { |
2636 | int i; |
2637 | unsigned long flags; |
2638 | struct console *bcon = NULL; |
2639 | struct console_cmdline *c; |
2640 | |
2641 | if (console_drivers) |
2642 | for_each_console(bcon) |
2643 | if (WARN(bcon == newcon, |
2644 | "console '%s%d' already registered\n", |
2645 | bcon->name, bcon->index)) |
2646 | return; |
2647 | |
2648 | /* |
2649 | * before we register a new CON_BOOT console, make sure we don't |
2650 | * already have a valid console |
2651 | */ |
2652 | if (console_drivers && newcon->flags & CON_BOOT) { |
2653 | /* find the last or real console */ |
2654 | for_each_console(bcon) { |
2655 | if (!(bcon->flags & CON_BOOT)) { |
2656 | pr_info("Too late to register bootconsole %s%d\n", |
2657 | newcon->name, newcon->index); |
2658 | return; |
2659 | } |
2660 | } |
2661 | } |
2662 | |
2663 | if (console_drivers && console_drivers->flags & CON_BOOT) |
2664 | bcon = console_drivers; |
2665 | |
2666 | if (preferred_console < 0 || bcon || !console_drivers) |
2667 | preferred_console = selected_console; |
2668 | |
2669 | /* |
2670 | * See if we want to use this console driver. If we |
2671 | * didn't select a console we take the first one |
2672 | * that registers here. |
2673 | */ |
2674 | if (preferred_console < 0) { |
2675 | if (newcon->index < 0) |
2676 | newcon->index = 0; |
2677 | if (newcon->setup == NULL || |
2678 | newcon->setup(newcon, NULL) == 0) { |
2679 | newcon->flags |= CON_ENABLED; |
2680 | if (newcon->device) { |
2681 | newcon->flags |= CON_CONSDEV; |
2682 | preferred_console = 0; |
2683 | } |
2684 | } |
2685 | } |
2686 | |
2687 | /* |
2688 | * See if this console matches one we selected on |
2689 | * the command line. |
2690 | */ |
2691 | for (i = 0, c = console_cmdline; |
2692 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2693 | i++, c++) { |
2694 | if (!newcon->match || |
2695 | newcon->match(newcon, c->name, c->index, c->options) != 0) { |
2696 | /* default matching */ |
2697 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); |
2698 | if (strcmp(c->name, newcon->name) != 0) |
2699 | continue; |
2700 | if (newcon->index >= 0 && |
2701 | newcon->index != c->index) |
2702 | continue; |
2703 | if (newcon->index < 0) |
2704 | newcon->index = c->index; |
2705 | |
2706 | if (_braille_register_console(newcon, c)) |
2707 | return; |
2708 | |
2709 | if (newcon->setup && |
2710 | newcon->setup(newcon, c->options) != 0) |
2711 | break; |
2712 | } |
2713 | |
2714 | newcon->flags |= CON_ENABLED; |
2715 | if (i == selected_console) { |
2716 | newcon->flags |= CON_CONSDEV; |
2717 | preferred_console = selected_console; |
2718 | } |
2719 | break; |
2720 | } |
2721 | |
2722 | if (!(newcon->flags & CON_ENABLED)) |
2723 | return; |
2724 | |
2725 | /* |
2726 | * If we have a bootconsole, and are switching to a real console, |
2727 | * don't print everything out again, since when the boot console, and |
2728 | * the real console are the same physical device, it's annoying to |
2729 | * see the beginning boot messages twice |
2730 | */ |
2731 | if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) |
2732 | newcon->flags &= ~CON_PRINTBUFFER; |
2733 | |
2734 | /* |
2735 | * Put this console in the list - keep the |
2736 | * preferred driver at the head of the list. |
2737 | */ |
2738 | console_lock(); |
2739 | if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) { |
2740 | newcon->next = console_drivers; |
2741 | console_drivers = newcon; |
2742 | if (newcon->next) |
2743 | newcon->next->flags &= ~CON_CONSDEV; |
2744 | } else { |
2745 | newcon->next = console_drivers->next; |
2746 | console_drivers->next = newcon; |
2747 | } |
2748 | |
2749 | if (newcon->flags & CON_EXTENDED) |
2750 | if (!nr_ext_console_drivers++) |
2751 | pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n"); |
2752 | |
2753 | if (newcon->flags & CON_PRINTBUFFER) { |
2754 | /* |
2755 | * console_unlock(); will print out the buffered messages |
2756 | * for us. |
2757 | */ |
2758 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
2759 | console_seq = syslog_seq; |
2760 | console_idx = syslog_idx; |
2761 | console_prev = syslog_prev; |
2762 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2763 | /* |
2764 | * We're about to replay the log buffer. Only do this to the |
2765 | * just-registered console to avoid excessive message spam to |
2766 | * the already-registered consoles. |
2767 | */ |
2768 | exclusive_console = newcon; |
2769 | } |
2770 | console_unlock(); |
2771 | console_sysfs_notify(); |
2772 | |
2773 | /* |
2774 | * By unregistering the bootconsoles after we enable the real console |
2775 | * we get the "console xxx enabled" message on all the consoles - |
2776 | * boot consoles, real consoles, etc - this is to ensure that end |
2777 | * users know there might be something in the kernel's log buffer that |
2778 | * went to the bootconsole (that they do not see on the real console) |
2779 | */ |
2780 | pr_info("%sconsole [%s%d] enabled\n", |
2781 | (newcon->flags & CON_BOOT) ? "boot" : "" , |
2782 | newcon->name, newcon->index); |
2783 | if (bcon && |
2784 | ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && |
2785 | !keep_bootcon) { |
2786 | /* We need to iterate through all boot consoles, to make |
2787 | * sure we print everything out, before we unregister them. |
2788 | */ |
2789 | for_each_console(bcon) |
2790 | if (bcon->flags & CON_BOOT) |
2791 | unregister_console(bcon); |
2792 | } |
2793 | } |
2794 | EXPORT_SYMBOL(register_console); |
2795 | |
/*
 * Remove @console from the console_drivers list and disable it.
 * Returns 0 on success, non-zero when the console was not found
 * (or the braille helper reported an error).
 */
int unregister_console(struct console *console)
{
	struct console *a, *b;
	int res;

	pr_info("%sconsole [%s%d] disabled\n",
		(console->flags & CON_BOOT) ? "boot" : "" ,
		console->name, console->index);

	res = _braille_unregister_console(console);
	if (res)
		return res;

	/* NOTE(review): res stays 1 (not an -errno) when not on the list. */
	res = 1;
	console_lock();
	if (console_drivers == console) {
		console_drivers=console->next;
		res = 0;
	} else if (console_drivers) {
		/* Walk the singly linked list; 'b' trails 'a' by one node. */
		for (a=console_drivers->next, b=console_drivers ;
		     a; b=a, a=b->next) {
			if (a == console) {
				b->next = a->next;
				res = 0;
				break;
			}
		}
	}

	if (!res && (console->flags & CON_EXTENDED))
		nr_ext_console_drivers--;

	/*
	 * If this isn't the last console and it has CON_CONSDEV set, we
	 * need to set it on the next preferred console.
	 */
	if (console_drivers != NULL && console->flags & CON_CONSDEV)
		console_drivers->flags |= CON_CONSDEV;

	console->flags &= ~CON_ENABLED;
	console_unlock();
	console_sysfs_notify();
	return res;
}
EXPORT_SYMBOL(unregister_console);
2841 | |
2842 | /* |
2843 | * Some boot consoles access data that is in the init section and which will |
2844 | * be discarded after the initcalls have been run. To make sure that no code |
2845 | * will access this data, unregister the boot consoles in a late initcall. |
2846 | * |
2847 | * If for some reason, such as deferred probe or the driver being a loadable |
2848 | * module, the real console hasn't registered yet at this point, there will |
2849 | * be a brief interval in which no messages are logged to the console, which |
2850 | * makes it difficult to diagnose problems that occur during this time. |
2851 | * |
2852 | * To mitigate this problem somewhat, only unregister consoles whose memory |
2853 | * intersects with the init section. Note that code exists elsewhere to get |
2854 | * rid of the boot console as soon as the proper console shows up, so there |
2855 | * won't be side-effects from postponing the removal. |
2856 | */ |
2857 | static int __init printk_late_init(void) |
2858 | { |
2859 | struct console *con; |
2860 | |
2861 | for_each_console(con) { |
2862 | if (!keep_bootcon && con->flags & CON_BOOT) { |
2863 | /* |
2864 | * Make sure to unregister boot consoles whose data |
2865 | * resides in the init section before the init section |
2866 | * is discarded. Boot consoles whose data will stick |
2867 | * around will automatically be unregistered when the |
2868 | * proper console replaces them. |
2869 | */ |
2870 | if (init_section_intersects(con, sizeof(*con))) |
2871 | unregister_console(con); |
2872 | } |
2873 | } |
2874 | hotcpu_notifier(console_cpu_notify, 0); |
2875 | return 0; |
2876 | } |
2877 | late_initcall(printk_late_init); |
2878 | |
#if defined CONFIG_PRINTK
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP 0x01
#define PRINTK_PENDING_OUTPUT 0x02

/* Per-cpu bitmask of PRINTK_PENDING_* work to perform from irq_work. */
static DEFINE_PER_CPU(int, printk_pending);

/* irq_work callback: runs on the CPU that queued the pending work. */
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	/* Atomically fetch and clear this CPU's pending bits. */
	int pending = __this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		/* If trylock fails, someone else is doing the printing */
		if (console_trylock())
			console_unlock();
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func = wake_up_klogd_work_func,
	.flags = IRQ_WORK_LAZY,
};
2906 | |
/*
 * Wake log_wait readers (klogd/syslog) via irq_work, so this is safe to
 * call from contexts where a direct wake_up would not be.
 */
void wake_up_klogd(void)
{
	/* Disable preemption so the irq_work is queued on this same CPU. */
	preempt_disable();
	if (waitqueue_active(&log_wait)) {
		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}
2916 | |
/*
 * printk_deferred - printk variant for scheduler-internal messages
 *
 * Logs the record with LOGLEVEL_SCHED and defers the console output to
 * irq_work, so this can be used where taking console_sem directly would
 * be unsafe.  Returns the number of characters logged.
 */
int printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	/* Keep the store and the irq_work queueing on the same CPU. */
	preempt_disable();
	va_start(args, fmt);
	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
	va_end(args);

	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	preempt_enable();

	return r;
}
2933 | |
2934 | /* |
2935 | * printk rate limiting, lifted from the networking subsystem. |
2936 | * |
2937 | * This enforces a rate limit: not more than 10 kernel messages |
2938 | * every 5s to make a denial-of-service attack impossible. |
2939 | */ |
2940 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
2941 | |
/*
 * __printk_ratelimit - check the global printk rate limit
 * @func: name of the calling function, passed through for reporting
 *
 * Returns the ___ratelimit() verdict for printk_ratelimit_state.
 */
int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
2947 | |
2948 | /** |
2949 | * printk_timed_ratelimit - caller-controlled printk ratelimiting |
2950 | * @caller_jiffies: pointer to caller's state |
2951 | * @interval_msecs: minimum interval between prints |
2952 | * |
2953 | * printk_timed_ratelimit() returns true if more than @interval_msecs |
2954 | * milliseconds have elapsed since the last time printk_timed_ratelimit() |
2955 | * returned true. |
2956 | */ |
2957 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
2958 | unsigned int interval_msecs) |
2959 | { |
2960 | unsigned long elapsed = jiffies - *caller_jiffies; |
2961 | |
2962 | if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs)) |
2963 | return false; |
2964 | |
2965 | *caller_jiffies = jiffies; |
2966 | return true; |
2967 | } |
2968 | EXPORT_SYMBOL(printk_timed_ratelimit); |
2969 | |
/* Serializes add/remove on dump_list; kmsg_dump() walks it under RCU */
static DEFINE_SPINLOCK(dump_list_lock);
/* All registered kmsg dumpers, in registration order */
static LIST_HEAD(dump_list);
2972 | |
2973 | /** |
2974 | * kmsg_dump_register - register a kernel log dumper. |
2975 | * @dumper: pointer to the kmsg_dumper structure |
2976 | * |
2977 | * Adds a kernel log dumper to the system. The dump callback in the |
2978 | * structure will be called when the kernel oopses or panics and must be |
2979 | * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. |
2980 | */ |
2981 | int kmsg_dump_register(struct kmsg_dumper *dumper) |
2982 | { |
2983 | unsigned long flags; |
2984 | int err = -EBUSY; |
2985 | |
2986 | /* The dump callback needs to be set */ |
2987 | if (!dumper->dump) |
2988 | return -EINVAL; |
2989 | |
2990 | spin_lock_irqsave(&dump_list_lock, flags); |
2991 | /* Don't allow registering multiple times */ |
2992 | if (!dumper->registered) { |
2993 | dumper->registered = 1; |
2994 | list_add_tail_rcu(&dumper->list, &dump_list); |
2995 | err = 0; |
2996 | } |
2997 | spin_unlock_irqrestore(&dump_list_lock, flags); |
2998 | |
2999 | return err; |
3000 | } |
3001 | EXPORT_SYMBOL_GPL(kmsg_dump_register); |
3002 | |
3003 | /** |
3004 | * kmsg_dump_unregister - unregister a kmsg dumper. |
3005 | * @dumper: pointer to the kmsg_dumper structure |
3006 | * |
3007 | * Removes a dump device from the system. Returns zero on success and |
3008 | * %-EINVAL otherwise. |
3009 | */ |
3010 | int kmsg_dump_unregister(struct kmsg_dumper *dumper) |
3011 | { |
3012 | unsigned long flags; |
3013 | int err = -EINVAL; |
3014 | |
3015 | spin_lock_irqsave(&dump_list_lock, flags); |
3016 | if (dumper->registered) { |
3017 | dumper->registered = 0; |
3018 | list_del_rcu(&dumper->list); |
3019 | err = 0; |
3020 | } |
3021 | spin_unlock_irqrestore(&dump_list_lock, flags); |
3022 | synchronize_rcu(); |
3023 | |
3024 | return err; |
3025 | } |
3026 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); |
3027 | |
/* When set, kmsg_dump() does not skip reasons above KMSG_DUMP_OOPS */
static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3030 | |
3031 | /** |
3032 | * kmsg_dump - dump kernel log to kernel message dumpers. |
3033 | * @reason: the reason (oops, panic etc) for dumping |
3034 | * |
3035 | * Call each of the registered dumper's dump() callback, which can |
3036 | * retrieve the kmsg records with kmsg_dump_get_line() or |
3037 | * kmsg_dump_get_buffer(). |
3038 | */ |
3039 | void kmsg_dump(enum kmsg_dump_reason reason) |
3040 | { |
3041 | struct kmsg_dumper *dumper; |
3042 | unsigned long flags; |
3043 | |
3044 | if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) |
3045 | return; |
3046 | |
3047 | rcu_read_lock(); |
3048 | list_for_each_entry_rcu(dumper, &dump_list, list) { |
3049 | if (dumper->max_reason && reason > dumper->max_reason) |
3050 | continue; |
3051 | |
3052 | /* initialize iterator with data about the stored records */ |
3053 | dumper->active = true; |
3054 | |
3055 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3056 | dumper->cur_seq = clear_seq; |
3057 | dumper->cur_idx = clear_idx; |
3058 | dumper->next_seq = log_next_seq; |
3059 | dumper->next_idx = log_next_idx; |
3060 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3061 | |
3062 | /* invoke dumper which will iterate over records */ |
3063 | dumper->dump(dumper, reason); |
3064 | |
3065 | /* reset iterator */ |
3066 | dumper->active = false; |
3067 | } |
3068 | rcu_read_unlock(); |
3069 | } |
3070 | |
3071 | /** |
3072 | * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version) |
3073 | * @dumper: registered kmsg dumper |
3074 | * @syslog: include the "<4>" prefixes |
3075 | * @line: buffer to copy the line to |
3076 | * @size: maximum size of the buffer |
3077 | * @len: length of line placed into buffer |
3078 | * |
3079 | * Start at the beginning of the kmsg buffer, with the oldest kmsg |
3080 | * record, and copy one record into the provided buffer. |
3081 | * |
3082 | * Consecutive calls will return the next available record moving |
3083 | * towards the end of the buffer with the youngest messages. |
3084 | * |
3085 | * A return value of FALSE indicates that there are no more records to |
3086 | * read. |
3087 | * |
3088 | * The function is similar to kmsg_dump_get_line(), but grabs no locks. |
3089 | */ |
3090 | bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, |
3091 | char *line, size_t size, size_t *len) |
3092 | { |
3093 | struct printk_log *msg; |
3094 | size_t l = 0; |
3095 | bool ret = false; |
3096 | |
3097 | if (!dumper->active) |
3098 | goto out; |
3099 | |
3100 | if (dumper->cur_seq < log_first_seq) { |
3101 | /* messages are gone, move to first available one */ |
3102 | dumper->cur_seq = log_first_seq; |
3103 | dumper->cur_idx = log_first_idx; |
3104 | } |
3105 | |
3106 | /* last entry */ |
3107 | if (dumper->cur_seq >= log_next_seq) |
3108 | goto out; |
3109 | |
3110 | msg = log_from_idx(dumper->cur_idx); |
3111 | l = msg_print_text(msg, 0, syslog, line, size); |
3112 | |
3113 | dumper->cur_idx = log_next(dumper->cur_idx); |
3114 | dumper->cur_seq++; |
3115 | ret = true; |
3116 | out: |
3117 | if (len) |
3118 | *len = l; |
3119 | return ret; |
3120 | } |
3121 | |
3122 | /** |
3123 | * kmsg_dump_get_line - retrieve one kmsg log line |
3124 | * @dumper: registered kmsg dumper |
3125 | * @syslog: include the "<4>" prefixes |
3126 | * @line: buffer to copy the line to |
3127 | * @size: maximum size of the buffer |
3128 | * @len: length of line placed into buffer |
3129 | * |
3130 | * Start at the beginning of the kmsg buffer, with the oldest kmsg |
3131 | * record, and copy one record into the provided buffer. |
3132 | * |
3133 | * Consecutive calls will return the next available record moving |
3134 | * towards the end of the buffer with the youngest messages. |
3135 | * |
3136 | * A return value of FALSE indicates that there are no more records to |
3137 | * read. |
3138 | */ |
3139 | bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, |
3140 | char *line, size_t size, size_t *len) |
3141 | { |
3142 | unsigned long flags; |
3143 | bool ret; |
3144 | |
3145 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3146 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); |
3147 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3148 | |
3149 | return ret; |
3150 | } |
3151 | EXPORT_SYMBOL_GPL(kmsg_dump_get_line); |
3152 | |
3153 | /** |
3154 | * kmsg_dump_get_buffer - copy kmsg log lines |
3155 | * @dumper: registered kmsg dumper |
3156 | * @syslog: include the "<4>" prefixes |
3157 | * @buf: buffer to copy the line to |
3158 | * @size: maximum size of the buffer |
3159 | * @len: length of line placed into buffer |
3160 | * |
3161 | * Start at the end of the kmsg buffer and fill the provided buffer |
3162 | * with as many of the the *youngest* kmsg records that fit into it. |
3163 | * If the buffer is large enough, all available kmsg records will be |
3164 | * copied with a single call. |
3165 | * |
3166 | * Consecutive calls will fill the buffer with the next block of |
3167 | * available older records, not including the earlier retrieved ones. |
3168 | * |
3169 | * A return value of FALSE indicates that there are no more records to |
3170 | * read. |
3171 | */ |
3172 | bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, |
3173 | char *buf, size_t size, size_t *len) |
3174 | { |
3175 | unsigned long flags; |
3176 | u64 seq; |
3177 | u32 idx; |
3178 | u64 next_seq; |
3179 | u32 next_idx; |
3180 | enum log_flags prev; |
3181 | size_t l = 0; |
3182 | bool ret = false; |
3183 | |
3184 | if (!dumper->active) |
3185 | goto out; |
3186 | |
3187 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3188 | if (dumper->cur_seq < log_first_seq) { |
3189 | /* messages are gone, move to first available one */ |
3190 | dumper->cur_seq = log_first_seq; |
3191 | dumper->cur_idx = log_first_idx; |
3192 | } |
3193 | |
3194 | /* last entry */ |
3195 | if (dumper->cur_seq >= dumper->next_seq) { |
3196 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3197 | goto out; |
3198 | } |
3199 | |
3200 | /* calculate length of entire buffer */ |
3201 | seq = dumper->cur_seq; |
3202 | idx = dumper->cur_idx; |
3203 | prev = 0; |
3204 | while (seq < dumper->next_seq) { |
3205 | struct printk_log *msg = log_from_idx(idx); |
3206 | |
3207 | l += msg_print_text(msg, prev, true, NULL, 0); |
3208 | idx = log_next(idx); |
3209 | seq++; |
3210 | prev = msg->flags; |
3211 | } |
3212 | |
3213 | /* move first record forward until length fits into the buffer */ |
3214 | seq = dumper->cur_seq; |
3215 | idx = dumper->cur_idx; |
3216 | prev = 0; |
3217 | while (l > size && seq < dumper->next_seq) { |
3218 | struct printk_log *msg = log_from_idx(idx); |
3219 | |
3220 | l -= msg_print_text(msg, prev, true, NULL, 0); |
3221 | idx = log_next(idx); |
3222 | seq++; |
3223 | prev = msg->flags; |
3224 | } |
3225 | |
3226 | /* last message in next interation */ |
3227 | next_seq = seq; |
3228 | next_idx = idx; |
3229 | |
3230 | l = 0; |
3231 | while (seq < dumper->next_seq) { |
3232 | struct printk_log *msg = log_from_idx(idx); |
3233 | |
3234 | l += msg_print_text(msg, prev, syslog, buf + l, size - l); |
3235 | idx = log_next(idx); |
3236 | seq++; |
3237 | prev = msg->flags; |
3238 | } |
3239 | |
3240 | dumper->next_seq = next_seq; |
3241 | dumper->next_idx = next_idx; |
3242 | ret = true; |
3243 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3244 | out: |
3245 | if (len) |
3246 | *len = l; |
3247 | return ret; |
3248 | } |
3249 | EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); |
3250 | |
3251 | /** |
3252 | * kmsg_dump_rewind_nolock - reset the interator (unlocked version) |
3253 | * @dumper: registered kmsg dumper |
3254 | * |
3255 | * Reset the dumper's iterator so that kmsg_dump_get_line() and |
3256 | * kmsg_dump_get_buffer() can be called again and used multiple |
3257 | * times within the same dumper.dump() callback. |
3258 | * |
3259 | * The function is similar to kmsg_dump_rewind(), but grabs no locks. |
3260 | */ |
3261 | void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) |
3262 | { |
3263 | dumper->cur_seq = clear_seq; |
3264 | dumper->cur_idx = clear_idx; |
3265 | dumper->next_seq = log_next_seq; |
3266 | dumper->next_idx = log_next_idx; |
3267 | } |
3268 | |
3269 | /** |
3270 | * kmsg_dump_rewind - reset the interator |
3271 | * @dumper: registered kmsg dumper |
3272 | * |
3273 | * Reset the dumper's iterator so that kmsg_dump_get_line() and |
3274 | * kmsg_dump_get_buffer() can be called again and used multiple |
3275 | * times within the same dumper.dump() callback. |
3276 | */ |
3277 | void kmsg_dump_rewind(struct kmsg_dumper *dumper) |
3278 | { |
3279 | unsigned long flags; |
3280 | |
3281 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3282 | kmsg_dump_rewind_nolock(dumper); |
3283 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3284 | } |
3285 | EXPORT_SYMBOL_GPL(kmsg_dump_rewind); |
3286 | |
/* Arch-provided description printed by dump_stack_print_info(); empty if unset */
static char dump_stack_arch_desc_str[128];
3288 | |
3289 | /** |
3290 | * dump_stack_set_arch_desc - set arch-specific str to show with task dumps |
3291 | * @fmt: printf-style format string |
3292 | * @...: arguments for the format string |
3293 | * |
3294 | * The configured string will be printed right after utsname during task |
3295 | * dumps. Usually used to add arch-specific system identifiers. If an |
3296 | * arch wants to make use of such an ID string, it should initialize this |
3297 | * as soon as possible during boot. |
3298 | */ |
3299 | void __init dump_stack_set_arch_desc(const char *fmt, ...) |
3300 | { |
3301 | va_list args; |
3302 | |
3303 | va_start(args, fmt); |
3304 | vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str), |
3305 | fmt, args); |
3306 | va_end(args); |
3307 | } |
3308 | |
3309 | /** |
3310 | * dump_stack_print_info - print generic debug info for dump_stack() |
3311 | * @log_lvl: log level |
3312 | * |
3313 | * Arch-specific dump_stack() implementations can use this function to |
3314 | * print out the same debug information as the generic dump_stack(). |
3315 | */ |
3316 | void dump_stack_print_info(const char *log_lvl) |
3317 | { |
3318 | printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n", |
3319 | log_lvl, raw_smp_processor_id(), current->pid, current->comm, |
3320 | print_tainted(), init_utsname()->release, |
3321 | (int)strcspn(init_utsname()->version, " "), |
3322 | init_utsname()->version); |
3323 | |
3324 | if (dump_stack_arch_desc_str[0] != '\0') |
3325 | printk("%sHardware name: %s\n", |
3326 | log_lvl, dump_stack_arch_desc_str); |
3327 | |
3328 | print_worker_info(log_lvl, current); |
3329 | } |
3330 | |
3331 | /** |
3332 | * show_regs_print_info - print generic debug info for show_regs() |
3333 | * @log_lvl: log level |
3334 | * |
3335 | * show_regs() implementations can use this function to print out generic |
3336 | * debug information. |
3337 | */ |
3338 | void show_regs_print_info(const char *log_lvl) |
3339 | { |
3340 | dump_stack_print_info(log_lvl); |
3341 | |
3342 | printk("%stask: %p task.stack: %p\n", |
3343 | log_lvl, current, task_stack_page(current)); |
3344 | } |
3345 | |
3346 | #endif |
3347 |