blob: 8e236155157cba54c4c3b38c6742fe192aca9421
1 | /* |
2 | * linux/kernel/printk.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * |
6 | * Modified to make sys_syslog() more flexible: added commands to |
7 | * return the last 4k of kernel messages, regardless of whether |
8 | * they've been read or not. Added option to suppress kernel printk's |
9 | * to the console. Added hook for sending the console messages |
10 | * elsewhere, in preparation for a serial line console (someday). |
11 | * Ted Ts'o, 2/11/93. |
12 | * Modified for sysctl support, 1/8/97, Chris Horn. |
13 | * Fixed SMP synchronization, 08/08/99, Manfred Spraul |
14 | * manfred@colorfullife.com |
15 | * Rewrote bits to get rid of console_lock |
16 | * 01Mar01 Andrew Morton |
17 | */ |
18 | |
19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> |
21 | #include <linux/tty.h> |
22 | #include <linux/tty_driver.h> |
23 | #include <linux/console.h> |
24 | #include <linux/init.h> |
25 | #include <linux/jiffies.h> |
26 | #include <linux/nmi.h> |
27 | #include <linux/module.h> |
28 | #include <linux/moduleparam.h> |
29 | #include <linux/delay.h> |
30 | #include <linux/smp.h> |
31 | #include <linux/security.h> |
32 | #include <linux/bootmem.h> |
33 | #include <linux/memblock.h> |
34 | #include <linux/syscalls.h> |
35 | #include <linux/kexec.h> |
36 | #include <linux/kdb.h> |
37 | #include <linux/ratelimit.h> |
38 | #include <linux/kmsg_dump.h> |
39 | #include <linux/syslog.h> |
40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> |
42 | #include <linux/rculist.h> |
43 | #include <linux/poll.h> |
44 | #include <linux/irq_work.h> |
45 | #include <linux/utsname.h> |
46 | #include <linux/ctype.h> |
47 | #include <linux/uio.h> |
48 | |
49 | #include <asm/uaccess.h> |
50 | #include <asm/sections.h> |
51 | |
52 | #define CREATE_TRACE_POINTS |
53 | #include <trace/events/printk.h> |
54 | |
55 | #include "console_cmdline.h" |
56 | #include "braille.h" |
57 | #include "internal.h" |
58 | |
59 | #ifdef CONFIG_EARLY_PRINTK_DIRECT |
60 | extern void printascii(char *); |
61 | #endif |
62 | |
/*
 * The four console loglevel knobs, in the order userspace expects
 * (presumably exposed via the kernel.printk sysctl — confirm in sysctl.c).
 */
int console_printk[4] = {
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
};
69 | |
/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_sem protects the console_drivers list, and also
 * provides serialisation for access to the entire console
 * driver system.
 */
static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);

#ifdef CONFIG_LOCKDEP
/* Fake lock map so lockdep can track console_sem like a mutex. */
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};
#endif
91 | |
/* Bit numbers for the printk.devkmsg= policy flags. */
enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,	/* always accept /dev/kmsg writes */
	__DEVKMSG_LOG_BIT_OFF,		/* silently drop /dev/kmsg writes */
	__DEVKMSG_LOG_BIT_LOCK,		/* policy fixed on the command line */
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

/* Current policy for userspace writes to /dev/kmsg; see devkmsg_write(). */
static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
108 | |
109 | static int __control_devkmsg(char *str) |
110 | { |
111 | if (!str) |
112 | return -EINVAL; |
113 | |
114 | if (!strncmp(str, "on", 2)) { |
115 | devkmsg_log = DEVKMSG_LOG_MASK_ON; |
116 | return 2; |
117 | } else if (!strncmp(str, "off", 3)) { |
118 | devkmsg_log = DEVKMSG_LOG_MASK_OFF; |
119 | return 3; |
120 | } else if (!strncmp(str, "ratelimit", 9)) { |
121 | devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; |
122 | return 9; |
123 | } |
124 | return -EINVAL; |
125 | } |
126 | |
/*
 * Handle the "printk.devkmsg=" boot parameter: apply the requested mode,
 * mirror it into the sysctl string, and lock the setting for the lifetime
 * of the system.
 */
static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0)
		return 1;

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON) {
		/* buffer was zeroed first, so the strncpy result stays NUL-terminated */
		memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
		strncpy(devkmsg_log_str, "on", 2);
	} else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) {
		memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
		strncpy(devkmsg_log_str, "off", 3);
	}
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout the
	 * runtime of the system. This is a precaution measure against userspace
	 * trying to be a smarta** and attempting to change it up on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 0;
}
__setup("printk.devkmsg=", control_devkmsg);
155 | |
/* Sysctl-visible string form of devkmsg_log; kept in sync by the setters. */
char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
157 | |
/*
 * Sysctl handler for the devkmsg policy string. Accepts exactly "on",
 * "off" or "ratelimit"; any other input — or any write while the setting
 * is locked from the command line — is rejected, and on a bad write the
 * previous value is restored.
 */
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		/* Boot parameter made the policy permanent: refuse changes. */
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		/* Snapshot current state so a bad write can be rolled back. */
		old = devkmsg_log;
		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);

			return -EINVAL;
		}
	}

	return 0;
}
196 | |
/*
 * Number of registered extended console drivers.
 *
 * If extended consoles are present, in-kernel cont reassembly is disabled
 * and each fragment is stored as a separate log entry with proper
 * continuation flag so that every emitted message has full metadata. This
 * doesn't change the result for regular consoles or /proc/kmsg. For
 * /dev/kmsg, as long as the reader concatenates messages according to
 * consecutive continuation flags, the end result should be the same too.
 */
/* NOTE(review): maintained at console (un)registration time — not visible in this chunk. */
static int nr_ext_console_drivers;
208 | |
/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)
217 | |
218 | static int __down_trylock_console_sem(unsigned long ip) |
219 | { |
220 | if (down_trylock(&console_sem)) |
221 | return 1; |
222 | mutex_acquire(&console_lock_dep_map, 0, 1, ip); |
223 | return 0; |
224 | } |
225 | #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) |
226 | |
/* Release console_sem, telling lockdep before the actual up(). */
#define up_console_sem() do { \
	mutex_release(&console_lock_dep_map, 1, _RET_IP_);\
	up(&console_sem);\
} while (0)
231 | |
/*
 * This is used for debugging the mess that is the VT code by
 * keeping track if we have the console semaphore held. It's
 * definitely not the perfect debug tool (we don't know if _WE_
 * hold it and are racing, but it helps tracking those weird code
 * paths in the console code where we end up in places I want
 * locked without the console semaphore held).
 */
static int console_locked, console_suspended;

/*
 * If exclusive_console is non-NULL then only this console is to be printed to.
 */
static struct console *exclusive_console;

/*
 * Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

/* Indices into console_cmdline[] picked by the console-selection logic. */
static int selected_console = -1;
static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;
262 | |
263 | /* |
264 | * The printk log buffer consists of a chain of concatenated variable |
265 | * length records. Every record starts with a record header, containing |
266 | * the overall length of the record. |
267 | * |
268 | * The heads to the first and last entry in the buffer, as well as the |
269 | * sequence numbers of these entries are maintained when messages are |
270 | * stored. |
271 | * |
272 | * If the heads indicate available messages, the length in the header |
 * tells the start of the next message. A length == 0 for the next message
274 | * indicates a wrap-around to the beginning of the buffer. |
275 | * |
276 | * Every record carries the monotonic timestamp in microseconds, as well as |
277 | * the standard userspace syslog level and syslog facility. The usual |
278 | * kernel messages use LOG_KERN; userspace-injected messages always carry |
279 | * a matching syslog facility, by default LOG_USER. The origin of every |
280 | * message can be reliably determined that way. |
281 | * |
282 | * The human readable log message directly follows the message header. The |
283 | * length of the message text is stored in the header, the stored message |
284 | * is not terminated. |
285 | * |
286 | * Optionally, a message can carry a dictionary of properties (key/value pairs), |
287 | * to provide userspace with a machine-readable message context. |
288 | * |
289 | * Examples for well-defined, commonly used property names are: |
290 | * DEVICE=b12:8 device identifier |
291 | * b12:8 block dev_t |
292 | * c127:3 char dev_t |
293 | * n8 netdev ifindex |
294 | * +sound:card0 subsystem:devname |
295 | * SUBSYSTEM=pci driver-core subsystem name |
296 | * |
297 | * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value |
298 | * follows directly after a '=' character. Every property is terminated by |
299 | * a '\0' character. The last property is not terminated. |
300 | * |
301 | * Example of a message structure: |
302 | * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec |
303 | * 0008 34 00 record is 52 bytes long |
304 | * 000a 0b 00 text is 11 bytes long |
305 | * 000c 1f 00 dictionary is 23 bytes long |
306 | * 000e 03 00 LOG_KERN (facility) LOG_ERR (level) |
307 | * 0010 69 74 27 73 20 61 20 6c "it's a l" |
308 | * 69 6e 65 "ine" |
309 | * 001b 44 45 56 49 43 "DEVIC" |
310 | * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D" |
311 | * 52 49 56 45 52 3d 62 75 "RIVER=bu" |
312 | * 67 "g" |
313 | * 0032 00 00 00 padding to next message header |
314 | * |
315 | * The 'struct printk_log' buffer header must never be directly exported to |
316 | * userspace, it is a kernel-private implementation detail that might |
317 | * need to be changed in the future, when the requirements change. |
318 | * |
319 | * /dev/kmsg exports the structured data in the following line format: |
 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
321 | * |
322 | * Users of the export format should ignore possible additional values |
323 | * separated by ',', and find the message after the ';' character. |
324 | * |
325 | * The optional key/value pairs are attached as continuation lines starting |
326 | * with a space character and terminated by a newline. All possible |
 * non-printable characters are escaped in the "\xff" notation.
328 | */ |
329 | |
/* Per-record flags stored in printk_log.flags (low 5 bits). */
enum log_flags {
	LOG_NOCONS	= 1,	/* already flushed, do not print to console */
	LOG_NEWLINE	= 2,	/* text ended with a newline */
	LOG_PREFIX	= 4,	/* text started with a prefix */
	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
};

/*
 * In-memory record header: followed by the text, then the dictionary,
 * then '\0' padding up to LOG_ALIGN. Kernel-private layout — never
 * export it to userspace (see the big comment above).
 */
struct printk_log {
	u64 ts_nsec;		/* timestamp in nanoseconds */
	u16 len;		/* length of entire record */
	u16 text_len;		/* length of text buffer */
	u16 dict_len;		/* length of dictionary buffer */
	u8 facility;		/* syslog facility */
	u8 flags:5;		/* internal record flags */
	u8 level:3;		/* syslog level */
#ifdef CONFIG_AMLOGIC_DRIVER
	int cpu;		/* CPU that stored the record (vendor extension) */
#endif
}
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
__packed __aligned(4)
#endif
;
353 | |
/*
 * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
 * within the scheduler's rq lock. It must be released before calling
 * console_unlock() or anything else that might wake up a process.
 */
DEFINE_RAW_SPINLOCK(logbuf_lock);
360 | |
361 | #ifdef CONFIG_PRINTK |
/* Readers (/dev/kmsg, syslog) sleep here until new records arrive. */
DECLARE_WAIT_QUEUE_HEAD(log_wait);
#ifdef CONFIG_AMLOGIC_DRIVER
/* NOTE(review): vendor extension; its use is not visible in this chunk. */
static int current_cpu;
#endif
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
static enum log_flags syslog_prev;
static size_t syslog_partial;

/* index and sequence number of the first record stored in the buffer */
static u64 log_first_seq;
static u32 log_first_idx;

/* index and sequence number of the next record to store in the buffer */
static u64 log_next_seq;
static u32 log_next_idx;

/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
static enum log_flags console_prev;

/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
static u32 clear_idx;

/* space reserved for the "<level>/timestamp" prefix when formatting a line */
#define PREFIX_MAX		32
#define LOG_LINE_MAX		(1024 - PREFIX_MAX)

/* split the <N> syslog prefix value: low 3 bits level, rest facility */
#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)	((v) >> 3 & 0xff)

/* record buffer */
#define LOG_ALIGN __alignof__(struct printk_log)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
401 | |
/* Return log buffer address (for dumpers/kexec; do not write through it) */
char *log_buf_addr_get(void)
{
	return log_buf;
}
407 | |
/* Return log buffer size in bytes */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}
413 | |
414 | /* human readable text of the record */ |
415 | static char *log_text(const struct printk_log *msg) |
416 | { |
417 | return (char *)msg + sizeof(struct printk_log); |
418 | } |
419 | |
420 | /* optional key/value pair dictionary attached to the record */ |
421 | static char *log_dict(const struct printk_log *msg) |
422 | { |
423 | return (char *)msg + sizeof(struct printk_log) + msg->text_len; |
424 | } |
425 | |
426 | /* get record by index; idx must point to valid msg */ |
427 | static struct printk_log *log_from_idx(u32 idx) |
428 | { |
429 | struct printk_log *msg = (struct printk_log *)(log_buf + idx); |
430 | |
431 | /* |
432 | * A length == 0 record is the end of buffer marker. Wrap around and |
433 | * read the message at the start of the buffer. |
434 | */ |
435 | if (!msg->len) |
436 | return (struct printk_log *)log_buf; |
437 | return msg; |
438 | } |
439 | |
440 | /* get next record; idx must point to valid msg */ |
441 | static u32 log_next(u32 idx) |
442 | { |
443 | struct printk_log *msg = (struct printk_log *)(log_buf + idx); |
444 | |
445 | /* length == 0 indicates the end of the buffer; wrap */ |
446 | /* |
447 | * A length == 0 record is the end of buffer marker. Wrap around and |
448 | * read the message at the start of the buffer as *this* one, and |
449 | * return the one after that. |
450 | */ |
451 | if (!msg->len) { |
452 | msg = (struct printk_log *)log_buf; |
453 | return msg->len; |
454 | } |
455 | return idx + msg->len; |
456 | } |
457 | |
/*
 * Check whether there is enough free space for the given message.
 *
 * The same values of first_idx and next_idx mean that the buffer
 * is either empty or full.
 *
 * If the buffer is empty, we must respect the position of the indexes.
 * They cannot be reset to the beginning of the buffer.
 */
static int logbuf_has_space(u32 msg_size, bool empty)
{
	u32 free;

	if (log_next_idx > log_first_idx || empty)
		/* writer is ahead: free space is the larger of tail and head gaps */
		free = max(log_buf_len - log_next_idx, log_first_idx);
	else
		free = log_first_idx - log_next_idx;

	/*
	 * We need space also for an empty header that signals wrapping
	 * of the buffer.
	 */
	return free >= msg_size + sizeof(struct printk_log);
}
482 | |
/*
 * Drop the oldest records until a message of @msg_size fits.
 * Returns 0 on success, -ENOMEM if it can never fit.
 */
static int log_make_free_space(u32 msg_size)
{
	while (log_first_seq < log_next_seq &&
	       !logbuf_has_space(msg_size, false)) {
		/* drop old messages until we have enough contiguous space */
		log_first_idx = log_next(log_first_idx);
		log_first_seq++;
	}

	/* the 'clear' point must never lag behind the first record */
	if (clear_seq < log_first_seq) {
		clear_seq = log_first_seq;
		clear_idx = log_first_idx;
	}

	/* sequence numbers are equal, so the log buffer is empty */
	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
		return 0;

	return -ENOMEM;
}
503 | |
/* compute the message size including the padding bytes */
static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
{
	u32 size;

	size = sizeof(struct printk_log) + text_len + dict_len;
	/* pad up to the next LOG_ALIGN boundary: (-size) mod LOG_ALIGN */
	*pad_len = (-size) & (LOG_ALIGN - 1);
	size += *pad_len;

	return size;
}
515 | |
/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

/*
 * Shrink an oversized message so it can be stored: cap the text, drop the
 * dictionary entirely, and account for the "<truncated>" marker that will
 * be appended. Returns the recomputed total record size.
 */
static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
			u16 *dict_len, u32 *pad_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
	if (*text_len > max_text_len)
		*text_len = max_text_len;
	/* enable the warning message */
	*trunc_msg_len = strlen(trunc_msg);
	/* disable the "dict" completely */
	*dict_len = 0;
	/* compute the size again, count also the warning message */
	return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
}
541 | |
/*
 * Insert a record into the buffer, discarding old ones as needed, and
 * update the head indices. Caller must hold logbuf_lock. Returns the
 * number of text bytes actually stored (0 if the record was dropped).
 */
static int log_store(int facility, int level,
		     enum log_flags flags, u64 ts_nsec,
		     const char *dict, u16 dict_len,
		     const char *text, u16 text_len)
{
	struct printk_log *msg;
	u32 size, pad_len;
	u16 trunc_msg_len = 0;

	/* number of '\0' padding bytes to next message */
	size = msg_used_size(text_len, dict_len, &pad_len);

	if (log_make_free_space(size)) {
		/* truncate the message if it is too long for empty buffer */
		size = truncate_msg(&text_len, &trunc_msg_len,
				    &dict_len, &pad_len);
		/* survive when the log buffer is too small for trunc_msg */
		if (log_make_free_space(size))
			return 0;
	}

	if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
		/*
		 * This message + an additional empty header does not fit
		 * at the end of the buffer. Add an empty header with len == 0
		 * to signify a wrap around.
		 */
		memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
		log_next_idx = 0;
	}

	/* fill message */
	msg = (struct printk_log *)(log_buf + log_next_idx);
	memcpy(log_text(msg), text, text_len);
	msg->text_len = text_len;
	if (trunc_msg_len) {
		/* append the "<truncated>" marker right after the text */
		memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
		msg->text_len += trunc_msg_len;
	}
	memcpy(log_dict(msg), dict, dict_len);
	msg->dict_len = dict_len;
	msg->facility = facility;
	msg->level = level & 7;		/* level bitfield is 3 bits wide */
	msg->flags = flags & 0x1f;	/* flags bitfield is 5 bits wide */
#ifdef CONFIG_AMLOGIC_DRIVER
	msg->cpu = smp_processor_id();
#endif
	if (ts_nsec > 0)
		msg->ts_nsec = ts_nsec;
	else
		msg->ts_nsec = local_clock();
	memset(log_dict(msg) + dict_len, 0, pad_len);
	msg->len = size;

	/* insert message */
	log_next_idx += msg->len;
	log_next_seq++;

	return msg->text_len;
}
603 | |
/* When set, all syslog operations require privilege (see below). */
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
605 | |
606 | static int syslog_action_restricted(int type) |
607 | { |
608 | if (dmesg_restrict) |
609 | return 1; |
610 | /* |
611 | * Unless restricted, we allow "read all" and "get buffer size" |
612 | * for everybody. |
613 | */ |
614 | return type != SYSLOG_ACTION_READ_ALL && |
615 | type != SYSLOG_ACTION_SIZE_BUFFER; |
616 | } |
617 | |
/*
 * Decide whether the caller may perform syslog action @type coming from
 * @source. Returns 0 if allowed (after the LSM check), -EPERM otherwise.
 */
int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		/*
		 * For historical reasons, accept CAP_SYS_ADMIN too, with
		 * a warning.
		 */
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				 current->comm, task_pid_nr(current));
			goto ok;
		}
		return -EPERM;
	}
ok:
	/* capability gate passed; let the LSM have the final word */
	return security_syslog(type);
}
EXPORT_SYMBOL_GPL(check_syslog_permissions);
647 | |
/* Append one byte at *pp and advance it, unless the end 'e' was reached. */
static void append_char(char **pp, char *e, char c)
{
	if (*pp >= e)
		return;
	**pp = c;
	(*pp)++;
}
653 | |
/*
 * Format the /dev/kmsg header "<prival>,<seq>,<usec>,<contflag>;" for one
 * record into @buf. Returns the number of characters written.
 */
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags)
{
	u64 ts_usec = msg->ts_nsec;
	char cont = '-';

	/* convert the stored nanosecond timestamp to microseconds */
	do_div(ts_usec, 1000);

	/*
	 * If we couldn't merge continuation line fragments during the print,
	 * export the stored flags to allow an optional external merge of the
	 * records. Merging the records isn't always necessarily correct, like
	 * when we hit a race during printing. In most cases though, it produces
	 * better readable output. 'c' in the record flags mark the first
	 * fragment of a line, '+' the following.
	 */
	if (msg->flags & LOG_CONT)
		cont = (prev_flags & LOG_CONT) ? '+' : 'c';

	return scnprintf(buf, size, "%u,%llu,%llu,%c;",
		       (msg->facility << 3) | msg->level, seq, ts_usec, cont);
}
677 | |
/*
 * Append the escaped message text and, if present, the dictionary (one
 * " key=value" continuation line per '\0'-separated property) to @buf.
 * Returns the number of characters written.
 */
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, '\n');

	if (dict_len) {
		bool line = true;

		for (i = 0; i < dict_len; i++) {
			unsigned char c = dict[i];

			/* each property begins a new line, indented by a space */
			if (line) {
				append_char(&p, e, ' ');
				line = false;
			}

			/* '\0' terminates a property: end its line */
			if (c == '\0') {
				append_char(&p, e, '\n');
				line = true;
				continue;
			}

			if (c < ' ' || c >= 127 || c == '\\') {
				p += scnprintf(p, e - p, "\\x%02x", c);
				continue;
			}

			append_char(&p, e, c);
		}
		/* the last property is not '\0'-terminated; close its line */
		append_char(&p, e, '\n');
	}

	return p - buf;
}
725 | |
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	u64 seq;		/* next record sequence number to read */
	u32 idx;		/* buffer index of that record */
	enum log_flags prev;	/* flags of the last record handed out */
	struct ratelimit_state rs;	/* per-open write ratelimit state */
	struct mutex lock;	/* serialises readers sharing this open file */
	char buf[CONSOLE_EXT_LOG_MAX];	/* formatting scratch buffer */
};
735 | |
/*
 * Write handler for /dev/kmsg: inject a userspace message into the kernel
 * log. Honours the devkmsg_log policy (off/ratelimit/on) and parses an
 * optional "<N>" syslog prefix to select level and facility.
 */
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (!user || len > LOG_LINE_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* NUL-terminate so the buffer can be treated as a C string below */
	buf[len] = '\0';
	if (copy_from_iter(buf, len, from) != len) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents 32bit, the lower 3 bit are the log
	 * level, the rest are the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			len -= endp - line;
			line = endp;
		}
	}

	printk_emit(facility, level, NULL, 0, "%s", line);
	kfree(buf);
	return ret;
}
798 | |
/*
 * Read handler for /dev/kmsg: format the next record (header + escaped
 * body) for this reader and copy it out. Blocks until a record is
 * available unless O_NONBLOCK is set; returns -EPIPE if the reader fell
 * behind and its position was overwritten.
 */
static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_log *msg;
	size_t len;
	ssize_t ret;

	if (!user)
		return -EBADF;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;
	raw_spin_lock_irq(&logbuf_lock);
	while (user->seq == log_next_seq) {
		/* nothing new yet */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			raw_spin_unlock_irq(&logbuf_lock);
			goto out;
		}

		/* drop the lock before sleeping; re-take it to re-check */
		raw_spin_unlock_irq(&logbuf_lock);
		ret = wait_event_interruptible(log_wait,
					       user->seq != log_next_seq);
		if (ret)
			goto out;
		raw_spin_lock_irq(&logbuf_lock);
	}

	if (user->seq < log_first_seq) {
		/* our last seen message is gone, return error and reset */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		ret = -EPIPE;
		raw_spin_unlock_irq(&logbuf_lock);
		goto out;
	}

	/* format into the per-open scratch buffer while holding the lock */
	msg = log_from_idx(user->idx);
	len = msg_print_ext_header(user->buf, sizeof(user->buf),
				   msg, user->seq, user->prev);
	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
				  log_dict(msg), msg->dict_len,
				  log_text(msg), msg->text_len);

	user->prev = msg->flags;
	user->idx = log_next(user->idx);
	user->seq++;
	raw_spin_unlock_irq(&logbuf_lock);

	if (len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, user->buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = len;
out:
	mutex_unlock(&user->lock);
	return ret;
}
864 | |
/*
 * Seek handler for /dev/kmsg. Only whole-buffer positions are meaningful:
 * SEEK_SET = oldest record, SEEK_DATA = first record after the last
 * clear, SEEK_END = after the newest record. Non-zero offsets are
 * rejected with -ESPIPE.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (!user)
		return -EBADF;
	if (offset)
		return -ESPIPE;

	raw_spin_lock_irq(&logbuf_lock);
	switch (whence) {
	case SEEK_SET:
		/* the first record */
		user->idx = log_first_idx;
		user->seq = log_first_seq;
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		user->idx = clear_idx;
		user->seq = clear_seq;
		break;
	case SEEK_END:
		/* after the last record */
		user->idx = log_next_idx;
		user->seq = log_next_seq;
		break;
	default:
		ret = -EINVAL;
	}
	raw_spin_unlock_irq(&logbuf_lock);
	return ret;
}
902 | |
/*
 * Poll handler for /dev/kmsg: readable when unread records exist; POLLERR
 * additionally signals that the reader's position was already overwritten.
 */
static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	int ret = 0;

	if (!user)
		return POLLERR|POLLNVAL;

	poll_wait(file, &log_wait, wait);

	raw_spin_lock_irq(&logbuf_lock);
	if (user->seq < log_next_seq) {
		/* return error when data has vanished underneath us */
		if (user->seq < log_first_seq)
			ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
		else
			ret = POLLIN|POLLRDNORM;
	}
	raw_spin_unlock_irq(&logbuf_lock);

	return ret;
}
925 | |
/*
 * Open handler for /dev/kmsg: check policy and permissions, then set up
 * the per-open reader state starting at the oldest available record.
 */
static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	/* start this reader at the oldest record currently in the buffer */
	raw_spin_lock_irq(&logbuf_lock);
	user->idx = log_first_idx;
	user->seq = log_first_seq;
	raw_spin_unlock_irq(&logbuf_lock);

	file->private_data = user;
	return 0;
}
959 | |
960 | static int devkmsg_release(struct inode *inode, struct file *file) |
961 | { |
962 | struct devkmsg_user *user = file->private_data; |
963 | |
964 | if (!user) |
965 | return 0; |
966 | |
967 | ratelimit_state_exit(&user->rs); |
968 | |
969 | mutex_destroy(&user->lock); |
970 | kfree(user); |
971 | return 0; |
972 | } |
973 | |
/* file_operations backing the /dev/kmsg character device */
const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};
982 | |
#ifdef CONFIG_KEXEC_CORE
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate. These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_kexec_setup(void)
{
	/* addresses/values of the ring buffer and its cursors */
	VMCOREINFO_SYMBOL(log_buf);
	VMCOREINFO_SYMBOL(log_buf_len);
	VMCOREINFO_SYMBOL(log_first_idx);
	VMCOREINFO_SYMBOL(clear_idx);
	VMCOREINFO_SYMBOL(log_next_idx);
	/*
	 * Export struct printk_log size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */
	VMCOREINFO_STRUCT_SIZE(printk_log);
	VMCOREINFO_OFFSET(printk_log, ts_nsec);
	VMCOREINFO_OFFSET(printk_log, len);
	VMCOREINFO_OFFSET(printk_log, text_len);
	VMCOREINFO_OFFSET(printk_log, dict_len);
}
#endif
1010 | |
1011 | /* requested log_buf_len from kernel cmdline */ |
1012 | static unsigned long __initdata new_log_buf_len; |
1013 | |
1014 | /* we practice scaling the ring buffer by powers of 2 */ |
1015 | static void __init log_buf_len_update(unsigned size) |
1016 | { |
1017 | if (size) |
1018 | size = roundup_pow_of_two(size); |
1019 | if (size > log_buf_len) |
1020 | new_log_buf_len = size; |
1021 | } |
1022 | |
1023 | /* save requested log_buf_len since it's too early to process it */ |
1024 | static int __init log_buf_len_setup(char *str) |
1025 | { |
1026 | unsigned size = memparse(str, &str); |
1027 | |
1028 | log_buf_len_update(size); |
1029 | |
1030 | return 0; |
1031 | } |
1032 | early_param("log_buf_len", log_buf_len_setup); |
1033 | |
#ifdef CONFIG_SMP
/* per-CPU contribution to the default log buffer size, from Kconfig */
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

/*
 * Grow the default log buffer on large SMP systems: each additional
 * possible CPU contributes __LOG_CPU_MAX_BUF_LEN bytes, but only when the
 * total extra would exceed half of the static __LOG_BUF_LEN.
 */
static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case lets ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large > 64 CPUs */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
1066 | |
/*
 * Replace the static __log_buf with a dynamically sized buffer when a
 * larger size was requested (cmdline or log_buf_add_cpu()). May be called
 * twice during boot: @early uses the panicking memblock allocator, the
 * later call the non-panicking one. A no-op once the swap has happened.
 */
void __init setup_log_buf(int early)
{
	unsigned long flags;
	char *new_log_buf;
	int free;

	/* already switched away from the static buffer - nothing to do */
	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len)
		return;

	if (early) {
		new_log_buf =
			memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
	} else {
		new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
							  LOG_ALIGN);
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

	/* swap buffers and copy existing records under logbuf_lock */
	raw_spin_lock_irqsave(&logbuf_lock, flags);
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
	free = __LOG_BUF_LEN - log_next_idx;
	memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	pr_info("log_buf_len: %d bytes\n", log_buf_len);
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
1108 | |
/* when set, console_loglevel filtering is bypassed entirely */
static bool __read_mostly ignore_loglevel;

/* "ignore_loglevel" on the kernel command line enables the bypass */
static int __init ignore_loglevel_setup(char *str)
{
	ignore_loglevel = true;
	pr_info("debug: ignoring loglevel setting.\n");

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");
1123 | |
1124 | static bool suppress_message_printing(int level) |
1125 | { |
1126 | return (level >= console_loglevel && !ignore_loglevel); |
1127 | } |
1128 | |
#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;	/* based on boot_delay */

/* "boot_delay=" cmdline: calibrate a busy-wait loop from (preset) lpj */
static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	/* delays longer than 10s per message are treated as bogus */
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		"HZ: %d, loops_per_msec: %llu\n",
		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
early_param("boot_delay", boot_delay_setup);

/*
 * Busy-wait after a printk during boot so slow consoles can be read.
 * Skipped once boot has finished or when the message would not be
 * printed anyway (suppress_message_printing()).
 */
static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;

	if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
		|| suppress_message_printing(level)) {
		return;
	}

	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent
		 * compiler reduction; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif
1183 | |
/* prepend "[secs.usecs]" timestamps; runtime-togglable via printk.time */
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

/*
 * Format the "[sssss.uuuuuu] " timestamp prefix for @ts (nanoseconds)
 * into @buf; with a NULL @buf only the would-be length is returned.
 * Returns 0 when timestamps are disabled.
 */
static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec;

	if (!printk_time)
		return 0;

	rem_nsec = do_div(ts, 1000000000);

	/* length-only: the usec digits are always 6 wide, value irrelevant */
	if (!buf)
		return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);

#if defined(CONFIG_SMP) && defined(CONFIG_AMLOGIC_DRIVER)
	/* vendor extension: append the originating CPU ("@N") - note that
	 * current_cpu is set by print_prefix() from the record itself */
	return sprintf(buf, "[%5lu.%06lu@%d] ",
		       (unsigned long)ts, rem_nsec / 1000, current_cpu);
#else
	return sprintf(buf, "[%5lu.%06lu] ",
		       (unsigned long)ts, rem_nsec / 1000);
#endif
}
1207 | |
/*
 * Format the record prefix (optional "<prio>" syslog tag plus timestamp)
 * into @buf, or compute its length when @buf is NULL. Returns the number
 * of bytes written (or that would be written).
 */
static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
	size_t len = 0;
	unsigned int prefix = (msg->facility << 3) | msg->level;

	if (syslog) {
		if (buf) {
			len += sprintf(buf, "<%u>", prefix);
		} else {
			/* "<", ">" and at least one digit ... */
			len += 3;
			if (prefix > 999)
				len += 3;
			else if (prefix > 99)
				len += 2;
			else if (prefix > 9)
				len++;
		}
	}
#ifdef CONFIG_AMLOGIC_DRIVER
	/* vendor: stash the record's CPU for print_time()'s "@N" suffix */
	current_cpu = msg->cpu;
#endif
	len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
	return len;
}
1232 | |
/*
 * Render one record as text into @buf (or compute the needed length when
 * @buf is NULL). Records may contain embedded newlines; each line gets its
 * own prefix. @prev carries the previous record's flags so that
 * continuation records suppress the leading prefix / trailing newline.
 * Returns the number of bytes produced.
 */
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size)
{
	const char *text = log_text(msg);
	size_t text_size = msg->text_len;
	bool prefix = true;
	bool newline = true;
	size_t len = 0;

	/* continuation of the previous record: no prefix on the first line */
	if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
		prefix = false;

	if (msg->flags & LOG_CONT) {
		if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
			prefix = false;

		/* record is itself continued: hold back the newline */
		if (!(msg->flags & LOG_NEWLINE))
			newline = false;
	}

	do {
		const char *next = memchr(text, '\n', text_size);
		size_t text_len;

		if (next) {
			text_len = next - text;
			next++;
			text_size -= next - text;
		} else {
			text_len = text_size;
		}

		if (buf) {
			/* stop before a line that would not fit completely */
			if (print_prefix(msg, syslog, NULL) +
			    text_len + 1 >= size - len)
				break;

			if (prefix)
				len += print_prefix(msg, syslog, buf + len);
			memcpy(buf + len, text, text_len);
			len += text_len;
			if (next || newline)
				buf[len++] = '\n';
		} else {
			/* SYSLOG_ACTION_* buffer size only calculation */
			if (prefix)
				len += print_prefix(msg, syslog, NULL);
			len += text_len;
			if (next || newline)
				len++;
		}

		prefix = true;
		text = next;
	} while (text);

	return len;
}
1291 | |
/*
 * SYSLOG_ACTION_READ: copy unread records to the user buffer, advancing
 * the global syslog_* cursor. A record larger than the remaining user
 * buffer is delivered piecewise via syslog_partial. Returns bytes copied
 * or -ENOMEM/-EFAULT.
 */
static int syslog_print(char __user *buf, int size)
{
	char *text;
	struct printk_log *msg;
	int len = 0;

	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	while (size > 0) {
		size_t n;
		size_t skip;

		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
			syslog_prev = 0;
			syslog_partial = 0;
		}
		if (syslog_seq == log_next_seq) {
			raw_spin_unlock_irq(&logbuf_lock);
			break;
		}

		/* bytes of this record already handed out by a prior read */
		skip = syslog_partial;
		msg = log_from_idx(syslog_idx);
		n = msg_print_text(msg, syslog_prev, true, text,
				   LOG_LINE_MAX + PREFIX_MAX);
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
			syslog_idx = log_next(syslog_idx);
			syslog_seq++;
			syslog_prev = msg->flags;
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len){
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
		} else
			n = 0;
		raw_spin_unlock_irq(&logbuf_lock);

		if (!n)
			break;

		/* copy outside the spinlock; may fault */
		if (copy_to_user(buf, text + skip, n)) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	}

	kfree(text);
	return len;
}
1355 | |
/*
 * SYSLOG_ACTION_READ_ALL/READ_CLEAR backend: dump as many of the newest
 * records since clear_seq as fit in @size bytes of @buf; with @clear,
 * reset the clear_* marker to "now". Called with @buf == NULL just to
 * clear. Returns bytes copied or a negative error.
 */
static int syslog_print_all(char __user *buf, int size, bool clear)
{
	char *text;
	int len = 0;

	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	raw_spin_lock_irq(&logbuf_lock);
	if (buf) {
		u64 next_seq;
		u64 seq;
		u32 idx;
		enum log_flags prev;

		/*
		 * Find first record that fits, including all following records,
		 * into the user-provided buffer for this dump.
		 */
		seq = clear_seq;
		idx = clear_idx;
		prev = 0;
		while (seq < log_next_seq) {
			struct printk_log *msg = log_from_idx(idx);

			len += msg_print_text(msg, prev, true, NULL, 0);
			prev = msg->flags;
			idx = log_next(idx);
			seq++;
		}

		/* move first record forward until length fits into the buffer */
		seq = clear_seq;
		idx = clear_idx;
		prev = 0;
		while (len > size && seq < log_next_seq) {
			struct printk_log *msg = log_from_idx(idx);

			len -= msg_print_text(msg, prev, true, NULL, 0);
			prev = msg->flags;
			idx = log_next(idx);
			seq++;
		}

		/* last message fitting into this dump */
		next_seq = log_next_seq;

		len = 0;
		while (len >= 0 && seq < next_seq) {
			struct printk_log *msg = log_from_idx(idx);
			int textlen;

			textlen = msg_print_text(msg, prev, true, text,
						 LOG_LINE_MAX + PREFIX_MAX);
			if (textlen < 0) {
				len = textlen;
				break;
			}
			idx = log_next(idx);
			seq++;
			prev = msg->flags;

			/* drop the lock for the (possibly faulting) copy */
			raw_spin_unlock_irq(&logbuf_lock);
			if (copy_to_user(buf + len, text, textlen))
				len = -EFAULT;
			else
				len += textlen;
			raw_spin_lock_irq(&logbuf_lock);

			if (seq < log_first_seq) {
				/* messages are gone, move to next one */
				seq = log_first_seq;
				idx = log_first_idx;
				prev = 0;
			}
		}
	}

	if (clear) {
		clear_seq = log_next_seq;
		clear_idx = log_next_idx;
	}
	raw_spin_unlock_irq(&logbuf_lock);

	kfree(text);
	return len;
}
1444 | |
/*
 * Implementation of the syslog(2) system call and of /proc/kmsg reads.
 * @type is one of SYSLOG_ACTION_*, @source identifies the caller
 * (syscall vs /proc/kmsg vs /dev/kmsg) for permission checking.
 * Returns bytes read, a requested count/size, or a negative error.
 */
int do_syslog(int type, char __user *buf, int len, int source)
{
	bool clear = false;
	/* loglevel saved across CONSOLE_OFF/CONSOLE_ON pairs */
	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
	int error;

	error = check_syslog_permissions(type, source);
	if (error)
		goto out;

	switch (type) {
	case SYSLOG_ACTION_CLOSE: /* Close log */
		break;
	case SYSLOG_ACTION_OPEN: /* Open log */
		break;
	case SYSLOG_ACTION_READ: /* Read from log */
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		/* block until at least one unread record exists */
		error = wait_event_interruptible(log_wait,
						 syslog_seq != log_next_seq);
		if (error)
			goto out;
		error = syslog_print(buf, len);
		break;
	/* Read/clear last kernel messages */
	case SYSLOG_ACTION_READ_CLEAR:
		clear = true;
		/* FALL THRU */
	/* Read last kernel messages */
	case SYSLOG_ACTION_READ_ALL:
		error = -EINVAL;
		if (!buf || len < 0)
			goto out;
		error = 0;
		if (!len)
			goto out;
		if (!access_ok(VERIFY_WRITE, buf, len)) {
			error = -EFAULT;
			goto out;
		}
		error = syslog_print_all(buf, len, clear);
		break;
	/* Clear ring buffer */
	case SYSLOG_ACTION_CLEAR:
		syslog_print_all(NULL, 0, true);
		break;
	/* Disable logging to console */
	case SYSLOG_ACTION_CONSOLE_OFF:
		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
			saved_console_loglevel = console_loglevel;
		console_loglevel = minimum_console_loglevel;
		break;
	/* Enable logging to console */
	case SYSLOG_ACTION_CONSOLE_ON:
		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
			console_loglevel = saved_console_loglevel;
			saved_console_loglevel = LOGLEVEL_DEFAULT;
		}
		break;
	/* Set level of messages printed to console */
	case SYSLOG_ACTION_CONSOLE_LEVEL:
		error = -EINVAL;
		if (len < 1 || len > 8)
			goto out;
		if (len < minimum_console_loglevel)
			len = minimum_console_loglevel;
		console_loglevel = len;
		/* Implicitly re-enable logging to console */
		saved_console_loglevel = LOGLEVEL_DEFAULT;
		error = 0;
		break;
	/* Number of chars in the log buffer */
	case SYSLOG_ACTION_SIZE_UNREAD:
		raw_spin_lock_irq(&logbuf_lock);
		if (syslog_seq < log_first_seq) {
			/* messages are gone, move to first one */
			syslog_seq = log_first_seq;
			syslog_idx = log_first_idx;
			syslog_prev = 0;
			syslog_partial = 0;
		}
		if (source == SYSLOG_FROM_PROC) {
			/*
			 * Short-cut for poll(/"proc/kmsg") which simply checks
			 * for pending data, not the size; return the count of
			 * records, not the length.
			 */
			error = log_next_seq - syslog_seq;
		} else {
			u64 seq = syslog_seq;
			u32 idx = syslog_idx;
			enum log_flags prev = syslog_prev;

			error = 0;
			while (seq < log_next_seq) {
				struct printk_log *msg = log_from_idx(idx);

				error += msg_print_text(msg, prev, true, NULL, 0);
				idx = log_next(idx);
				seq++;
				prev = msg->flags;
			}
			error -= syslog_partial;
		}
		raw_spin_unlock_irq(&logbuf_lock);
		break;
	/* Size of the log buffer */
	case SYSLOG_ACTION_SIZE_BUFFER:
		error = log_buf_len;
		break;
	default:
		error = -EINVAL;
		break;
	}
out:
	return error;
}
1570 | |
/* syslog(2) entry point; credentials are checked as SYSLOG_FROM_READER */
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
1575 | |
1576 | /* |
1577 | * Call the console drivers, asking them to write out |
1578 | * log_buf[start] to log_buf[end - 1]. |
1579 | * The console_lock must be held. |
1580 | */ |
1581 | static void call_console_drivers(int level, |
1582 | const char *ext_text, size_t ext_len, |
1583 | const char *text, size_t len) |
1584 | { |
1585 | struct console *con; |
1586 | |
1587 | trace_console_rcuidle(text, len); |
1588 | |
1589 | if (!console_drivers) |
1590 | return; |
1591 | |
1592 | for_each_console(con) { |
1593 | if (exclusive_console && con != exclusive_console) |
1594 | continue; |
1595 | if (!(con->flags & CON_ENABLED)) |
1596 | continue; |
1597 | if (!con->write) |
1598 | continue; |
1599 | if (!cpu_online(smp_processor_id()) && |
1600 | !(con->flags & CON_ANYTIME)) |
1601 | continue; |
1602 | if (con->flags & CON_EXTENDED) |
1603 | con->write(con, ext_text, ext_len); |
1604 | else |
1605 | con->write(con, text, len); |
1606 | } |
1607 | } |
1608 | |
1609 | /* |
1610 | * Zap console related locks when oopsing. |
1611 | * To leave time for slow consoles to print a full oops, |
1612 | * only zap at most once every 30 seconds. |
1613 | */ |
1614 | static void zap_locks(void) |
1615 | { |
1616 | static unsigned long oops_timestamp; |
1617 | |
1618 | if (time_after_eq(jiffies, oops_timestamp) && |
1619 | !time_after(jiffies, oops_timestamp + 30 * HZ)) |
1620 | return; |
1621 | |
1622 | oops_timestamp = jiffies; |
1623 | |
1624 | debug_locks_off(); |
1625 | /* If a crash is occurring, make sure we can't deadlock */ |
1626 | raw_spin_lock_init(&logbuf_lock); |
1627 | /* And make sure that we print immediately */ |
1628 | sema_init(&console_sem, 1); |
1629 | } |
1630 | |
1631 | int printk_delay_msec __read_mostly; |
1632 | |
1633 | static inline void printk_delay(void) |
1634 | { |
1635 | if (unlikely(printk_delay_msec)) { |
1636 | int m = printk_delay_msec; |
1637 | |
1638 | while (m--) { |
1639 | mdelay(1); |
1640 | touch_nmi_watchdog(); |
1641 | } |
1642 | } |
1643 | } |
1644 | |
1645 | /* |
1646 | * Continuation lines are buffered, and not committed to the record buffer |
1647 | * until the line is complete, or a race forces it. The line fragments |
1648 | * though, are printed immediately to the consoles to ensure everything has |
1649 | * reached the console in case of a kernel crash. |
1650 | */ |
1651 | static struct cont { |
1652 | char buf[LOG_LINE_MAX]; |
1653 | size_t len; /* length == 0 means unused buffer */ |
1654 | size_t cons; /* bytes written to console */ |
1655 | struct task_struct *owner; /* task of first print*/ |
1656 | u64 ts_nsec; /* time of first print */ |
1657 | u8 level; /* log level of first message */ |
1658 | u8 facility; /* log facility of first message */ |
1659 | enum log_flags flags; /* prefix, newline flags */ |
1660 | bool flushed:1; /* buffer sealed and committed */ |
1661 | } cont; |
1662 | |
/*
 * Commit the buffered continuation line to the record log. Depending on
 * whether any fragment already went out to the console directly, the
 * stored record is flagged LOG_NOCONS (to avoid duplicated console
 * output) and the buffer kept until the console catches up, or the
 * buffer is released immediately.
 */
static void cont_flush(void)
{
	if (cont.flushed)
		return;
	if (cont.len == 0)
		return;
	if (cont.cons) {
		/*
		 * If a fragment of this line was directly flushed to the
		 * console; wait for the console to pick up the rest of the
		 * line. LOG_NOCONS suppresses a duplicated output.
		 */
		log_store(cont.facility, cont.level, cont.flags | LOG_NOCONS,
			  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
		cont.flushed = true;
	} else {
		/*
		 * If no fragment of this line ever reached the console,
		 * just submit it to the store and free the buffer.
		 */
		log_store(cont.facility, cont.level, cont.flags, 0,
			  NULL, 0, cont.buf, cont.len);
		cont.len = 0;
	}
}
1688 | |
/*
 * Try to append @text to the continuation buffer. Returns false when the
 * fragment cannot be merged (buffer already sealed, an extended console
 * is registered, or the line would overflow) - the caller then stores it
 * as its own record.
 */
static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
{
	if (cont.len && cont.flushed)
		return false;

	/*
	 * If ext consoles are present, flush and skip in-kernel
	 * continuation. See nr_ext_console_drivers definition. Also, if
	 * the line gets too long, split it up in separate records.
	 */
	if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
		cont_flush();
		return false;
	}

	if (!cont.len) {
		/* first fragment: record originator, time and flags */
		cont.facility = facility;
		cont.level = level;
		cont.owner = current;
		cont.ts_nsec = local_clock();
		cont.flags = flags;
		cont.cons = 0;
		cont.flushed = false;
	}

	memcpy(cont.buf + cont.len, text, len);
	cont.len += len;

	/*
	 * The original flags come from the first line,
	 * but later continuations can add a newline.
	 */
	if (flags & LOG_NEWLINE) {
		cont.flags |= LOG_NEWLINE;
		cont_flush();
	}

	/* don't let the buffer fill up completely; flush at 80% */
	if (cont.len > (sizeof(cont.buf) * 80) / 100)
		cont_flush();

	return true;
}
1729 | |
/*
 * Copy the not-yet-printed part of the continuation buffer into @text for
 * direct console output, prepending a timestamp for the first fragment of
 * a fresh line. Releases the buffer once it was sealed and fully printed.
 * Returns the number of bytes placed in @text.
 */
static size_t cont_print_text(char *text, size_t size)
{
	size_t textlen = 0;
	size_t len;

	if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
		textlen += print_time(cont.ts_nsec, text);
		size -= textlen;
	}

	len = cont.len - cont.cons;
	if (len > 0) {
		/* clamp to the caller's buffer, leaving room for '\n' */
		if (len+1 > size)
			len = size-1;
		memcpy(text + textlen, cont.buf + cont.cons, len);
		textlen += len;
		cont.cons = cont.len;
	}

	if (cont.flushed) {
		if (cont.flags & LOG_NEWLINE)
			text[textlen++] = '\n';
		/* got everything, release buffer */
		cont.len = 0;
	}
	return textlen;
}
1757 | |
/*
 * Vendor (Amlogic) switch: with the value 1 a fragment from the same task
 * is merged into the pending continuation line unless it carries
 * LOG_PREFIX; with 0 the upstream rule (merge only explicit LOG_CONT
 * fragments) is used instead.
 */
#define AML_LOSE_CONTLINE_DEF 1

/*
 * Route one message either into the continuation buffer or straight into
 * the record log. Called with logbuf_lock held (from vprintk_emit()).
 * Returns the number of bytes accounted to the caller.
 */
static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
{
	/*
	 * If an earlier line was buffered, and we're a continuation
	 * write from the same process, try to add it to the buffer.
	 */
	if (cont.len) {
#if (AML_LOSE_CONTLINE_DEF == 1)
		if (cont.owner == current && !(lflags & LOG_PREFIX)) {
#else
		if (cont.owner == current && (lflags & LOG_CONT)) {
#endif
			if (cont_add(facility, level, lflags, text, text_len))
				return text_len;
		}
		/* Otherwise, make sure it's flushed */
		cont_flush();
	}

	/* Skip empty continuation lines that couldn't be added - they just flush */
	if (!text_len && (lflags & LOG_CONT))
		return 0;

	/* If it doesn't end in a newline, try to buffer the current line */
	if (!(lflags & LOG_NEWLINE)) {
		if (cont_add(facility, level, lflags, text, text_len))
			return text_len;
	}

	/* Store it in the record log */
	return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
}
1792 | |
/*
 * vprintk_emit - format a message and commit it to the log ring buffer
 * @facility: syslog facility (0 for kernel-internal messages)
 * @level: log level, or LOGLEVEL_DEFAULT/LOGLEVEL_SCHED
 * @dict: optional key/value dictionary for /dev/kmsg readers
 * @dictlen: length of @dict
 * @fmt: printf format string
 * @args: arguments for @fmt
 *
 * Central printk worker: handles recursion detection, NMI message-loss
 * reporting, "<N>"-prefix parsing, continuation-line handling via
 * log_output(), and finally kicks the console unless called from the
 * scheduler (LOGLEVEL_SCHED). Returns the number of bytes logged.
 */
asmlinkage int vprintk_emit(int facility, int level,
			    const char *dict, size_t dictlen,
			    const char *fmt, va_list args)
{
	static bool recursion_bug;
	static char textbuf[LOG_LINE_MAX];
	char *text = textbuf;
	size_t text_len = 0;
	enum log_flags lflags = 0;
	unsigned long flags;
	int this_cpu;
	int printed_len = 0;
	int nmi_message_lost;
	bool in_sched = false;
	/* cpu currently holding logbuf_lock in this function */
	static unsigned int logbuf_cpu = UINT_MAX;

	if (level == LOGLEVEL_SCHED) {
		level = LOGLEVEL_DEFAULT;
		in_sched = true;
	}

	boot_delay_msec(level);
	printk_delay();

	local_irq_save(flags);
	this_cpu = smp_processor_id();

	/*
	 * Ouch, printk recursed into itself!
	 */
	if (unlikely(logbuf_cpu == this_cpu)) {
		/*
		 * If a crash is occurring during printk() on this CPU,
		 * then try to get the crash message out but make sure
		 * we can't deadlock. Otherwise just return to avoid the
		 * recursion and return - but flag the recursion so that
		 * it can be printed at the next appropriate moment:
		 */
		if (!oops_in_progress && !lockdep_recursing(current)) {
			recursion_bug = true;
			local_irq_restore(flags);
			return 0;
		}
		zap_locks();
	}

	lockdep_off();
	/* This stops the holder of console_sem just where we want him */
	raw_spin_lock(&logbuf_lock);
	logbuf_cpu = this_cpu;

	if (unlikely(recursion_bug)) {
		static const char recursion_msg[] =
			"BUG: recent printk recursion!";

		recursion_bug = false;
		/* emit KERN_CRIT message */
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
					 NULL, 0, recursion_msg,
					 strlen(recursion_msg));
	}

	nmi_message_lost = get_nmi_message_lost();
	if (unlikely(nmi_message_lost)) {
		text_len = scnprintf(textbuf, sizeof(textbuf),
				     "BAD LUCK: lost %d message(s) from NMI context!",
				     nmi_message_lost);
		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
					 NULL, 0, textbuf, text_len);
	}

	/*
	 * The printf needs to come first; we need the syslog
	 * prefix which might be passed-in as a parameter.
	 */
	text_len = vscnprintf(text, sizeof(textbuf), fmt, args);

	/* mark and strip a trailing newline */
	if (text_len && text[text_len-1] == '\n') {
		text_len--;
		lflags |= LOG_NEWLINE;
	}

	/* strip kernel syslog prefix and extract log level or control flags */
	if (facility == 0) {
		int kern_level;

		while ((kern_level = printk_get_level(text)) != 0) {
			switch (kern_level) {
			case '0' ... '7':
				if (level == LOGLEVEL_DEFAULT)
					level = kern_level - '0';
				/* fallthrough */
			case 'd': /* KERN_DEFAULT */
				lflags |= LOG_PREFIX;
				break;
			case 'c': /* KERN_CONT */
				lflags |= LOG_CONT;
			}

			/* each "<N>" prefix is exactly two bytes */
			text_len -= 2;
			text += 2;
		}
	}

#ifdef CONFIG_EARLY_PRINTK_DIRECT
	printascii(text);
#endif

	if (level == LOGLEVEL_DEFAULT)
		level = default_message_loglevel;

	if (dict)
		lflags |= LOG_PREFIX|LOG_NEWLINE;

	printed_len += log_output(facility, level, lflags, dict, dictlen, text, text_len);

	logbuf_cpu = UINT_MAX;
	raw_spin_unlock(&logbuf_lock);
	lockdep_on();
	local_irq_restore(flags);

	/* If called from the scheduler, we can not call up(). */
	if (!in_sched) {
		lockdep_off();
		/*
		 * Try to acquire and then immediately release the console
		 * semaphore. The release will print out buffers and wake up
		 * /dev/kmsg and syslog() users.
		 */
		if (console_trylock())
			console_unlock();
		lockdep_on();
	}

	return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
1932 | |
/* va_list variant of printk() with default facility and loglevel */
asmlinkage int vprintk(const char *fmt, va_list args)
{
	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
}
EXPORT_SYMBOL(vprintk);
1938 | |
/*
 * Varargs wrapper around vprintk_emit() for callers that need to pass an
 * explicit facility/level/dictionary (e.g. /dev/kmsg writers).
 */
asmlinkage int printk_emit(int facility, int level,
			   const char *dict, size_t dictlen,
			   const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(printk_emit);
1953 | |
/*
 * Default vprintk backend: divert to the kernel debugger's printf while a
 * kdb trap is active, otherwise emit through the normal log path.
 */
int vprintk_default(const char *fmt, va_list args)
{
	int r;

#ifdef CONFIG_KGDB_KDB
	/* kdb has taken over the console; route output through it */
	if (unlikely(kdb_trap_printk)) {
		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
		return r;
	}
#endif
	r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);

	return r;
}
EXPORT_SYMBOL_GPL(vprintk_default);
1969 | |
1970 | /** |
1971 | * printk - print a kernel message |
1972 | * @fmt: format string |
1973 | * |
1974 | * This is printk(). It can be called from any context. We want it to work. |
1975 | * |
1976 | * We try to grab the console_lock. If we succeed, it's easy - we log the |
1977 | * output and call the console drivers. If we fail to get the semaphore, we |
1978 | * place the output into the log buffer and return. The current holder of |
1979 | * the console_sem will notice the new output in console_unlock(); and will |
1980 | * send it to the consoles before releasing the lock. |
1981 | * |
1982 | * One effect of this deferred printing is that code which calls printk() and |
1983 | * then changes console_loglevel may break. This is because console_loglevel |
1984 | * is inspected when the actual printing occurs. |
1985 | * |
1986 | * See also: |
1987 | * printf(3) |
1988 | * |
1989 | * See the vsnprintf() documentation for format string extensions over C99. |
1990 | */ |
1991 | asmlinkage __visible int printk(const char *fmt, ...) |
1992 | { |
1993 | va_list args; |
1994 | int r; |
1995 | |
1996 | va_start(args, fmt); |
1997 | r = vprintk_func(fmt, args); |
1998 | va_end(args); |
1999 | |
2000 | return r; |
2001 | } |
2002 | EXPORT_SYMBOL(printk); |
2003 | |
#else /* CONFIG_PRINTK */

/*
 * !CONFIG_PRINTK: provide zero-sized buffers, dummy state variables and
 * no-op stubs so the console-management code below still compiles and
 * links without the log buffer machinery.
 */
#define LOG_LINE_MAX 0
#define PREFIX_MAX 0

static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
static enum log_flags syslog_prev;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
static enum log_flags console_prev;
static struct cont {
	size_t len;
	size_t cons;
	u8 level;
	bool flushed:1;
} cont;
static char *log_text(const struct printk_log *msg) { return NULL; }
static char *log_dict(const struct printk_log *msg) { return NULL; }
static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static ssize_t msg_print_ext_header(char *buf, size_t size,
				    struct printk_log *msg, u64 seq,
				    enum log_flags prev_flags) { return 0; }
static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *dict, size_t dict_len,
				  char *text, size_t text_len) { return 0; }
static void call_console_drivers(int level,
				 const char *ext_text, size_t ext_len,
				 const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
			     bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }

/* Still needs to be defined for users */
DEFINE_PER_CPU(printk_func_t, printk_func);

#endif /* CONFIG_PRINTK */
2046 | |
#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;

/*
 * early_printk - print via the early (boot) console, if one is set up
 *
 * Formats into a bounded stack buffer and hands the result straight to
 * the early console's write hook.  Silently does nothing before an
 * early console has been registered.
 */
asmlinkage __visible void early_printk(const char *fmt, ...)
{
	char buf[512];
	va_list args;
	int len;

	if (!early_console)
		return;

	va_start(args, fmt);
	/* vscnprintf() returns the truncated length actually written. */
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	early_console->write(early_console, buf, len);
}
#endif
2066 | |
2067 | static int __add_preferred_console(char *name, int idx, char *options, |
2068 | char *brl_options) |
2069 | { |
2070 | struct console_cmdline *c; |
2071 | int i; |
2072 | |
2073 | /* |
2074 | * See if this tty is not yet registered, and |
2075 | * if we have a slot free. |
2076 | */ |
2077 | for (i = 0, c = console_cmdline; |
2078 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2079 | i++, c++) { |
2080 | if (strcmp(c->name, name) == 0 && c->index == idx) { |
2081 | if (!brl_options) |
2082 | selected_console = i; |
2083 | return 0; |
2084 | } |
2085 | } |
2086 | if (i == MAX_CMDLINECONSOLES) |
2087 | return -E2BIG; |
2088 | if (!brl_options) |
2089 | selected_console = i; |
2090 | strlcpy(c->name, name, sizeof(c->name)); |
2091 | c->options = options; |
2092 | braille_set_options(c, brl_options); |
2093 | |
2094 | c->index = idx; |
2095 | return 0; |
2096 | } |
2097 | /* |
2098 | * Set up a console. Called via do_early_param() in init/main.c |
2099 | * for each "console=" parameter in the boot command line. |
2100 | */ |
2101 | static int __init console_setup(char *str) |
2102 | { |
2103 | char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */ |
2104 | char *s, *options, *brl_options = NULL; |
2105 | int idx; |
2106 | |
2107 | if (_braille_console_setup(&str, &brl_options)) |
2108 | return 1; |
2109 | |
2110 | /* |
2111 | * Decode str into name, index, options. |
2112 | */ |
2113 | if (str[0] >= '0' && str[0] <= '9') { |
2114 | strcpy(buf, "ttyS"); |
2115 | strncpy(buf + 4, str, sizeof(buf) - 5); |
2116 | } else { |
2117 | strncpy(buf, str, sizeof(buf) - 1); |
2118 | } |
2119 | buf[sizeof(buf) - 1] = 0; |
2120 | options = strchr(str, ','); |
2121 | if (options) |
2122 | *(options++) = 0; |
2123 | #ifdef __sparc__ |
2124 | if (!strcmp(str, "ttya")) |
2125 | strcpy(buf, "ttyS0"); |
2126 | if (!strcmp(str, "ttyb")) |
2127 | strcpy(buf, "ttyS1"); |
2128 | #endif |
2129 | for (s = buf; *s; s++) |
2130 | if (isdigit(*s) || *s == ',') |
2131 | break; |
2132 | idx = simple_strtoul(s, NULL, 10); |
2133 | *s = 0; |
2134 | |
2135 | __add_preferred_console(buf, idx, options, brl_options); |
2136 | console_set_on_cmdline = 1; |
2137 | return 1; |
2138 | } |
2139 | __setup("console=", console_setup); |
2140 | |
2141 | /** |
2142 | * add_preferred_console - add a device to the list of preferred consoles. |
2143 | * @name: device name |
2144 | * @idx: device index |
2145 | * @options: options for this console |
2146 | * |
2147 | * The last preferred console added will be used for kernel messages |
2148 | * and stdin/out/err for init. Normally this is used by console_setup |
2149 | * above to handle user-supplied console arguments; however it can also |
2150 | * be used by arch-specific code either to override the user or more |
2151 | * commonly to provide a default console (ie from PROM variables) when |
2152 | * the user has not supplied one. |
2153 | */ |
2154 | int add_preferred_console(char *name, int idx, char *options) |
2155 | { |
2156 | return __add_preferred_console(name, idx, options, NULL); |
2157 | } |
2158 | |
/* Whether consoles are quiesced across suspend; clear to aid debugging. */
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);

/* Boot parameter "no_console_suspend": keep consoles alive in suspend. */
static int __init console_suspend_disable(char *str)
{
	console_suspend_enabled = false;
	return 1;
}
__setup("no_console_suspend", console_suspend_disable);
module_param_named(console_suspend, console_suspend_enabled,
		bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
	" and hibernate operations");
2172 | |
2173 | /** |
2174 | * suspend_console - suspend the console subsystem |
2175 | * |
2176 | * This disables printk() while we go into suspend states |
2177 | */ |
2178 | void suspend_console(void) |
2179 | { |
2180 | if (!console_suspend_enabled) |
2181 | return; |
2182 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); |
2183 | console_lock(); |
2184 | console_suspended = 1; |
2185 | up_console_sem(); |
2186 | } |
2187 | |
/*
 * resume_console - re-enable the console subsystem after suspend
 *
 * Mirror of suspend_console(): reacquires console_sem, clears the
 * suspended flag and lets console_unlock() flush anything buffered
 * while suspended.
 */
void resume_console(void)
{
	if (!console_suspend_enabled)
		return;
	down_console_sem();
	console_suspended = 0;
	console_unlock();
}
2196 | |
2197 | /** |
2198 | * console_cpu_notify - print deferred console messages after CPU hotplug |
2199 | * @self: notifier struct |
2200 | * @action: CPU hotplug event |
2201 | * @hcpu: unused |
2202 | * |
2203 | * If printk() is called from a CPU that is not online yet, the messages |
2204 | * will be spooled but will not show up on the console. This function is |
2205 | * called when a new CPU comes online (or fails to come up), and ensures |
2206 | * that any such output gets printed. |
2207 | */ |
2208 | static int console_cpu_notify(struct notifier_block *self, |
2209 | unsigned long action, void *hcpu) |
2210 | { |
2211 | switch (action) { |
2212 | case CPU_ONLINE: |
2213 | case CPU_DEAD: |
2214 | case CPU_DOWN_FAILED: |
2215 | case CPU_UP_CANCELED: |
2216 | console_lock(); |
2217 | console_unlock(); |
2218 | } |
2219 | return NOTIFY_OK; |
2220 | } |
2221 | |
2222 | /** |
2223 | * console_lock - lock the console system for exclusive use. |
2224 | * |
2225 | * Acquires a lock which guarantees that the caller has |
2226 | * exclusive access to the console system and the console_drivers list. |
2227 | * |
2228 | * Can sleep, returns nothing. |
2229 | */ |
2230 | void console_lock(void) |
2231 | { |
2232 | might_sleep(); |
2233 | |
2234 | down_console_sem(); |
2235 | if (console_suspended) |
2236 | return; |
2237 | console_locked = 1; |
2238 | console_may_schedule = 1; |
2239 | } |
2240 | EXPORT_SYMBOL(console_lock); |
2241 | |
2242 | /** |
2243 | * console_trylock - try to lock the console system for exclusive use. |
2244 | * |
2245 | * Try to acquire a lock which guarantees that the caller has exclusive |
2246 | * access to the console system and the console_drivers list. |
2247 | * |
2248 | * returns 1 on success, and 0 on failure to acquire the lock. |
2249 | */ |
2250 | int console_trylock(void) |
2251 | { |
2252 | if (down_trylock_console_sem()) |
2253 | return 0; |
2254 | if (console_suspended) { |
2255 | up_console_sem(); |
2256 | return 0; |
2257 | } |
2258 | console_locked = 1; |
2259 | /* |
2260 | * When PREEMPT_COUNT disabled we can't reliably detect if it's |
2261 | * safe to schedule (e.g. calling printk while holding a spin_lock), |
2262 | * because preempt_disable()/preempt_enable() are just barriers there |
2263 | * and preempt_count() is always 0. |
2264 | * |
2265 | * RCU read sections have a separate preemption counter when |
2266 | * PREEMPT_RCU enabled thus we must take extra care and check |
2267 | * rcu_preempt_depth(), otherwise RCU read sections modify |
2268 | * preempt_count(). |
2269 | */ |
2270 | console_may_schedule = !oops_in_progress && |
2271 | preemptible() && |
2272 | !rcu_preempt_depth(); |
2273 | return 1; |
2274 | } |
2275 | EXPORT_SYMBOL(console_trylock); |
2276 | |
/* Report whether the console system is currently locked (for asserts). */
int is_console_locked(void)
{
	return console_locked;
}
2281 | |
2282 | /* |
2283 | * Check if we have any console that is capable of printing while cpu is |
2284 | * booting or shutting down. Requires console_sem. |
2285 | */ |
2286 | static int have_callable_console(void) |
2287 | { |
2288 | struct console *con; |
2289 | |
2290 | for_each_console(con) |
2291 | if ((con->flags & CON_ENABLED) && |
2292 | (con->flags & CON_ANYTIME)) |
2293 | return 1; |
2294 | |
2295 | return 0; |
2296 | } |
2297 | |
2298 | /* |
2299 | * Can we actually use the console at this time on this cpu? |
2300 | * |
2301 | * Console drivers may assume that per-cpu resources have been allocated. So |
2302 | * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't |
2303 | * call them until this CPU is officially up. |
2304 | */ |
2305 | static inline int can_use_console(void) |
2306 | { |
2307 | return cpu_online(raw_smp_processor_id()) || have_callable_console(); |
2308 | } |
2309 | |
/*
 * Flush any pending continuation-line fragment straight to the console
 * drivers.  Called with console_sem held; takes logbuf_lock internally.
 */
static void console_cont_flush(char *text, size_t size)
{
	unsigned long flags;
	size_t len;

	raw_spin_lock_irqsave(&logbuf_lock, flags);

	/* No fragment buffered: nothing to do. */
	if (!cont.len)
		goto out;

	/* Fragment's level is suppressed: mark it consumed, don't print. */
	if (suppress_message_printing(cont.level)) {
		cont.cons = cont.len;
		if (cont.flushed)
			cont.len = 0;
		goto out;
	}

	/*
	 * We still queue earlier records, likely because the console was
	 * busy. The earlier ones need to be printed before this one, we
	 * did not flush any fragment so far, so just let it queue up.
	 */
	if (console_seq < log_next_seq && !cont.cons)
		goto out;

	len = cont_print_text(text, size);
	/* Drop only the spinlock; IRQs stay off across the driver call. */
	raw_spin_unlock(&logbuf_lock);
	stop_critical_timings();
	call_console_drivers(cont.level, NULL, 0, text, len);
	start_critical_timings();
	local_irq_restore(flags);
	return;
out:
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
2345 | |
2346 | /** |
2347 | * console_unlock - unlock the console system |
2348 | * |
2349 | * Releases the console_lock which the caller holds on the console system |
2350 | * and the console driver list. |
2351 | * |
2352 | * While the console_lock was held, console output may have been buffered |
2353 | * by printk(). If this is the case, console_unlock(); emits |
2354 | * the output prior to releasing the lock. |
2355 | * |
2356 | * If there is output waiting, we wake /dev/kmsg and syslog() users. |
2357 | * |
2358 | * console_unlock(); may be called from any context. |
2359 | */ |
2360 | void console_unlock(void) |
2361 | { |
2362 | static char ext_text[CONSOLE_EXT_LOG_MAX]; |
2363 | static char text[LOG_LINE_MAX + PREFIX_MAX]; |
2364 | static u64 seen_seq; |
2365 | unsigned long flags; |
2366 | bool wake_klogd = false; |
2367 | bool do_cond_resched, retry; |
2368 | |
2369 | if (console_suspended) { |
2370 | up_console_sem(); |
2371 | return; |
2372 | } |
2373 | |
2374 | /* |
2375 | * Console drivers are called with interrupts disabled, so |
2376 | * @console_may_schedule should be cleared before; however, we may |
2377 | * end up dumping a lot of lines, for example, if called from |
2378 | * console registration path, and should invoke cond_resched() |
2379 | * between lines if allowable. Not doing so can cause a very long |
2380 | * scheduling stall on a slow console leading to RCU stall and |
2381 | * softlockup warnings which exacerbate the issue with more |
2382 | * messages practically incapacitating the system. |
2383 | * |
2384 | * console_trylock() is not able to detect the preemptive |
2385 | * context reliably. Therefore the value must be stored before |
2386 | * and cleared after the the "again" goto label. |
2387 | */ |
2388 | do_cond_resched = console_may_schedule; |
2389 | again: |
2390 | console_may_schedule = 0; |
2391 | |
2392 | /* |
2393 | * We released the console_sem lock, so we need to recheck if |
2394 | * cpu is online and (if not) is there at least one CON_ANYTIME |
2395 | * console. |
2396 | */ |
2397 | if (!can_use_console()) { |
2398 | console_locked = 0; |
2399 | up_console_sem(); |
2400 | return; |
2401 | } |
2402 | |
2403 | /* flush buffered message fragment immediately to console */ |
2404 | console_cont_flush(text, sizeof(text)); |
2405 | |
2406 | for (;;) { |
2407 | struct printk_log *msg; |
2408 | size_t ext_len = 0; |
2409 | size_t len; |
2410 | int level; |
2411 | |
2412 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
2413 | if (seen_seq != log_next_seq) { |
2414 | wake_klogd = true; |
2415 | seen_seq = log_next_seq; |
2416 | } |
2417 | |
2418 | if (console_seq < log_first_seq) { |
2419 | len = sprintf(text, "** %u printk messages dropped ** ", |
2420 | (unsigned)(log_first_seq - console_seq)); |
2421 | |
2422 | /* messages are gone, move to first one */ |
2423 | console_seq = log_first_seq; |
2424 | console_idx = log_first_idx; |
2425 | console_prev = 0; |
2426 | } else { |
2427 | len = 0; |
2428 | } |
2429 | skip: |
2430 | if (console_seq == log_next_seq) |
2431 | break; |
2432 | |
2433 | msg = log_from_idx(console_idx); |
2434 | level = msg->level; |
2435 | if ((msg->flags & LOG_NOCONS) || |
2436 | suppress_message_printing(level)) { |
2437 | /* |
2438 | * Skip record we have buffered and already printed |
2439 | * directly to the console when we received it, and |
2440 | * record that has level above the console loglevel. |
2441 | */ |
2442 | console_idx = log_next(console_idx); |
2443 | console_seq++; |
2444 | /* |
2445 | * We will get here again when we register a new |
2446 | * CON_PRINTBUFFER console. Clear the flag so we |
2447 | * will properly dump everything later. |
2448 | */ |
2449 | msg->flags &= ~LOG_NOCONS; |
2450 | console_prev = msg->flags; |
2451 | goto skip; |
2452 | } |
2453 | |
2454 | len += msg_print_text(msg, console_prev, false, |
2455 | text + len, sizeof(text) - len); |
2456 | if (nr_ext_console_drivers) { |
2457 | ext_len = msg_print_ext_header(ext_text, |
2458 | sizeof(ext_text), |
2459 | msg, console_seq, console_prev); |
2460 | ext_len += msg_print_ext_body(ext_text + ext_len, |
2461 | sizeof(ext_text) - ext_len, |
2462 | log_dict(msg), msg->dict_len, |
2463 | log_text(msg), msg->text_len); |
2464 | } |
2465 | console_idx = log_next(console_idx); |
2466 | console_seq++; |
2467 | console_prev = msg->flags; |
2468 | raw_spin_unlock(&logbuf_lock); |
2469 | |
2470 | stop_critical_timings(); /* don't trace print latency */ |
2471 | call_console_drivers(level, ext_text, ext_len, text, len); |
2472 | start_critical_timings(); |
2473 | local_irq_restore(flags); |
2474 | |
2475 | if (do_cond_resched) |
2476 | cond_resched(); |
2477 | } |
2478 | console_locked = 0; |
2479 | |
2480 | /* Release the exclusive_console once it is used */ |
2481 | if (unlikely(exclusive_console)) |
2482 | exclusive_console = NULL; |
2483 | |
2484 | raw_spin_unlock(&logbuf_lock); |
2485 | |
2486 | up_console_sem(); |
2487 | |
2488 | /* |
2489 | * Someone could have filled up the buffer again, so re-check if there's |
2490 | * something to flush. In case we cannot trylock the console_sem again, |
2491 | * there's a new owner and the console_unlock() from them will do the |
2492 | * flush, no worries. |
2493 | */ |
2494 | raw_spin_lock(&logbuf_lock); |
2495 | retry = console_seq != log_next_seq; |
2496 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2497 | |
2498 | if (retry && console_trylock()) |
2499 | goto again; |
2500 | |
2501 | if (wake_klogd) |
2502 | wake_up_klogd(); |
2503 | } |
2504 | EXPORT_SYMBOL(console_unlock); |
2505 | |
2506 | /** |
2507 | * console_conditional_schedule - yield the CPU if required |
2508 | * |
2509 | * If the console code is currently allowed to sleep, and |
2510 | * if this CPU should yield the CPU to another task, do |
2511 | * so here. |
2512 | * |
2513 | * Must be called within console_lock();. |
2514 | */ |
2515 | void __sched console_conditional_schedule(void) |
2516 | { |
2517 | if (console_may_schedule) |
2518 | cond_resched(); |
2519 | } |
2520 | EXPORT_SYMBOL(console_conditional_schedule); |
2521 | |
/*
 * console_unblank - call every enabled console's ->unblank() hook
 *
 * Used to force blanked screens back on, e.g. to show an oops.
 */
void console_unblank(void)
{
	struct console *c;

	/*
	 * console_unblank can no longer be called in interrupt context unless
	 * oops_in_progress is set to 1..
	 */
	if (oops_in_progress) {
		/* Non-blocking acquire during an oops; bail if contended. */
		if (down_trylock_console_sem() != 0)
			return;
	} else
		console_lock();

	/* Set manually: the trylock path above does not set these. */
	console_locked = 1;
	console_may_schedule = 0;
	for_each_console(c)
		if ((c->flags & CON_ENABLED) && c->unblank)
			c->unblank();
	console_unlock();
}
2543 | |
2544 | /** |
2545 | * console_flush_on_panic - flush console content on panic |
2546 | * |
2547 | * Immediately output all pending messages no matter what. |
2548 | */ |
2549 | void console_flush_on_panic(void) |
2550 | { |
2551 | /* |
2552 | * If someone else is holding the console lock, trylock will fail |
2553 | * and may_schedule may be set. Ignore and proceed to unlock so |
2554 | * that messages are flushed out. As this can be called from any |
2555 | * context and we don't want to get preempted while flushing, |
2556 | * ensure may_schedule is cleared. |
2557 | */ |
2558 | console_trylock(); |
2559 | console_may_schedule = 0; |
2560 | console_unlock(); |
2561 | } |
2562 | |
2563 | /* |
2564 | * Return the console tty driver structure and its associated index |
2565 | */ |
2566 | struct tty_driver *console_device(int *index) |
2567 | { |
2568 | struct console *c; |
2569 | struct tty_driver *driver = NULL; |
2570 | |
2571 | console_lock(); |
2572 | for_each_console(c) { |
2573 | if (!c->device) |
2574 | continue; |
2575 | driver = c->device(c, index); |
2576 | if (driver) |
2577 | break; |
2578 | } |
2579 | console_unlock(); |
2580 | return driver; |
2581 | } |
2582 | |
2583 | /* |
2584 | * Prevent further output on the passed console device so that (for example) |
2585 | * serial drivers can disable console output before suspending a port, and can |
2586 | * re-enable output afterwards. |
2587 | */ |
2588 | void console_stop(struct console *console) |
2589 | { |
2590 | console_lock(); |
2591 | console->flags &= ~CON_ENABLED; |
2592 | console_unlock(); |
2593 | } |
2594 | EXPORT_SYMBOL(console_stop); |
2595 | |
/* Re-enable output on a console previously disabled by console_stop(). */
void console_start(struct console *console)
{
	console_lock();
	console->flags |= CON_ENABLED;
	console_unlock();
}
EXPORT_SYMBOL(console_start);
2603 | |
/* "keep_bootcon" boot param: never auto-unregister boot consoles. */
static int __read_mostly keep_bootcon;

static int __init keep_bootcon_setup(char *str)
{
	keep_bootcon = 1;
	pr_info("debug: skip boot console de-registration.\n");

	return 0;
}

early_param("keep_bootcon", keep_bootcon_setup);
2615 | |
2616 | /* |
2617 | * The console driver calls this routine during kernel initialization |
2618 | * to register the console printing procedure with printk() and to |
2619 | * print any messages that were printed by the kernel before the |
2620 | * console driver was initialized. |
2621 | * |
2622 | * This can happen pretty early during the boot process (because of |
2623 | * early_printk) - sometimes before setup_arch() completes - be careful |
2624 | * of what kernel features are used - they may not be initialised yet. |
2625 | * |
2626 | * There are two types of consoles - bootconsoles (early_printk) and |
2627 | * "real" consoles (everything which is not a bootconsole) which are |
2628 | * handled differently. |
2629 | * - Any number of bootconsoles can be registered at any time. |
2630 | * - As soon as a "real" console is registered, all bootconsoles |
2631 | * will be unregistered automatically. |
2632 | * - Once a "real" console is registered, any attempt to register a |
2633 | * bootconsoles will be rejected |
2634 | */ |
2635 | void register_console(struct console *newcon) |
2636 | { |
2637 | int i; |
2638 | unsigned long flags; |
2639 | struct console *bcon = NULL; |
2640 | struct console_cmdline *c; |
2641 | |
2642 | if (console_drivers) |
2643 | for_each_console(bcon) |
2644 | if (WARN(bcon == newcon, |
2645 | "console '%s%d' already registered\n", |
2646 | bcon->name, bcon->index)) |
2647 | return; |
2648 | |
2649 | /* |
2650 | * before we register a new CON_BOOT console, make sure we don't |
2651 | * already have a valid console |
2652 | */ |
2653 | if (console_drivers && newcon->flags & CON_BOOT) { |
2654 | /* find the last or real console */ |
2655 | for_each_console(bcon) { |
2656 | if (!(bcon->flags & CON_BOOT)) { |
2657 | pr_info("Too late to register bootconsole %s%d\n", |
2658 | newcon->name, newcon->index); |
2659 | return; |
2660 | } |
2661 | } |
2662 | } |
2663 | |
2664 | if (console_drivers && console_drivers->flags & CON_BOOT) |
2665 | bcon = console_drivers; |
2666 | |
2667 | if (preferred_console < 0 || bcon || !console_drivers) |
2668 | preferred_console = selected_console; |
2669 | |
2670 | /* |
2671 | * See if we want to use this console driver. If we |
2672 | * didn't select a console we take the first one |
2673 | * that registers here. |
2674 | */ |
2675 | if (preferred_console < 0) { |
2676 | if (newcon->index < 0) |
2677 | newcon->index = 0; |
2678 | if (newcon->setup == NULL || |
2679 | newcon->setup(newcon, NULL) == 0) { |
2680 | newcon->flags |= CON_ENABLED; |
2681 | if (newcon->device) { |
2682 | newcon->flags |= CON_CONSDEV; |
2683 | preferred_console = 0; |
2684 | } |
2685 | } |
2686 | } |
2687 | |
2688 | /* |
2689 | * See if this console matches one we selected on |
2690 | * the command line. |
2691 | */ |
2692 | for (i = 0, c = console_cmdline; |
2693 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2694 | i++, c++) { |
2695 | if (!newcon->match || |
2696 | newcon->match(newcon, c->name, c->index, c->options) != 0) { |
2697 | /* default matching */ |
2698 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); |
2699 | if (strcmp(c->name, newcon->name) != 0) |
2700 | continue; |
2701 | if (newcon->index >= 0 && |
2702 | newcon->index != c->index) |
2703 | continue; |
2704 | if (newcon->index < 0) |
2705 | newcon->index = c->index; |
2706 | |
2707 | if (_braille_register_console(newcon, c)) |
2708 | return; |
2709 | |
2710 | if (newcon->setup && |
2711 | newcon->setup(newcon, c->options) != 0) |
2712 | break; |
2713 | } |
2714 | |
2715 | newcon->flags |= CON_ENABLED; |
2716 | if (i == selected_console) { |
2717 | newcon->flags |= CON_CONSDEV; |
2718 | preferred_console = selected_console; |
2719 | } |
2720 | break; |
2721 | } |
2722 | |
2723 | if (!(newcon->flags & CON_ENABLED)) |
2724 | return; |
2725 | |
2726 | /* |
2727 | * If we have a bootconsole, and are switching to a real console, |
2728 | * don't print everything out again, since when the boot console, and |
2729 | * the real console are the same physical device, it's annoying to |
2730 | * see the beginning boot messages twice |
2731 | */ |
2732 | if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) |
2733 | newcon->flags &= ~CON_PRINTBUFFER; |
2734 | |
2735 | /* |
2736 | * Put this console in the list - keep the |
2737 | * preferred driver at the head of the list. |
2738 | */ |
2739 | console_lock(); |
2740 | if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) { |
2741 | newcon->next = console_drivers; |
2742 | console_drivers = newcon; |
2743 | if (newcon->next) |
2744 | newcon->next->flags &= ~CON_CONSDEV; |
2745 | } else { |
2746 | newcon->next = console_drivers->next; |
2747 | console_drivers->next = newcon; |
2748 | } |
2749 | |
2750 | if (newcon->flags & CON_EXTENDED) |
2751 | if (!nr_ext_console_drivers++) |
2752 | pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n"); |
2753 | |
2754 | if (newcon->flags & CON_PRINTBUFFER) { |
2755 | /* |
2756 | * console_unlock(); will print out the buffered messages |
2757 | * for us. |
2758 | */ |
2759 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
2760 | console_seq = syslog_seq; |
2761 | console_idx = syslog_idx; |
2762 | console_prev = syslog_prev; |
2763 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
2764 | /* |
2765 | * We're about to replay the log buffer. Only do this to the |
2766 | * just-registered console to avoid excessive message spam to |
2767 | * the already-registered consoles. |
2768 | */ |
2769 | exclusive_console = newcon; |
2770 | } |
2771 | console_unlock(); |
2772 | console_sysfs_notify(); |
2773 | |
2774 | /* |
2775 | * By unregistering the bootconsoles after we enable the real console |
2776 | * we get the "console xxx enabled" message on all the consoles - |
2777 | * boot consoles, real consoles, etc - this is to ensure that end |
2778 | * users know there might be something in the kernel's log buffer that |
2779 | * went to the bootconsole (that they do not see on the real console) |
2780 | */ |
2781 | pr_info("%sconsole [%s%d] enabled\n", |
2782 | (newcon->flags & CON_BOOT) ? "boot" : "" , |
2783 | newcon->name, newcon->index); |
2784 | if (bcon && |
2785 | ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && |
2786 | !keep_bootcon) { |
2787 | /* We need to iterate through all boot consoles, to make |
2788 | * sure we print everything out, before we unregister them. |
2789 | */ |
2790 | for_each_console(bcon) |
2791 | if (bcon->flags & CON_BOOT) |
2792 | unregister_console(bcon); |
2793 | } |
2794 | } |
2795 | EXPORT_SYMBOL(register_console); |
2796 | |
2797 | int unregister_console(struct console *console) |
2798 | { |
2799 | struct console *a, *b; |
2800 | int res; |
2801 | |
2802 | pr_info("%sconsole [%s%d] disabled\n", |
2803 | (console->flags & CON_BOOT) ? "boot" : "" , |
2804 | console->name, console->index); |
2805 | |
2806 | res = _braille_unregister_console(console); |
2807 | if (res) |
2808 | return res; |
2809 | |
2810 | res = 1; |
2811 | console_lock(); |
2812 | if (console_drivers == console) { |
2813 | console_drivers=console->next; |
2814 | res = 0; |
2815 | } else if (console_drivers) { |
2816 | for (a=console_drivers->next, b=console_drivers ; |
2817 | a; b=a, a=b->next) { |
2818 | if (a == console) { |
2819 | b->next = a->next; |
2820 | res = 0; |
2821 | break; |
2822 | } |
2823 | } |
2824 | } |
2825 | |
2826 | if (!res && (console->flags & CON_EXTENDED)) |
2827 | nr_ext_console_drivers--; |
2828 | |
2829 | /* |
2830 | * If this isn't the last console and it has CON_CONSDEV set, we |
2831 | * need to set it on the next preferred console. |
2832 | */ |
2833 | if (console_drivers != NULL && console->flags & CON_CONSDEV) |
2834 | console_drivers->flags |= CON_CONSDEV; |
2835 | |
2836 | console->flags &= ~CON_ENABLED; |
2837 | console_unlock(); |
2838 | console_sysfs_notify(); |
2839 | return res; |
2840 | } |
2841 | EXPORT_SYMBOL(unregister_console); |
2842 | |
2843 | /* |
2844 | * Some boot consoles access data that is in the init section and which will |
2845 | * be discarded after the initcalls have been run. To make sure that no code |
2846 | * will access this data, unregister the boot consoles in a late initcall. |
2847 | * |
2848 | * If for some reason, such as deferred probe or the driver being a loadable |
2849 | * module, the real console hasn't registered yet at this point, there will |
2850 | * be a brief interval in which no messages are logged to the console, which |
2851 | * makes it difficult to diagnose problems that occur during this time. |
2852 | * |
2853 | * To mitigate this problem somewhat, only unregister consoles whose memory |
2854 | * intersects with the init section. Note that code exists elsewhere to get |
2855 | * rid of the boot console as soon as the proper console shows up, so there |
2856 | * won't be side-effects from postponing the removal. |
2857 | */ |
2858 | static int __init printk_late_init(void) |
2859 | { |
2860 | struct console *con; |
2861 | |
2862 | for_each_console(con) { |
2863 | if (!keep_bootcon && con->flags & CON_BOOT) { |
2864 | /* |
2865 | * Make sure to unregister boot consoles whose data |
2866 | * resides in the init section before the init section |
2867 | * is discarded. Boot consoles whose data will stick |
2868 | * around will automatically be unregistered when the |
2869 | * proper console replaces them. |
2870 | */ |
2871 | if (init_section_intersects(con, sizeof(*con))) |
2872 | unregister_console(con); |
2873 | } |
2874 | } |
2875 | hotcpu_notifier(console_cpu_notify, 0); |
2876 | return 0; |
2877 | } |
2878 | late_initcall(printk_late_init); |
2879 | |
#if defined CONFIG_PRINTK
/*
 * Delayed printk version, for scheduler-internal messages:
 */
#define PRINTK_PENDING_WAKEUP	0x01
#define PRINTK_PENDING_OUTPUT	0x02

/* Per-cpu bitmask of PRINTK_PENDING_* work deferred to irq_work context. */
static DEFINE_PER_CPU(int, printk_pending);

/* irq_work callback: perform the console flush / klogd wakeup that the
 * original printk context could not do safely itself. */
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	int pending = __this_cpu_xchg(printk_pending, 0);

	if (pending & PRINTK_PENDING_OUTPUT) {
		/* If trylock fails, someone else is doing the printing */
		if (console_trylock())
			console_unlock();
	}

	if (pending & PRINTK_PENDING_WAKEUP)
		wake_up_interruptible(&log_wait);
}

static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func = wake_up_klogd_work_func,
	.flags = IRQ_WORK_LAZY,
};
2907 | |
/*
 * wake_up_klogd - schedule a deferred wakeup of log readers
 *
 * Safe from any context: the actual wakeup happens later from irq_work.
 * Preemption is disabled so the pending bit and the irq_work queueing
 * hit the same CPU.
 */
void wake_up_klogd(void)
{
	preempt_disable();
	if (waitqueue_active(&log_wait)) {
		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	}
	preempt_enable();
}
2917 | |
/*
 * printk_deferred - printk that defers console output to irq_work
 *
 * For contexts (e.g. scheduler internals) where calling the console
 * drivers directly could deadlock: the message is logged at
 * LOGLEVEL_SCHED and the flush is punted to this CPU's irq_work.
 * Returns the number of characters logged.
 */
int printk_deferred(const char *fmt, ...)
{
	va_list args;
	int r;

	preempt_disable();
	va_start(args, fmt);
	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
	va_end(args);

	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
	preempt_enable();

	return r;
}
2934 | |
2935 | /* |
2936 | * printk rate limiting, lifted from the networking subsystem. |
2937 | * |
2938 | * This enforces a rate limit: not more than 10 kernel messages |
2939 | * every 5s to make a denial-of-service attack impossible. |
2940 | */ |
2941 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
2942 | |
/*
 * __printk_ratelimit - helper behind the printk_ratelimit() macro.
 * @func is the caller's name, passed to ___ratelimit() for reporting.
 * Returns the ___ratelimit() verdict (nonzero means "go ahead").
 */
int __printk_ratelimit(const char *func)
{
	return ___ratelimit(&printk_ratelimit_state, func);
}
EXPORT_SYMBOL(__printk_ratelimit);
2948 | |
2949 | /** |
2950 | * printk_timed_ratelimit - caller-controlled printk ratelimiting |
2951 | * @caller_jiffies: pointer to caller's state |
2952 | * @interval_msecs: minimum interval between prints |
2953 | * |
2954 | * printk_timed_ratelimit() returns true if more than @interval_msecs |
2955 | * milliseconds have elapsed since the last time printk_timed_ratelimit() |
2956 | * returned true. |
2957 | */ |
2958 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
2959 | unsigned int interval_msecs) |
2960 | { |
2961 | unsigned long elapsed = jiffies - *caller_jiffies; |
2962 | |
2963 | if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs)) |
2964 | return false; |
2965 | |
2966 | *caller_jiffies = jiffies; |
2967 | return true; |
2968 | } |
2969 | EXPORT_SYMBOL(printk_timed_ratelimit); |
2970 | |
/* Protects additions/removals on dump_list; readers walk it under RCU. */
static DEFINE_SPINLOCK(dump_list_lock);
/* List of registered kmsg dumpers, linked via kmsg_dumper::list. */
static LIST_HEAD(dump_list);
2973 | |
2974 | /** |
2975 | * kmsg_dump_register - register a kernel log dumper. |
2976 | * @dumper: pointer to the kmsg_dumper structure |
2977 | * |
2978 | * Adds a kernel log dumper to the system. The dump callback in the |
2979 | * structure will be called when the kernel oopses or panics and must be |
2980 | * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. |
2981 | */ |
2982 | int kmsg_dump_register(struct kmsg_dumper *dumper) |
2983 | { |
2984 | unsigned long flags; |
2985 | int err = -EBUSY; |
2986 | |
2987 | /* The dump callback needs to be set */ |
2988 | if (!dumper->dump) |
2989 | return -EINVAL; |
2990 | |
2991 | spin_lock_irqsave(&dump_list_lock, flags); |
2992 | /* Don't allow registering multiple times */ |
2993 | if (!dumper->registered) { |
2994 | dumper->registered = 1; |
2995 | list_add_tail_rcu(&dumper->list, &dump_list); |
2996 | err = 0; |
2997 | } |
2998 | spin_unlock_irqrestore(&dump_list_lock, flags); |
2999 | |
3000 | return err; |
3001 | } |
3002 | EXPORT_SYMBOL_GPL(kmsg_dump_register); |
3003 | |
3004 | /** |
3005 | * kmsg_dump_unregister - unregister a kmsg dumper. |
3006 | * @dumper: pointer to the kmsg_dumper structure |
3007 | * |
3008 | * Removes a dump device from the system. Returns zero on success and |
3009 | * %-EINVAL otherwise. |
3010 | */ |
3011 | int kmsg_dump_unregister(struct kmsg_dumper *dumper) |
3012 | { |
3013 | unsigned long flags; |
3014 | int err = -EINVAL; |
3015 | |
3016 | spin_lock_irqsave(&dump_list_lock, flags); |
3017 | if (dumper->registered) { |
3018 | dumper->registered = 0; |
3019 | list_del_rcu(&dumper->list); |
3020 | err = 0; |
3021 | } |
3022 | spin_unlock_irqrestore(&dump_list_lock, flags); |
3023 | synchronize_rcu(); |
3024 | |
3025 | return err; |
3026 | } |
3027 | EXPORT_SYMBOL_GPL(kmsg_dump_unregister); |
3028 | |
/* If set, dump on every reason, not just oops/panic; root-writable param. */
static bool always_kmsg_dump;
module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3031 | |
3032 | /** |
3033 | * kmsg_dump - dump kernel log to kernel message dumpers. |
3034 | * @reason: the reason (oops, panic etc) for dumping |
3035 | * |
3036 | * Call each of the registered dumper's dump() callback, which can |
3037 | * retrieve the kmsg records with kmsg_dump_get_line() or |
3038 | * kmsg_dump_get_buffer(). |
3039 | */ |
3040 | void kmsg_dump(enum kmsg_dump_reason reason) |
3041 | { |
3042 | struct kmsg_dumper *dumper; |
3043 | unsigned long flags; |
3044 | |
3045 | if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) |
3046 | return; |
3047 | |
3048 | rcu_read_lock(); |
3049 | list_for_each_entry_rcu(dumper, &dump_list, list) { |
3050 | if (dumper->max_reason && reason > dumper->max_reason) |
3051 | continue; |
3052 | |
3053 | /* initialize iterator with data about the stored records */ |
3054 | dumper->active = true; |
3055 | |
3056 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3057 | dumper->cur_seq = clear_seq; |
3058 | dumper->cur_idx = clear_idx; |
3059 | dumper->next_seq = log_next_seq; |
3060 | dumper->next_idx = log_next_idx; |
3061 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3062 | |
3063 | /* invoke dumper which will iterate over records */ |
3064 | dumper->dump(dumper, reason); |
3065 | |
3066 | /* reset iterator */ |
3067 | dumper->active = false; |
3068 | } |
3069 | rcu_read_unlock(); |
3070 | } |
3071 | |
3072 | /** |
3073 | * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version) |
3074 | * @dumper: registered kmsg dumper |
3075 | * @syslog: include the "<4>" prefixes |
3076 | * @line: buffer to copy the line to |
3077 | * @size: maximum size of the buffer |
3078 | * @len: length of line placed into buffer |
3079 | * |
3080 | * Start at the beginning of the kmsg buffer, with the oldest kmsg |
3081 | * record, and copy one record into the provided buffer. |
3082 | * |
3083 | * Consecutive calls will return the next available record moving |
3084 | * towards the end of the buffer with the youngest messages. |
3085 | * |
3086 | * A return value of FALSE indicates that there are no more records to |
3087 | * read. |
3088 | * |
3089 | * The function is similar to kmsg_dump_get_line(), but grabs no locks. |
3090 | */ |
3091 | bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, |
3092 | char *line, size_t size, size_t *len) |
3093 | { |
3094 | struct printk_log *msg; |
3095 | size_t l = 0; |
3096 | bool ret = false; |
3097 | |
3098 | if (!dumper->active) |
3099 | goto out; |
3100 | |
3101 | if (dumper->cur_seq < log_first_seq) { |
3102 | /* messages are gone, move to first available one */ |
3103 | dumper->cur_seq = log_first_seq; |
3104 | dumper->cur_idx = log_first_idx; |
3105 | } |
3106 | |
3107 | /* last entry */ |
3108 | if (dumper->cur_seq >= log_next_seq) |
3109 | goto out; |
3110 | |
3111 | msg = log_from_idx(dumper->cur_idx); |
3112 | l = msg_print_text(msg, 0, syslog, line, size); |
3113 | |
3114 | dumper->cur_idx = log_next(dumper->cur_idx); |
3115 | dumper->cur_seq++; |
3116 | ret = true; |
3117 | out: |
3118 | if (len) |
3119 | *len = l; |
3120 | return ret; |
3121 | } |
3122 | |
3123 | /** |
3124 | * kmsg_dump_get_line - retrieve one kmsg log line |
3125 | * @dumper: registered kmsg dumper |
3126 | * @syslog: include the "<4>" prefixes |
3127 | * @line: buffer to copy the line to |
3128 | * @size: maximum size of the buffer |
3129 | * @len: length of line placed into buffer |
3130 | * |
3131 | * Start at the beginning of the kmsg buffer, with the oldest kmsg |
3132 | * record, and copy one record into the provided buffer. |
3133 | * |
3134 | * Consecutive calls will return the next available record moving |
3135 | * towards the end of the buffer with the youngest messages. |
3136 | * |
3137 | * A return value of FALSE indicates that there are no more records to |
3138 | * read. |
3139 | */ |
3140 | bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, |
3141 | char *line, size_t size, size_t *len) |
3142 | { |
3143 | unsigned long flags; |
3144 | bool ret; |
3145 | |
3146 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3147 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); |
3148 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3149 | |
3150 | return ret; |
3151 | } |
3152 | EXPORT_SYMBOL_GPL(kmsg_dump_get_line); |
3153 | |
3154 | /** |
3155 | * kmsg_dump_get_buffer - copy kmsg log lines |
3156 | * @dumper: registered kmsg dumper |
3157 | * @syslog: include the "<4>" prefixes |
3158 | * @buf: buffer to copy the line to |
3159 | * @size: maximum size of the buffer |
3160 | * @len: length of line placed into buffer |
3161 | * |
3162 | * Start at the end of the kmsg buffer and fill the provided buffer |
3163 | * with as many of the the *youngest* kmsg records that fit into it. |
3164 | * If the buffer is large enough, all available kmsg records will be |
3165 | * copied with a single call. |
3166 | * |
3167 | * Consecutive calls will fill the buffer with the next block of |
3168 | * available older records, not including the earlier retrieved ones. |
3169 | * |
3170 | * A return value of FALSE indicates that there are no more records to |
3171 | * read. |
3172 | */ |
3173 | bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, |
3174 | char *buf, size_t size, size_t *len) |
3175 | { |
3176 | unsigned long flags; |
3177 | u64 seq; |
3178 | u32 idx; |
3179 | u64 next_seq; |
3180 | u32 next_idx; |
3181 | enum log_flags prev; |
3182 | size_t l = 0; |
3183 | bool ret = false; |
3184 | |
3185 | if (!dumper->active) |
3186 | goto out; |
3187 | |
3188 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3189 | if (dumper->cur_seq < log_first_seq) { |
3190 | /* messages are gone, move to first available one */ |
3191 | dumper->cur_seq = log_first_seq; |
3192 | dumper->cur_idx = log_first_idx; |
3193 | } |
3194 | |
3195 | /* last entry */ |
3196 | if (dumper->cur_seq >= dumper->next_seq) { |
3197 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3198 | goto out; |
3199 | } |
3200 | |
3201 | /* calculate length of entire buffer */ |
3202 | seq = dumper->cur_seq; |
3203 | idx = dumper->cur_idx; |
3204 | prev = 0; |
3205 | while (seq < dumper->next_seq) { |
3206 | struct printk_log *msg = log_from_idx(idx); |
3207 | |
3208 | l += msg_print_text(msg, prev, true, NULL, 0); |
3209 | idx = log_next(idx); |
3210 | seq++; |
3211 | prev = msg->flags; |
3212 | } |
3213 | |
3214 | /* move first record forward until length fits into the buffer */ |
3215 | seq = dumper->cur_seq; |
3216 | idx = dumper->cur_idx; |
3217 | prev = 0; |
3218 | while (l > size && seq < dumper->next_seq) { |
3219 | struct printk_log *msg = log_from_idx(idx); |
3220 | |
3221 | l -= msg_print_text(msg, prev, true, NULL, 0); |
3222 | idx = log_next(idx); |
3223 | seq++; |
3224 | prev = msg->flags; |
3225 | } |
3226 | |
3227 | /* last message in next interation */ |
3228 | next_seq = seq; |
3229 | next_idx = idx; |
3230 | |
3231 | l = 0; |
3232 | while (seq < dumper->next_seq) { |
3233 | struct printk_log *msg = log_from_idx(idx); |
3234 | |
3235 | l += msg_print_text(msg, prev, syslog, buf + l, size - l); |
3236 | idx = log_next(idx); |
3237 | seq++; |
3238 | prev = msg->flags; |
3239 | } |
3240 | |
3241 | dumper->next_seq = next_seq; |
3242 | dumper->next_idx = next_idx; |
3243 | ret = true; |
3244 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3245 | out: |
3246 | if (len) |
3247 | *len = l; |
3248 | return ret; |
3249 | } |
3250 | EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); |
3251 | |
3252 | /** |
3253 | * kmsg_dump_rewind_nolock - reset the interator (unlocked version) |
3254 | * @dumper: registered kmsg dumper |
3255 | * |
3256 | * Reset the dumper's iterator so that kmsg_dump_get_line() and |
3257 | * kmsg_dump_get_buffer() can be called again and used multiple |
3258 | * times within the same dumper.dump() callback. |
3259 | * |
3260 | * The function is similar to kmsg_dump_rewind(), but grabs no locks. |
3261 | */ |
3262 | void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) |
3263 | { |
3264 | dumper->cur_seq = clear_seq; |
3265 | dumper->cur_idx = clear_idx; |
3266 | dumper->next_seq = log_next_seq; |
3267 | dumper->next_idx = log_next_idx; |
3268 | } |
3269 | |
3270 | /** |
3271 | * kmsg_dump_rewind - reset the interator |
3272 | * @dumper: registered kmsg dumper |
3273 | * |
3274 | * Reset the dumper's iterator so that kmsg_dump_get_line() and |
3275 | * kmsg_dump_get_buffer() can be called again and used multiple |
3276 | * times within the same dumper.dump() callback. |
3277 | */ |
3278 | void kmsg_dump_rewind(struct kmsg_dumper *dumper) |
3279 | { |
3280 | unsigned long flags; |
3281 | |
3282 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3283 | kmsg_dump_rewind_nolock(dumper); |
3284 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3285 | } |
3286 | EXPORT_SYMBOL_GPL(kmsg_dump_rewind); |
3287 | |
/* Arch-supplied ID string printed by dump_stack_print_info(); set once at boot. */
static char dump_stack_arch_desc_str[128];
3289 | |
3290 | /** |
3291 | * dump_stack_set_arch_desc - set arch-specific str to show with task dumps |
3292 | * @fmt: printf-style format string |
3293 | * @...: arguments for the format string |
3294 | * |
3295 | * The configured string will be printed right after utsname during task |
3296 | * dumps. Usually used to add arch-specific system identifiers. If an |
3297 | * arch wants to make use of such an ID string, it should initialize this |
3298 | * as soon as possible during boot. |
3299 | */ |
3300 | void __init dump_stack_set_arch_desc(const char *fmt, ...) |
3301 | { |
3302 | va_list args; |
3303 | |
3304 | va_start(args, fmt); |
3305 | vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str), |
3306 | fmt, args); |
3307 | va_end(args); |
3308 | } |
3309 | |
3310 | /** |
3311 | * dump_stack_print_info - print generic debug info for dump_stack() |
3312 | * @log_lvl: log level |
3313 | * |
3314 | * Arch-specific dump_stack() implementations can use this function to |
3315 | * print out the same debug information as the generic dump_stack(). |
3316 | */ |
3317 | void dump_stack_print_info(const char *log_lvl) |
3318 | { |
3319 | printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n", |
3320 | log_lvl, raw_smp_processor_id(), current->pid, current->comm, |
3321 | print_tainted(), init_utsname()->release, |
3322 | (int)strcspn(init_utsname()->version, " "), |
3323 | init_utsname()->version); |
3324 | |
3325 | if (dump_stack_arch_desc_str[0] != '\0') |
3326 | printk("%sHardware name: %s\n", |
3327 | log_lvl, dump_stack_arch_desc_str); |
3328 | |
3329 | print_worker_info(log_lvl, current); |
3330 | } |
3331 | |
3332 | /** |
3333 | * show_regs_print_info - print generic debug info for show_regs() |
3334 | * @log_lvl: log level |
3335 | * |
3336 | * show_regs() implementations can use this function to print out generic |
3337 | * debug information. |
3338 | */ |
3339 | void show_regs_print_info(const char *log_lvl) |
3340 | { |
3341 | dump_stack_print_info(log_lvl); |
3342 | |
3343 | printk("%stask: %p task.stack: %p\n", |
3344 | log_lvl, current, task_stack_page(current)); |
3345 | } |
3346 | |
#endif /* CONFIG_PRINTK */
3348 |