/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
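
/*
 * Editorial sketch of the nesting rules above (illustrative only, not part
 * of the original file): the names below are the real locks in this file,
 * but the surrounding logic is elided. scan_object() and scan_block()
 * further down follow this shape during a scan:
 *
 *	mutex_lock(&scan_mutex);
 *	...
 *	spin_lock_irqsave(&object->lock, flags);	<- object being scanned
 *	...
 *	read_lock_irqsave(&kmemleak_lock, flags2);	<- pointer look-up
 *	...
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	(unlock everything in reverse order)
 */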

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
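
/*
 * Worked example (editorial note): since only the GFP_KERNEL and GFP_ATOMIC
 * bits of the caller's mask survive,
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. kmemleak's internal allocations inherit the sleeping behaviour of the
 * original allocation but are made low-priority and silent on failure.
 */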

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;
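
/*
 * Editorial sketch of the early-log lifecycle (hedged; kmemleak_init() is
 * outside this excerpt): before kmemleak is initialized, a call such as
 *
 *	kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *
 * only records {KMEMLEAK_ALLOC, ptr, size, 1} plus a stack trace in
 * early_log[crt_early_log++] via log_early(). Once the internal caches
 * exist, kmemleak_init() replays each entry according to its op_type,
 * e.g. KMEMLEAK_ALLOC through early_alloc() below, so the objects end up
 * tracked as if kmemleak had been enabled from the start.
 */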

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
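
/*
 * Worked example of the color encoding (editorial note): with
 * min_count == 1, a freshly whitened object has count == 0, so
 * color_white() is true; once the scan finds one pointer to it,
 * count == 1 >= min_count and color_gray() becomes true. An object painted
 * with KMEMLEAK_BLACK has min_count == -1, so it is neither white nor gray
 * and is therefore never reported nor added to the gray list.
 */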

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
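
/*
 * Example of the look-up semantics above (editorial illustration): for an
 * object with pointer == 0x1000 and size == 0x100,
 *
 *	lookup_object(0x1000, 0)	-> the object (exact match)
 *	lookup_object(0x1010, 0)	-> NULL, after the "Found object by
 *					   alias" warning (inside the block)
 *	lookup_object(0x1010, 1)	-> the object (alias allowed)
 *	lookup_object(0x1100, 1)	-> NULL (the range is exclusive at
 *					   pointer + size)
 */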

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object must no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
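
/*
 * Editorial sketch of the canonical reference pattern (see the locking notes
 * at the top of the file): code that keeps a kmemleak_object around outside
 * kmemleak_lock pairs get_object() with put_object() under RCU, e.g.
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(with kmemleak_lock held)
 *	if (object && !get_object(object))
 *		object = NULL;			(already being freed)
 *	rcu_read_unlock();
 *	...
 *	put_object(object);			(may schedule the RCU free)
 *
 * find_and_get_object() below implements the look-up half of this pattern.
 */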

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
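
/*
 * Editorial usage sketch (hypothetical caller, not part of this file): a
 * subsystem with its own allocator can make its blocks visible to kmemleak
 * explicitly; min_count == 1 means "report if fewer than one reference is
 * found during scanning". my_pool_alloc()/my_pool_free() are made-up names:
 *
 *	void *buf = my_pool_alloc(pool, size);
 *	kmemleak_alloc(buf, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(buf);
 *	my_pool_free(pool, buf);
 *
 * The standard allocators (kmalloc, vmalloc, kmem_cache_alloc etc.) already
 * call kmemleak_alloc()/kmemleak_free() themselves.
 */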

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
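
/*
 * Editorial usage sketch (hypothetical structure, not part of this file):
 * if only one field of a large object can hold pointers, restricting the
 * scan avoids keeping leaked objects alive because their addresses happen
 * to appear in the raw data. struct my_obj is a made-up example type:
 *
 *	struct my_obj {
 *		char raw[4096];		(no pointers in here)
 *		struct list_head list;	(the only real references)
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);
 */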

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
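
/*
 * Editorial note on the *_phys helpers around this point: they simply
 * translate a lowmem physical address with __va() before delegating to the
 * virtual-address API, e.g. (illustrative, hypothetical caller):
 *
 *	phys_addr_t phys = my_phys_allocator(size, align);
 *	kmemleak_alloc_phys(phys, size, 0, 0);
 *
 * Highmem pages have no permanent linear mapping, hence the
 * PHYS_PFN(phys) < max_low_pfn guard.
 */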

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}
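
/*
 * Editorial note on the checksum (see its use in kmemleak_scan() below): a
 * white object whose contents changed since the previous scan is still
 * being written to and is therefore likely still in use. Example timeline:
 *
 *	scan N:   obj found white, its crc32 is recorded
 *	          ...the owner writes a new pointer into obj...
 *	scan N+1: update_checksum(obj) returns true, so obj is temporarily
 *	          colored gray and re-scanned instead of being reported
 */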

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);
	scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
1525 | |
1526 | /* |
1527 | * Thread function performing automatic memory scanning. Unreferenced objects |
1528 | * at the end of a memory scan are reported but only the first time. |
1529 | */ |
1530 | static int kmemleak_scan_thread(void *arg) |
1531 | { |
1532 | static int first_run = 1; |
1533 | |
1534 | pr_info("Automatic memory scanning thread started\n"); |
1535 | set_user_nice(current, 10); |
1536 | |
1537 | /* |
1538 | * Wait before the first scan to allow the system to fully initialize. |
1539 | */ |
1540 | if (first_run) { |
1541 | signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000); |
1542 | first_run = 0; |
1543 | while (timeout && !kthread_should_stop()) |
1544 | timeout = schedule_timeout_interruptible(timeout); |
1545 | } |
1546 | |
1547 | while (!kthread_should_stop()) { |
1548 | signed long timeout = jiffies_scan_wait; |
1549 | |
1550 | mutex_lock(&scan_mutex); |
1551 | kmemleak_scan(); |
1552 | mutex_unlock(&scan_mutex); |
1553 | |
1554 | /* wait before the next scan */ |
1555 | while (timeout && !kthread_should_stop()) |
1556 | timeout = schedule_timeout_interruptible(timeout); |
1557 | } |
1558 | |
1559 | pr_info("Automatic memory scanning thread ended\n"); |
1560 | |
1561 | return 0; |
1562 | } |
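
/*
 * The scan thread is started and stopped at run time via the "scan=on" and
 * "scan=off" commands handled in kmemleak_write() below.
 */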
1563 | |
1564 | /* |
1565 | * Start the automatic memory scanning thread. This function must be called |
1566 | * with the scan_mutex held. |
1567 | */ |
1568 | static void start_scan_thread(void) |
1569 | { |
1570 | if (scan_thread) |
1571 | return; |
1572 | scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); |
1573 | if (IS_ERR(scan_thread)) { |
1574 | pr_warn("Failed to create the scan thread\n"); |
1575 | scan_thread = NULL; |
1576 | } |
1577 | } |
1578 | |
1579 | /* |
1580 | * Stop the automatic memory scanning thread. |
1581 | */ |
1582 | static void stop_scan_thread(void) |
1583 | { |
1584 | if (scan_thread) { |
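		/* kthread_stop() blocks until kmemleak_scan_thread() exits */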
1585 | kthread_stop(scan_thread); |
1586 | scan_thread = NULL; |
1587 | } |
1588 | } |
1589 | |
1590 | /* |
1591 | * Iterate over the object_list and return the first valid object at or after |
 * the required position with its use_count incremented.
1594 | */ |
1595 | static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) |
1596 | { |
1597 | struct kmemleak_object *object; |
1598 | loff_t n = *pos; |
1599 | int err; |
1600 | |
1601 | err = mutex_lock_interruptible(&scan_mutex); |
1602 | if (err < 0) |
1603 | return ERR_PTR(err); |
1604 | |
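	/*
	 * The RCU read lock taken here is kept for the whole seq_file
	 * iteration and only released in kmemleak_seq_stop().
	 */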
1605 | rcu_read_lock(); |
1606 | list_for_each_entry_rcu(object, &object_list, object_list) { |
1607 | if (n-- > 0) |
1608 | continue; |
1609 | if (get_object(object)) |
1610 | goto out; |
1611 | } |
1612 | object = NULL; |
1613 | out: |
1614 | return object; |
1615 | } |
1616 | |
1617 | /* |
1618 | * Return the next object in the object_list. The function decrements the |
 * use_count of the previous object and increments that of the next one.
1620 | */ |
1621 | static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
1622 | { |
1623 | struct kmemleak_object *prev_obj = v; |
1624 | struct kmemleak_object *next_obj = NULL; |
1625 | struct kmemleak_object *obj = prev_obj; |
1626 | |
1627 | ++(*pos); |
1628 | |
1629 | list_for_each_entry_continue_rcu(obj, &object_list, object_list) { |
1630 | if (get_object(obj)) { |
1631 | next_obj = obj; |
1632 | break; |
1633 | } |
1634 | } |
1635 | |
1636 | put_object(prev_obj); |
1637 | return next_obj; |
1638 | } |
1639 | |
1640 | /* |
 * Decrement the use_count of the last object returned, if any.
1642 | */ |
1643 | static void kmemleak_seq_stop(struct seq_file *seq, void *v) |
1644 | { |
1645 | if (!IS_ERR(v)) { |
1646 | /* |
		 * kmemleak_seq_start() may return ERR_PTR if waiting on the
		 * scan_mutex was interrupted, so only release it if !IS_ERR(v).
1649 | */ |
1650 | rcu_read_unlock(); |
1651 | mutex_unlock(&scan_mutex); |
1652 | if (v) |
1653 | put_object(v); |
1654 | } |
1655 | } |
1656 | |
1657 | /* |
1658 | * Print the information for an unreferenced object to the seq file. |
1659 | */ |
1660 | static int kmemleak_seq_show(struct seq_file *seq, void *v) |
1661 | { |
1662 | struct kmemleak_object *object = v; |
1663 | unsigned long flags; |
1664 | |
1665 | spin_lock_irqsave(&object->lock, flags); |
1666 | if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) |
1667 | print_unreferenced(seq, object); |
1668 | spin_unlock_irqrestore(&object->lock, flags); |
1669 | return 0; |
1670 | } |
1671 | |
1672 | static const struct seq_operations kmemleak_seq_ops = { |
1673 | .start = kmemleak_seq_start, |
1674 | .next = kmemleak_seq_next, |
1675 | .stop = kmemleak_seq_stop, |
1676 | .show = kmemleak_seq_show, |
1677 | }; |
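
/*
 * Reading the /sys/kernel/debug/kmemleak file walks the object_list via the
 * seq_file callbacks above and prints every reported leak, e.g. (output
 * format of print_unreferenced(), address purely illustrative):
 *
 *   # cat /sys/kernel/debug/kmemleak
 *   unreferenced object 0xffff880072f00e00 (size 64):
 *     ...
 */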
1678 | |
1679 | static int kmemleak_open(struct inode *inode, struct file *file) |
1680 | { |
1681 | return seq_open(file, &kmemleak_seq_ops); |
1682 | } |
1683 | |
1684 | static int dump_str_object_info(const char *str) |
1685 | { |
1686 | unsigned long flags; |
1687 | struct kmemleak_object *object; |
1688 | unsigned long addr; |
1689 | |
1690 | if (kstrtoul(str, 0, &addr)) |
1691 | return -EINVAL; |
1692 | object = find_and_get_object(addr, 0); |
1693 | if (!object) { |
1694 | pr_info("Unknown object at 0x%08lx\n", addr); |
1695 | return -EINVAL; |
1696 | } |
1697 | |
1698 | spin_lock_irqsave(&object->lock, flags); |
1699 | dump_object_info(object); |
1700 | spin_unlock_irqrestore(&object->lock, flags); |
1701 | |
1702 | put_object(object); |
1703 | return 0; |
1704 | } |
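
/*
 * For example (illustrative address), writing "dump=0xffff880072f00e00" to
 * the kmemleak debugfs file prints the metadata of the object starting at
 * that address via dump_object_info() above.
 */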
1705 | |
1706 | /* |
1707 | * We use grey instead of black to ensure we can do future scans on the same |
 * objects. If we did not, such black objects could later hold the only
 * references to newly allocated objects, which would then be falsely
 * reported as leaks.
1711 | */ |
1712 | static void kmemleak_clear(void) |
1713 | { |
1714 | struct kmemleak_object *object; |
1715 | unsigned long flags; |
1716 | |
1717 | rcu_read_lock(); |
1718 | list_for_each_entry_rcu(object, &object_list, object_list) { |
1719 | spin_lock_irqsave(&object->lock, flags); |
1720 | if ((object->flags & OBJECT_REPORTED) && |
1721 | unreferenced_object(object)) |
1722 | __paint_it(object, KMEMLEAK_GREY); |
1723 | spin_unlock_irqrestore(&object->lock, flags); |
1724 | } |
1725 | rcu_read_unlock(); |
1726 | |
1727 | kmemleak_found_leaks = false; |
1728 | } |
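
/*
 * kmemleak_clear() runs when "clear" is written to the debugfs file while
 * kmemleak is still enabled: the currently reported objects are no longer
 * printed, while leaks found by later scans are still detected and reported.
 */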
1729 | |
1730 | static void __kmemleak_do_cleanup(void); |
1731 | |
1732 | /* |
1733 | * File write operation to configure kmemleak at run-time. The following |
1734 | * commands can be written to the /sys/kernel/debug/kmemleak file: |
1735 | * off - disable kmemleak (irreversible) |
1736 | * stack=on - enable the task stacks scanning |
 * stack=off	- disable the task stacks scanning
1738 | * scan=on - start the automatic memory scanning thread |
1739 | * scan=off - stop the automatic memory scanning thread |
1740 | * scan=... - set the automatic memory scanning period in seconds (0 to |
1741 | * disable it) |
1742 | * scan - trigger a memory scan |
 * clear	- mark all currently reported unreferenced kmemleak objects as
1744 | * grey to ignore printing them, or free all kmemleak objects |
1745 | * if kmemleak has been disabled. |
1746 | * dump=... - dump information about the object found at the given address |
1747 | */ |
1748 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, |
1749 | size_t size, loff_t *ppos) |
1750 | { |
1751 | char buf[64]; |
1752 | int buf_size; |
1753 | int ret; |
1754 | |
1755 | buf_size = min(size, (sizeof(buf) - 1)); |
1756 | if (strncpy_from_user(buf, user_buf, buf_size) < 0) |
1757 | return -EFAULT; |
1758 | buf[buf_size] = 0; |
1759 | |
1760 | ret = mutex_lock_interruptible(&scan_mutex); |
1761 | if (ret < 0) |
1762 | return ret; |
1763 | |
1764 | if (strncmp(buf, "clear", 5) == 0) { |
1765 | if (kmemleak_enabled) |
1766 | kmemleak_clear(); |
1767 | else |
1768 | __kmemleak_do_cleanup(); |
1769 | goto out; |
1770 | } |
1771 | |
1772 | if (!kmemleak_enabled) { |
1773 | ret = -EBUSY; |
1774 | goto out; |
1775 | } |
1776 | |
1777 | if (strncmp(buf, "off", 3) == 0) |
1778 | kmemleak_disable(); |
1779 | else if (strncmp(buf, "stack=on", 8) == 0) |
1780 | kmemleak_stack_scan = 1; |
1781 | else if (strncmp(buf, "stack=off", 9) == 0) |
1782 | kmemleak_stack_scan = 0; |
1783 | else if (strncmp(buf, "scan=on", 7) == 0) |
1784 | start_scan_thread(); |
1785 | else if (strncmp(buf, "scan=off", 8) == 0) |
1786 | stop_scan_thread(); |
1787 | else if (strncmp(buf, "scan=", 5) == 0) { |
1788 | unsigned long secs; |
1789 | |
1790 | ret = kstrtoul(buf + 5, 0, &secs); |
1791 | if (ret < 0) |
1792 | goto out; |
1793 | stop_scan_thread(); |
1794 | if (secs) { |
1795 | jiffies_scan_wait = msecs_to_jiffies(secs * 1000); |
1796 | start_scan_thread(); |
1797 | } |
1798 | } else if (strncmp(buf, "scan", 4) == 0) |
1799 | kmemleak_scan(); |
1800 | else if (strncmp(buf, "dump=", 5) == 0) |
1801 | ret = dump_str_object_info(buf + 5); |
1802 | else |
1803 | ret = -EINVAL; |
1804 | |
1805 | out: |
1806 | mutex_unlock(&scan_mutex); |
1807 | if (ret < 0) |
1808 | return ret; |
1809 | |
1810 | /* ignore the rest of the buffer, only one command at a time */ |
1811 | *ppos += size; |
1812 | return size; |
1813 | } |
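
/*
 * Typical usage of the commands above (illustrative shell session; debugfs
 * assumed mounted at /sys/kernel/debug):
 *
 *   echo scan > /sys/kernel/debug/kmemleak	# trigger an immediate scan
 *   cat /sys/kernel/debug/kmemleak		# list the suspected leaks
 *   echo clear > /sys/kernel/debug/kmemleak	# mute the current reports
 *   echo scan=600 > /sys/kernel/debug/kmemleak	# rescan every 600 seconds
 */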
1814 | |
1815 | static const struct file_operations kmemleak_fops = { |
1816 | .owner = THIS_MODULE, |
1817 | .open = kmemleak_open, |
1818 | .read = seq_read, |
1819 | .write = kmemleak_write, |
1820 | .llseek = seq_lseek, |
1821 | .release = seq_release, |
1822 | }; |
1823 | |
1824 | static void __kmemleak_do_cleanup(void) |
1825 | { |
1826 | struct kmemleak_object *object; |
1827 | |
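	/*
	 * delete_object_full() unlinks each object from the object_list and
	 * the lookup tree and drops its initial reference; the RCU read lock
	 * keeps this list traversal safe while entries are being removed.
	 */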
1828 | rcu_read_lock(); |
1829 | list_for_each_entry_rcu(object, &object_list, object_list) |
1830 | delete_object_full(object->pointer); |
1831 | rcu_read_unlock(); |
1832 | } |
1833 | |
1834 | /* |
 * Stop the memory scanning thread and, if no leaks have been reported, free
 * the kmemleak internal objects (otherwise kmemleak may still hold useful
 * information on memory leaks).
1838 | */ |
1839 | static void kmemleak_do_cleanup(struct work_struct *work) |
1840 | { |
1841 | stop_scan_thread(); |
1842 | |
1843 | mutex_lock(&scan_mutex); |
1844 | /* |
	 * Once the scan thread has stopped, it is safe to stop tracking
	 * object freeing. The ordering of the scan thread stopping and the
	 * memory accesses below is guaranteed by the kthread_stop()
	 * function.
1849 | */ |
1850 | kmemleak_free_enabled = 0; |
1851 | mutex_unlock(&scan_mutex); |
1852 | |
1853 | if (!kmemleak_found_leaks) |
1854 | __kmemleak_do_cleanup(); |
1855 | else |
1856 | pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n"); |
1857 | } |
1858 | |
1859 | static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup); |
1860 | |
1861 | /* |
1862 | * Disable kmemleak. No memory allocation/freeing will be traced once this |
1863 | * function is called. Disabling kmemleak is an irreversible operation. |
1864 | */ |
1865 | static void kmemleak_disable(void) |
1866 | { |
1867 | /* atomically check whether it was already invoked */ |
1868 | if (cmpxchg(&kmemleak_error, 0, 1)) |
1869 | return; |
1870 | |
1871 | /* stop any memory operation tracing */ |
1872 | kmemleak_enabled = 0; |
1873 | |
1874 | /* check whether it is too early for a kernel thread */ |
1875 | if (kmemleak_initialized) |
1876 | schedule_work(&cleanup_work); |
1877 | else |
1878 | kmemleak_free_enabled = 0; |
1879 | |
1880 | pr_info("Kernel memory leak detector disabled\n"); |
1881 | } |
1882 | |
1883 | /* |
1884 | * Allow boot-time kmemleak disabling (enabled by default). |
1885 | */ |
1886 | static int kmemleak_boot_config(char *str) |
1887 | { |
1888 | if (!str) |
1889 | return -EINVAL; |
1890 | if (strcmp(str, "off") == 0) |
1891 | kmemleak_disable(); |
1892 | else if (strcmp(str, "on") == 0) |
1893 | kmemleak_skip_disable = 1; |
1894 | else |
1895 | return -EINVAL; |
1896 | return 0; |
1897 | } |
1898 | early_param("kmemleak", kmemleak_boot_config); |
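
/*
 * For example, booting with "kmemleak=off" disables the detector before any
 * allocation tracing starts, while "kmemleak=on" overrides
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see kmemleak_init() below).
 */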
1899 | |
1900 | static void __init print_log_trace(struct early_log *log) |
1901 | { |
1902 | struct stack_trace trace; |
1903 | |
1904 | trace.nr_entries = log->trace_len; |
1905 | trace.entries = log->trace; |
1906 | |
1907 | pr_notice("Early log backtrace:\n"); |
1908 | print_stack_trace(&trace, 2); |
1909 | } |
1910 | |
1911 | /* |
1912 | * Kmemleak initialization. |
1913 | */ |
1914 | void __init kmemleak_init(void) |
1915 | { |
1916 | int i; |
1917 | unsigned long flags; |
1918 | |
1919 | #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF |
1920 | if (!kmemleak_skip_disable) { |
1921 | kmemleak_early_log = 0; |
1922 | kmemleak_disable(); |
1923 | return; |
1924 | } |
1925 | #endif |
1926 | |
1927 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); |
1928 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); |
1929 | |
1930 | object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); |
1931 | scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); |
1932 | |
1933 | if (crt_early_log > ARRAY_SIZE(early_log)) |
1934 | pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", |
1935 | crt_early_log); |
1936 | |
1937 | /* the kernel is still in UP mode, so disabling the IRQs is enough */ |
1938 | local_irq_save(flags); |
1939 | kmemleak_early_log = 0; |
1940 | if (kmemleak_error) { |
1941 | local_irq_restore(flags); |
1942 | return; |
1943 | } else { |
1944 | kmemleak_enabled = 1; |
1945 | kmemleak_free_enabled = 1; |
1946 | } |
1947 | local_irq_restore(flags); |
1948 | |
1949 | /* |
1950 | * This is the point where tracking allocations is safe. Automatic |
1951 | * scanning is started during the late initcall. Add the early logged |
1952 | * callbacks to the kmemleak infrastructure. |
1953 | */ |
1954 | for (i = 0; i < crt_early_log; i++) { |
1955 | struct early_log *log = &early_log[i]; |
1956 | |
1957 | switch (log->op_type) { |
1958 | case KMEMLEAK_ALLOC: |
1959 | early_alloc(log); |
1960 | break; |
1961 | case KMEMLEAK_ALLOC_PERCPU: |
1962 | early_alloc_percpu(log); |
1963 | break; |
1964 | case KMEMLEAK_FREE: |
1965 | kmemleak_free(log->ptr); |
1966 | break; |
1967 | case KMEMLEAK_FREE_PART: |
1968 | kmemleak_free_part(log->ptr, log->size); |
1969 | break; |
1970 | case KMEMLEAK_FREE_PERCPU: |
1971 | kmemleak_free_percpu(log->ptr); |
1972 | break; |
1973 | case KMEMLEAK_NOT_LEAK: |
1974 | kmemleak_not_leak(log->ptr); |
1975 | break; |
1976 | case KMEMLEAK_IGNORE: |
1977 | kmemleak_ignore(log->ptr); |
1978 | break; |
1979 | case KMEMLEAK_SCAN_AREA: |
1980 | kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); |
1981 | break; |
1982 | case KMEMLEAK_NO_SCAN: |
1983 | kmemleak_no_scan(log->ptr); |
1984 | break; |
1985 | default: |
1986 | kmemleak_warn("Unknown early log operation: %d\n", |
1987 | log->op_type); |
1988 | } |
1989 | |
1990 | if (kmemleak_warning) { |
1991 | print_log_trace(log); |
1992 | kmemleak_warning = 0; |
1993 | } |
1994 | } |
1995 | } |
1996 | |
1997 | /* |
1998 | * Late initialization function. |
1999 | */ |
2000 | static int __init kmemleak_late_init(void) |
2001 | { |
2002 | struct dentry *dentry; |
2003 | |
2004 | kmemleak_initialized = 1; |
2005 | |
2006 | if (kmemleak_error) { |
2007 | /* |
2008 | * Some error occurred and kmemleak was disabled. There is a |
2009 | * small chance that kmemleak_disable() was called immediately |
		 * after setting kmemleak_initialized, in which case the
		 * clean-up work may be scheduled twice, serialized by
		 * scan_mutex.
2012 | */ |
2013 | schedule_work(&cleanup_work); |
2014 | return -ENOMEM; |
2015 | } |
2016 | |
2017 | dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, |
2018 | &kmemleak_fops); |
2019 | if (!dentry) |
2020 | pr_warn("Failed to create the debugfs kmemleak file\n"); |
2021 | mutex_lock(&scan_mutex); |
2022 | start_scan_thread(); |
2023 | mutex_unlock(&scan_mutex); |
2024 | |
2025 | pr_info("Kernel memory leak detector initialized\n"); |
2026 | |
2027 | return 0; |
2028 | } |
2029 | late_initcall(kmemleak_late_init); |
2030 |