// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
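
/*
 * Editor's illustration of the nesting order above (sketch only, not part
 * of the original file): scan_object() takes object->lock and then calls
 * scan_block(), which takes kmemleak_lock and, for each pointed-to object,
 * other_object->lock with SINGLE_DEPTH_NESTING:
 *
 *	spin_lock_irqsave(&object->lock, flags);	// scan_object()
 *	read_lock_irqsave(&kmemleak_lock, flags);	// scan_block()
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *
 * all of it under scan_mutex held by the scanning thread.
 */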

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		" "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {		\
	if (seq)						\
		seq_printf(seq, fmt, ##__VA_ARGS__);		\
	else							\
		pr_warn(fmt, ##__VA_ARGS__);			\
} while (0)

261
262static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
263 int rowsize, int groupsize, const void *buf,
264 size_t len, bool ascii)
265{
266 if (seq)
267 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
268 buf, len, ascii);
269 else
270 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
271 rowsize, groupsize, buf, len, ascii);
272}
273
274/*
275 * Printing of the objects hex dump to the seq file. The number of lines to be
276 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
277 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
278 * with the object->lock held.
279 */
280static void hex_dump_object(struct seq_file *seq,
281 struct kmemleak_object *object)
282{
283 const u8 *ptr = (const u8 *)object->pointer;
284 size_t len;
285
286 /* limit the number of lines to HEX_MAX_LINES */
287 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
288
289 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
290 kasan_disable_current();
291 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
292 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
293 kasan_enable_current();
294}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as a false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
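
/*
 * Editor's illustration (not in the original file): given an object covering
 * [0x1000, 0x1100), the alias argument behaves as follows:
 *
 *	lookup_object(0x1000, 0);	// returns the object
 *	lookup_object(0x1040, 0);	// NULL + "Found object by alias" warning
 *	lookup_object(0x1040, 1);	// returns the object (pointer inside it)
 */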

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	write_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	write_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
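
/*
 * Editor's sketch (not part of the original file): the canonical pattern for
 * temporarily pinning an object's metadata. The function name is hypothetical
 * and the function is unused; it only demonstrates the
 * find_and_get_object()/put_object() pairing.
 */
static __maybe_unused void example_pin_object(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object)
		return;
	/* the object cannot be freed here; use_count keeps it alive */
	put_object(object);
}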

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
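
/*
 * Editor's sketch (not part of the original file): memory obtained straight
 * from the page allocator is not tracked automatically, so a user would
 * register it by hand. The function below is hypothetical and unused.
 */
static __maybe_unused void *example_page_buffer_alloc(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	void *ptr;

	if (!page)
		return NULL;
	ptr = page_address(page);
	/* min_count == 1: report the block if no references are found */
	kmemleak_alloc(ptr, PAGE_SIZE, 1, GFP_KERNEL);
	return ptr;
}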

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
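
/*
 * Editor's sketch continuing the example above kmemleak_alloc(): a block
 * registered by hand must be unregistered before its backing memory is
 * returned, otherwise kmemleak would later complain about an overlapping
 * insert when the address is reused. Hypothetical and unused.
 */
static __maybe_unused void example_page_buffer_free(void *ptr)
{
	kmemleak_free(ptr);
	free_pages((unsigned long)ptr, 0);
}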

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
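
/*
 * Editor's sketch (hypothetical, unused): a buffer whose only reference
 * lives somewhere kmemleak never scans (e.g. a device register) would show
 * up as a leak; marking it gray suppresses the report while the block is
 * still scanned for references to other objects.
 */
static __maybe_unused void *example_false_positive_buffer(size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL);

	if (buf)
		kmemleak_not_leak(buf);
	return buf;
}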

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
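
/*
 * Editor's sketch (hypothetical structure, unused): if only the first field
 * of an object can hold pointers, scanning may be restricted to that field.
 */
struct example_msg {			/* hypothetical */
	void *next;			/* the only pointer-bearing field */
	char payload[64];
};

static __maybe_unused struct example_msg *example_msg_alloc(void)
{
	struct example_msg *m = kmalloc(sizeof(*m), GFP_KERNEL);

	if (m)
		kmemleak_scan_area(&m->next, sizeof(m->next), GFP_KERNEL);
	return m;
}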

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
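
/*
 * Editor's sketch (assumptions: a raw memblock_phys_alloc() caller whose
 * allocation is not otherwise registered, and a lowmem range): early,
 * physically addressed allocations can be registered through the phys
 * variant before a virtual mapping is the natural handle. Hypothetical,
 * unused, for illustration only.
 */
static void * __init example_early_buffer(phys_addr_t size)
{
	phys_addr_t phys = memblock_phys_alloc(size, SMP_CACHE_BYTES);

	if (!phys)
		return NULL;
	kmemleak_alloc_phys(phys, size, 1, GFP_ATOMIC);
	return __va(phys);
}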

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);

	return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			spin_unlock(&object->lock);
		}
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Both scan_mutex and
 * rcu_read_lock are taken here and released in kmemleak_seq_stop().
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
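/*
 * Editor's illustration of typical usage from userspace:
 *
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan now)
 *	cat /sys/kernel/debug/kmemleak			(list suspected leaks)
 *	echo clear > /sys/kernel/debug/kmemleak		(mute current reports)
 */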
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and, if no memory leaks were found, free
 * the kmemleak internal objects (otherwise kmemleak may still hold useful
 * information on the detected memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
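
/*
 * Editor's note: with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, booting with
 * "kmemleak=on" sets kmemleak_skip_disable so the check in kmemleak_init()
 * below leaves the detector enabled; "kmemleak=off" disables it for good.
 */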

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);