/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

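/*
 * Example (illustrative pseudo-code only, not an actual call path): the
 * nesting described above as it occurs while a referenced block is scanned:
 *
 *	mutex_lock(&scan_mutex);			kmemleak_scan() caller
 *	  spin_lock_irqsave(&object->lock, flags);	scan_object()
 *	    read_lock_irqsave(&kmemleak_lock, flags);	scan_block()
 *	      spin_lock_nested(&other_object->lock,
 *			       SINGLE_DEPTH_NESTING);	referenced object found
 *
 * Independently of this ordering, get_object() must be called under
 * rcu_read_lock() since the object may otherwise be freed by the RCU
 * callback scheduled from put_object().
 */
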
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the information about an unreferenced object to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated __percpu block and populate the stack trace for
 * each per-CPU area.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

897
a2b6bf63
CM
898/**
899 * kmemleak_alloc - register a newly allocated object
900 * @ptr: pointer to beginning of the object
901 * @size: size of the object
902 * @min_count: minimum number of references to this object. If during memory
903 * scanning a number of references less than @min_count is found,
904 * the object is reported as a memory leak. If @min_count is 0,
905 * the object is never reported as a leak. If @min_count is -1,
906 * the object is ignored (not scanned and not reported as a leak)
907 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
908 *
909 * This function is called from the kernel allocators when a new object
910 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
3c7b4e6b 911 */
a6186d89
CM
912void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
913 gfp_t gfp)
3c7b4e6b
CM
914{
915 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
916
8910ae89 917 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
3c7b4e6b 918 create_object((unsigned long)ptr, size, min_count, gfp);
8910ae89 919 else if (kmemleak_early_log)
c017b4be 920 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
3c7b4e6b
CM
921}
922EXPORT_SYMBOL_GPL(kmemleak_alloc);
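
/*
 * Example (illustrative sketch only; my_raw_alloc()/my_raw_free() are
 * hypothetical helpers, not kernel APIs). A driver handing out memory from
 * its own allocator could pair kmemleak_alloc()/kmemleak_free() so the block
 * is tracked like a kmalloc'ed one:
 *
 *	buf = my_raw_alloc(size);
 *	if (buf)
 *		kmemleak_alloc(buf, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(buf);
 *	my_raw_free(buf);
 *
 * With min_count == 1 the block is reported as a leak if no pointer to it is
 * found during scanning. Objects obtained from the standard allocators
 * (kmalloc, vmalloc, kmem_cache_alloc etc.) are registered automatically via
 * these hooks and need no explicit calls.
 */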

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
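
/*
 * Example (illustrative only, with made-up addresses). Conceptually, freeing
 * the middle of a registered range splits its metadata in two:
 *
 *	kmemleak_alloc(base, 0x1000, 0, GFP_KERNEL);
 *	kmemleak_free_part(base + 0x200, 0x100);
 *
 * delete_object_part() then re-creates objects for the surviving sub-ranges
 * [base, base + 0x200) and [base + 0x300, base + 0x1000).
 */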

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
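
/*
 * Example (illustrative sketch only; the device and its RING_BASE register
 * are hypothetical). A typical false positive is an object whose only
 * reference lives somewhere kmemleak cannot scan, e.g. a physical address
 * programmed into hardware:
 *
 *	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 *	writel(lower_32_bits(virt_to_phys(desc)), base + RING_BASE);
 *	kmemleak_not_leak(desc);
 *
 * The object is painted grey, so it is never reported but is still scanned
 * for pointers to other allocations.
 */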

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
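
/*
 * Example (illustrative sketch only; struct big_blob is hypothetical). For a
 * large, mostly data-only structure whose kernel pointers live in a single
 * field, scanning can be restricted to just that field:
 *
 *	struct big_blob {
 *		u8 payload[SZ_64K];		written by hardware, no pointers
 *		struct list_head entries;	the only pointers of interest
 *	};
 *
 *	blob = kmalloc(sizeof(*blob), GFP_KERNEL);
 *	kmemleak_scan_area(&blob->entries, sizeof(blob->entries), GFP_KERNEL);
 *
 * Only the registered range is scanned, which keeps random payload bytes
 * from being misinterpreted as references.
 */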

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
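
/*
 * Example (illustrative sketch only; BUF_SIZE and the driver are
 * hypothetical). A buffer holding nothing but raw data, such as a bounce
 * buffer, can be exempted from scanning while still being tracked:
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(buf);
 *
 * The object is still reported if it leaks; kmemleak simply never looks
 * inside it for pointers to other objects.
 */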

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
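
/*
 * Example (illustrative sketch only; early_phys_alloc() is a hypothetical
 * early allocator returning a physical address). Boot-time code dealing in
 * physical addresses can use the _phys variants so the lowmem alias is
 * registered on its behalf:
 *
 *	pa = early_phys_alloc(SZ_4K);
 *	kmemleak_alloc_phys(pa, SZ_4K, 0, GFP_NOWAIT);
 *
 * min_count == 0 means the range is scanned for references but never
 * reported as a leak itself. Highmem physical addresses are skipped because
 * they have no permanent __va() mapping to scan.
 */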

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);
	scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

1590/*
1591 * Iterate over the object_list and return the first valid object at or after
1592 * the required position with its use_count incremented. The function triggers
1593 * a memory scanning when the pos argument points to the first position.
1594 */
1595static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1596{
1597 struct kmemleak_object *object;
1598 loff_t n = *pos;
b87324d0
CM
1599 int err;
1600
1601 err = mutex_lock_interruptible(&scan_mutex);
1602 if (err < 0)
1603 return ERR_PTR(err);
3c7b4e6b 1604
3c7b4e6b
CM
1605 rcu_read_lock();
1606 list_for_each_entry_rcu(object, &object_list, object_list) {
1607 if (n-- > 0)
1608 continue;
1609 if (get_object(object))
1610 goto out;
1611 }
1612 object = NULL;
1613out:
3c7b4e6b
CM
1614 return object;
1615}
1616
1617/*
1618 * Return the next object in the object_list. The function decrements the
1619 * use_count of the previous object and increments that of the next one.
1620 */
1621static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1622{
1623 struct kmemleak_object *prev_obj = v;
1624 struct kmemleak_object *next_obj = NULL;
58fac095 1625 struct kmemleak_object *obj = prev_obj;
3c7b4e6b
CM
1626
1627 ++(*pos);
3c7b4e6b 1628
58fac095 1629 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
52c3ce4e
CM
1630 if (get_object(obj)) {
1631 next_obj = obj;
3c7b4e6b 1632 break;
52c3ce4e 1633 }
3c7b4e6b 1634 }
288c857d 1635
3c7b4e6b
CM
1636 put_object(prev_obj);
1637 return next_obj;
1638}
1639
1640/*
1641 * Decrement the use_count of the last object required, if any.
1642 */
1643static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1644{
b87324d0
CM
1645 if (!IS_ERR(v)) {
1646 /*
1647	 * kmemleak_seq_start() may return ERR_PTR if waiting for the scan_mutex
1648	 * was interrupted, so only release the mutex if !IS_ERR.
1649 */
f5886c7f 1650 rcu_read_unlock();
b87324d0
CM
1651 mutex_unlock(&scan_mutex);
1652 if (v)
1653 put_object(v);
1654 }
3c7b4e6b
CM
1655}
1656
1657/*
1658 * Print the information for an unreferenced object to the seq file.
1659 */
1660static int kmemleak_seq_show(struct seq_file *seq, void *v)
1661{
1662 struct kmemleak_object *object = v;
1663 unsigned long flags;
1664
1665 spin_lock_irqsave(&object->lock, flags);
288c857d 1666 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
17bb9e0d 1667 print_unreferenced(seq, object);
3c7b4e6b
CM
1668 spin_unlock_irqrestore(&object->lock, flags);
1669 return 0;
1670}
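/*
 * Illustrative note (not part of the kmemleak code): an entry emitted through
 * the seq interface above by print_unreferenced() looks roughly like the
 * following; all addresses, sizes and backtrace entries are example values:
 *
 *   unreferenced object 0xffff888051a9d000 (size 64):
 *     comm "modprobe", pid 530, jiffies 4294894652 (age 125.780s)
 *     hex dump (first 32 bytes):
 *       ...
 *     backtrace:
 *       [<...>] kmem_cache_alloc+0x.../0x...
 *       [<...>] ...
 */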
1671
1672static const struct seq_operations kmemleak_seq_ops = {
1673 .start = kmemleak_seq_start,
1674 .next = kmemleak_seq_next,
1675 .stop = kmemleak_seq_stop,
1676 .show = kmemleak_seq_show,
1677};
1678
1679static int kmemleak_open(struct inode *inode, struct file *file)
1680{
b87324d0 1681 return seq_open(file, &kmemleak_seq_ops);
3c7b4e6b
CM
1682}
1683
189d84ed
CM
1684static int dump_str_object_info(const char *str)
1685{
1686 unsigned long flags;
1687 struct kmemleak_object *object;
1688 unsigned long addr;
1689
dc053733
AP
1690 if (kstrtoul(str, 0, &addr))
1691 return -EINVAL;
189d84ed
CM
1692 object = find_and_get_object(addr, 0);
1693 if (!object) {
1694 pr_info("Unknown object at 0x%08lx\n", addr);
1695 return -EINVAL;
1696 }
1697
1698 spin_lock_irqsave(&object->lock, flags);
1699 dump_object_info(object);
1700 spin_unlock_irqrestore(&object->lock, flags);
1701
1702 put_object(object);
1703 return 0;
1704}
1705
30b37101
LR
1706/*
1707 * We use grey instead of black to ensure we can do future scans on the same
1708 * objects. If we did not do future scans, these black objects could
1709 * potentially contain references to newly allocated objects and we would
1710 * end up with false positives.
1711 */
1712static void kmemleak_clear(void)
1713{
1714 struct kmemleak_object *object;
1715 unsigned long flags;
1716
1717 rcu_read_lock();
1718 list_for_each_entry_rcu(object, &object_list, object_list) {
1719 spin_lock_irqsave(&object->lock, flags);
1720 if ((object->flags & OBJECT_REPORTED) &&
1721 unreferenced_object(object))
a1084c87 1722 __paint_it(object, KMEMLEAK_GREY);
30b37101
LR
1723 spin_unlock_irqrestore(&object->lock, flags);
1724 }
1725 rcu_read_unlock();
dc9b3f42
LZ
1726
1727 kmemleak_found_leaks = false;
30b37101
LR
1728}
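/*
 * Illustrative note (not part of the kmemleak code): painting reported objects
 * grey enables a simple differential workflow through the debugfs file: write
 * "clear" to grey out everything reported so far, exercise the code of
 * interest, then write "scan" and read the file again so that only leaks
 * introduced since the "clear" are printed. A user-space sketch of issuing
 * such commands is given near kmemleak_write() below.
 */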
1729
c89da70c
LZ
1730static void __kmemleak_do_cleanup(void);
1731
3c7b4e6b
CM
1732/*
1733 * File write operation to configure kmemleak at run-time. The following
1734 * commands can be written to the /sys/kernel/debug/kmemleak file:
1735 * off - disable kmemleak (irreversible)
1736 * stack=on - enable the task stacks scanning
1737 *   stack=off	- disable the task stacks scanning
1738 * scan=on - start the automatic memory scanning thread
1739 * scan=off - stop the automatic memory scanning thread
1740 * scan=... - set the automatic memory scanning period in seconds (0 to
1741 * disable it)
4698c1f2 1742 * scan - trigger a memory scan
30b37101 1743 *   clear	- mark all currently reported unreferenced kmemleak objects as
c89da70c
LZ
1744 * grey to ignore printing them, or free all kmemleak objects
1745 * if kmemleak has been disabled.
189d84ed 1746 * dump=... - dump information about the object found at the given address
3c7b4e6b
CM
1747 */
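/*
 * Illustrative sketch (not part of the kmemleak code): a minimal user-space
 * example of driving the interface documented above, assuming debugfs is
 * mounted at the usual /sys/kernel/debug. Each write() carries exactly one
 * command, e.g. "scan" to trigger an immediate scan, "clear" to grey out
 * everything reported so far, or "scan=600" to set the scan period:
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int kmemleak_command(const char *cmd)
{
	ssize_t len = strlen(cmd);
	int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
	int ret = 0;

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, cmd, len) != len) {
		perror("write");
		ret = -1;
	}
	close(fd);
	return ret;	/* results are read back from the same file, e.g. with cat */
}

int main(void)
{
	/* "clear", "scan=600", "dump=<addr>" etc. are issued the same way */
	return kmemleak_command("scan") ? 1 : 0;
}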
1748static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1749 size_t size, loff_t *ppos)
1750{
1751 char buf[64];
1752 int buf_size;
b87324d0 1753 int ret;
3c7b4e6b
CM
1754
1755 buf_size = min(size, (sizeof(buf) - 1));
1756 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1757 return -EFAULT;
1758 buf[buf_size] = 0;
1759
b87324d0
CM
1760 ret = mutex_lock_interruptible(&scan_mutex);
1761 if (ret < 0)
1762 return ret;
1763
c89da70c 1764 if (strncmp(buf, "clear", 5) == 0) {
8910ae89 1765 if (kmemleak_enabled)
c89da70c
LZ
1766 kmemleak_clear();
1767 else
1768 __kmemleak_do_cleanup();
1769 goto out;
1770 }
1771
8910ae89 1772 if (!kmemleak_enabled) {
c89da70c
LZ
1773 ret = -EBUSY;
1774 goto out;
1775 }
1776
3c7b4e6b
CM
1777 if (strncmp(buf, "off", 3) == 0)
1778 kmemleak_disable();
1779 else if (strncmp(buf, "stack=on", 8) == 0)
1780 kmemleak_stack_scan = 1;
1781 else if (strncmp(buf, "stack=off", 9) == 0)
1782 kmemleak_stack_scan = 0;
1783 else if (strncmp(buf, "scan=on", 7) == 0)
1784 start_scan_thread();
1785 else if (strncmp(buf, "scan=off", 8) == 0)
1786 stop_scan_thread();
1787 else if (strncmp(buf, "scan=", 5) == 0) {
1788 unsigned long secs;
3c7b4e6b 1789
3dbb95f7 1790 ret = kstrtoul(buf + 5, 0, &secs);
b87324d0
CM
1791 if (ret < 0)
1792 goto out;
3c7b4e6b
CM
1793 stop_scan_thread();
1794 if (secs) {
1795 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1796 start_scan_thread();
1797 }
4698c1f2
CM
1798 } else if (strncmp(buf, "scan", 4) == 0)
1799 kmemleak_scan();
189d84ed
CM
1800 else if (strncmp(buf, "dump=", 5) == 0)
1801 ret = dump_str_object_info(buf + 5);
4698c1f2 1802 else
b87324d0
CM
1803 ret = -EINVAL;
1804
1805out:
1806 mutex_unlock(&scan_mutex);
1807 if (ret < 0)
1808 return ret;
3c7b4e6b
CM
1809
1810 /* ignore the rest of the buffer, only one command at a time */
1811 *ppos += size;
1812 return size;
1813}
1814
1815static const struct file_operations kmemleak_fops = {
1816 .owner = THIS_MODULE,
1817 .open = kmemleak_open,
1818 .read = seq_read,
1819 .write = kmemleak_write,
1820 .llseek = seq_lseek,
5f3bf19a 1821 .release = seq_release,
3c7b4e6b
CM
1822};
1823
c89da70c
LZ
1824static void __kmemleak_do_cleanup(void)
1825{
1826 struct kmemleak_object *object;
1827
1828 rcu_read_lock();
1829 list_for_each_entry_rcu(object, &object_list, object_list)
1830 delete_object_full(object->pointer);
1831 rcu_read_unlock();
1832}
1833
3c7b4e6b 1834/*
74341703
CM
1835 * Stop the memory scanning thread and free the kmemleak internal objects if
1836 * no memory leaks were previously reported (otherwise, kmemleak may still
1837 * hold useful information on the memory leaks found).
3c7b4e6b 1838 */
179a8100 1839static void kmemleak_do_cleanup(struct work_struct *work)
3c7b4e6b 1840{
3c7b4e6b 1841 stop_scan_thread();
3c7b4e6b 1842
c5f3b1a5
CM
1843 /*
1844 * Once the scan thread has stopped, it is safe to no longer track
1845 * object freeing. Ordering of the scan thread stopping and the memory
1846 * accesses below is guaranteed by the kthread_stop() function.
1847 */
1848 kmemleak_free_enabled = 0;
1849
c89da70c
LZ
1850 if (!kmemleak_found_leaks)
1851 __kmemleak_do_cleanup();
1852 else
756a025f 1853 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
3c7b4e6b
CM
1854}
1855
179a8100 1856static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
3c7b4e6b
CM
1857
1858/*
1859 * Disable kmemleak. No memory allocation/freeing will be traced once this
1860 * function is called. Disabling kmemleak is an irreversible operation.
1861 */
1862static void kmemleak_disable(void)
1863{
1864 /* atomically check whether it was already invoked */
8910ae89 1865 if (cmpxchg(&kmemleak_error, 0, 1))
3c7b4e6b
CM
1866 return;
1867
1868 /* stop any memory operation tracing */
8910ae89 1869 kmemleak_enabled = 0;
3c7b4e6b
CM
1870
1871 /* check whether it is too early for a kernel thread */
8910ae89 1872 if (kmemleak_initialized)
179a8100 1873 schedule_work(&cleanup_work);
c5f3b1a5
CM
1874 else
1875 kmemleak_free_enabled = 0;
3c7b4e6b
CM
1876
1877 pr_info("Kernel memory leak detector disabled\n");
1878}
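/*
 * Illustrative sketch (not part of the kmemleak code): the cmpxchg() above
 * acts as a run-once guard, since it returns the previous value of
 * kmemleak_error; only the first caller observes 0 and carries on, while every
 * later call returns early. A user-space analogue using C11 atomics, with
 * illustrative names:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int already_disabled;

static void disable_once(void)
{
	int expected = 0;

	/* only the caller that flips 0 -> 1 proceeds */
	if (!atomic_compare_exchange_strong(&already_disabled, &expected, 1))
		return;

	puts("leak detector disabled");
}

int main(void)
{
	disable_once();
	disable_once();		/* second call is a no-op */
	return 0;
}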
1879
1880/*
1881 * Allow boot-time kmemleak disabling (enabled by default).
1882 */
1883static int kmemleak_boot_config(char *str)
1884{
1885 if (!str)
1886 return -EINVAL;
1887 if (strcmp(str, "off") == 0)
1888 kmemleak_disable();
ab0155a2
JB
1889 else if (strcmp(str, "on") == 0)
1890 kmemleak_skip_disable = 1;
1891 else
3c7b4e6b
CM
1892 return -EINVAL;
1893 return 0;
1894}
1895early_param("kmemleak", kmemleak_boot_config);
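/*
 * Illustrative note (not part of the kmemleak code): the handler above is
 * wired to the "kmemleak" early parameter, so the detector can be controlled
 * from the kernel command line. Booting with "kmemleak=off" disables it, while
 * "kmemleak=on" keeps it enabled when CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set
 * (see kmemleak_init() below).
 */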
1896
5f79020c
CM
1897static void __init print_log_trace(struct early_log *log)
1898{
1899 struct stack_trace trace;
1900
1901 trace.nr_entries = log->trace_len;
1902 trace.entries = log->trace;
1903
1904 pr_notice("Early log backtrace:\n");
1905 print_stack_trace(&trace, 2);
1906}
1907
3c7b4e6b 1908/*
2030117d 1909 * Kmemleak initialization.
3c7b4e6b
CM
1910 */
1911void __init kmemleak_init(void)
1912{
1913 int i;
1914 unsigned long flags;
1915
ab0155a2
JB
1916#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1917 if (!kmemleak_skip_disable) {
3551a928 1918 kmemleak_early_log = 0;
ab0155a2
JB
1919 kmemleak_disable();
1920 return;
1921 }
1922#endif
1923
3c7b4e6b
CM
1924 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1925 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1926
1927 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1928 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
3c7b4e6b 1929
21cd3a60 1930 if (crt_early_log > ARRAY_SIZE(early_log))
598d8091
JP
1931 pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
1932 crt_early_log);
b6693005 1933
3c7b4e6b
CM
1934 /* the kernel is still in UP mode, so disabling the IRQs is enough */
1935 local_irq_save(flags);
3551a928 1936 kmemleak_early_log = 0;
8910ae89 1937 if (kmemleak_error) {
b6693005
CM
1938 local_irq_restore(flags);
1939 return;
c5f3b1a5 1940 } else {
8910ae89 1941 kmemleak_enabled = 1;
c5f3b1a5
CM
1942 kmemleak_free_enabled = 1;
1943 }
3c7b4e6b
CM
1944 local_irq_restore(flags);
1945
1946 /*
1947 * This is the point where tracking allocations is safe. Automatic
1948 * scanning is started during the late initcall. Add the early logged
1949 * callbacks to the kmemleak infrastructure.
1950 */
1951 for (i = 0; i < crt_early_log; i++) {
1952 struct early_log *log = &early_log[i];
1953
1954 switch (log->op_type) {
1955 case KMEMLEAK_ALLOC:
fd678967 1956 early_alloc(log);
3c7b4e6b 1957 break;
f528f0b8
CM
1958 case KMEMLEAK_ALLOC_PERCPU:
1959 early_alloc_percpu(log);
1960 break;
3c7b4e6b
CM
1961 case KMEMLEAK_FREE:
1962 kmemleak_free(log->ptr);
1963 break;
53238a60
CM
1964 case KMEMLEAK_FREE_PART:
1965 kmemleak_free_part(log->ptr, log->size);
1966 break;
f528f0b8
CM
1967 case KMEMLEAK_FREE_PERCPU:
1968 kmemleak_free_percpu(log->ptr);
1969 break;
3c7b4e6b
CM
1970 case KMEMLEAK_NOT_LEAK:
1971 kmemleak_not_leak(log->ptr);
1972 break;
1973 case KMEMLEAK_IGNORE:
1974 kmemleak_ignore(log->ptr);
1975 break;
1976 case KMEMLEAK_SCAN_AREA:
c017b4be 1977 kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
3c7b4e6b
CM
1978 break;
1979 case KMEMLEAK_NO_SCAN:
1980 kmemleak_no_scan(log->ptr);
1981 break;
1982 default:
5f79020c
CM
1983 kmemleak_warn("Unknown early log operation: %d\n",
1984 log->op_type);
1985 }
1986
8910ae89 1987 if (kmemleak_warning) {
5f79020c 1988 print_log_trace(log);
8910ae89 1989 kmemleak_warning = 0;
3c7b4e6b
CM
1990 }
1991 }
1992}
1993
1994/*
1995 * Late initialization function.
1996 */
1997static int __init kmemleak_late_init(void)
1998{
1999 struct dentry *dentry;
2000
8910ae89 2001 kmemleak_initialized = 1;
3c7b4e6b 2002
8910ae89 2003 if (kmemleak_error) {
3c7b4e6b 2004 /*
25985edc 2005 * Some error occurred and kmemleak was disabled. There is a
3c7b4e6b
CM
2006 * small chance that kmemleak_disable() was called immediately
2007		 * after setting kmemleak_initialized, in which case we may end
2008		 * up with two clean-up threads, serialized by scan_mutex.
2009 */
179a8100 2010 schedule_work(&cleanup_work);
3c7b4e6b
CM
2011 return -ENOMEM;
2012 }
2013
2014 dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
2015 &kmemleak_fops);
2016 if (!dentry)
598d8091 2017 pr_warn("Failed to create the debugfs kmemleak file\n");
4698c1f2 2018 mutex_lock(&scan_mutex);
3c7b4e6b 2019 start_scan_thread();
4698c1f2 2020 mutex_unlock(&scan_mutex);
3c7b4e6b
CM
2021
2022 pr_info("Kernel memory leak detector initialized\n");
2023
2024 return 0;
2025}
2026late_initcall(kmemleak_late_init);