Commit | Line | Data |
---|---|---|
4e66934e ED |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | #include <linux/export.h> | |
3 | #include <linux/ref_tracker.h> | |
4 | #include <linux/slab.h> | |
5 | #include <linux/stacktrace.h> | |
6 | #include <linux/stackdepot.h> | |
7 | ||
8 | #define REF_TRACKER_STACK_ENTRIES 16 | |
9 | ||
/*
 * One tracked reference. Allocated by ref_tracker_alloc(), moved to the
 * directory's quarantine list by ref_tracker_free(), and finally reclaimed
 * either when quarantine overflows or in ref_tracker_dir_exit().
 */
struct ref_tracker {
	struct list_head head;   /* anchor into dir->list or dir->quarantine */
	bool dead;               /* set once ref_tracker_free() ran; guards double free */
	depot_stack_handle_t alloc_stack_handle;  /* call stack captured at alloc time */
	depot_stack_handle_t free_stack_handle;   /* call stack captured at free time */
};
16 | ||
17 | void ref_tracker_dir_exit(struct ref_tracker_dir *dir) | |
18 | { | |
19 | struct ref_tracker *tracker, *n; | |
20 | unsigned long flags; | |
21 | bool leak = false; | |
22 | ||
23 | spin_lock_irqsave(&dir->lock, flags); | |
24 | list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { | |
25 | list_del(&tracker->head); | |
26 | kfree(tracker); | |
27 | dir->quarantine_avail++; | |
28 | } | |
29 | list_for_each_entry_safe(tracker, n, &dir->list, head) { | |
30 | pr_err("leaked reference.\n"); | |
31 | if (tracker->alloc_stack_handle) | |
32 | stack_depot_print(tracker->alloc_stack_handle); | |
33 | leak = true; | |
34 | list_del(&tracker->head); | |
35 | kfree(tracker); | |
36 | } | |
37 | spin_unlock_irqrestore(&dir->lock, flags); | |
38 | WARN_ON_ONCE(leak); | |
39 | WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); | |
40 | } | |
41 | EXPORT_SYMBOL(ref_tracker_dir_exit); | |
42 | ||
43 | void ref_tracker_dir_print(struct ref_tracker_dir *dir, | |
44 | unsigned int display_limit) | |
45 | { | |
46 | struct ref_tracker *tracker; | |
47 | unsigned long flags; | |
48 | unsigned int i = 0; | |
49 | ||
50 | spin_lock_irqsave(&dir->lock, flags); | |
51 | list_for_each_entry(tracker, &dir->list, head) { | |
52 | if (i < display_limit) { | |
53 | pr_err("leaked reference.\n"); | |
54 | if (tracker->alloc_stack_handle) | |
55 | stack_depot_print(tracker->alloc_stack_handle); | |
56 | i++; | |
57 | } else { | |
58 | break; | |
59 | } | |
60 | } | |
61 | spin_unlock_irqrestore(&dir->lock, flags); | |
62 | } | |
63 | EXPORT_SYMBOL(ref_tracker_dir_print); | |
64 | ||
65 | int ref_tracker_alloc(struct ref_tracker_dir *dir, | |
66 | struct ref_tracker **trackerp, | |
67 | gfp_t gfp) | |
68 | { | |
69 | unsigned long entries[REF_TRACKER_STACK_ENTRIES]; | |
70 | struct ref_tracker *tracker; | |
71 | unsigned int nr_entries; | |
c12837d1 | 72 | gfp_t gfp_mask = gfp; |
4e66934e ED |
73 | unsigned long flags; |
74 | ||
c12837d1 ED |
75 | if (gfp & __GFP_DIRECT_RECLAIM) |
76 | gfp_mask |= __GFP_NOFAIL; | |
77 | *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); | |
4e66934e ED |
78 | if (unlikely(!tracker)) { |
79 | pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); | |
80 | refcount_inc(&dir->untracked); | |
81 | return -ENOMEM; | |
82 | } | |
83 | nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); | |
84 | nr_entries = filter_irq_stacks(entries, nr_entries); | |
85 | tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp); | |
86 | ||
87 | spin_lock_irqsave(&dir->lock, flags); | |
88 | list_add(&tracker->head, &dir->list); | |
89 | spin_unlock_irqrestore(&dir->lock, flags); | |
90 | return 0; | |
91 | } | |
92 | EXPORT_SYMBOL_GPL(ref_tracker_alloc); | |
93 | ||
/*
 * Release one tracked reference. The tracker is moved to the quarantine
 * list (so a later double free can still be diagnosed); when quarantine
 * capacity is exhausted, the oldest quarantined tracker is evicted and
 * freed instead. Returns 0 on success, -EEXIST if the tracker was never
 * allocated, -EINVAL on a detected double free.
 */
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker = *trackerp;
	depot_stack_handle_t stack_handle;
	unsigned int nr_entries;
	unsigned long flags;

	/* NULL tracker: the matching ref_tracker_alloc() failed, so this
	 * reference was only counted in dir->untracked — balance it here.
	 */
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	/* Capture the release site; GFP_ATOMIC because callers may be in
	 * atomic context. Done before taking dir->lock to keep the critical
	 * section short.
	 */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		/* Double free: report both the original allocation site and
		 * the first release site, then bail out.
		 */
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	/* Park the tracker in quarantine. If quarantine is full, evict its
	 * oldest entry (which may be this very tracker) and free it below,
	 * after dropping the lock.
	 */
	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	/* kfree(NULL) is a no-op when nothing was evicted. */
	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);