/* linux-6.15/lib/ref_tracker.c (revision b6d7c0eb) */
// SPDX-License-Identifier: GPL-2.0-or-later
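
/*
 * Each tracked reference gets a small struct ref_tracker recording the
 * stack trace of the call that took the reference and, once released,
 * the stack trace of the release.  Live trackers sit on dir->list;
 * released ones linger on dir->quarantine so that a double release can
 * be reported together with both stack traces.
 */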

#define pr_fmt(fmt) "ref_tracker: " fmt

#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024

struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;   /* set once the reference has been released */
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

/* Summary of dir->list, aggregated by allocation stack. */
struct ref_tracker_dir_stats {
	int total;		/* number of live trackers walked */
	int count;		/* distinct allocation stacks recorded */
	struct {
		depot_stack_handle_t stack_handle;
		unsigned int count;
	} stacks[];
};

/*
 * Aggregate the live trackers on dir->list by allocation stack.
 * Runs under dir->lock, hence the GFP_NOWAIT allocation.  At most
 * @limit distinct stacks are recorded; trackers beyond that only
 * bump ->total, and the caller reports them as skipped.
 */
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
	struct ref_tracker_dir_stats *stats;
	struct ref_tracker *tracker;

	stats = kmalloc(struct_size(stats, stacks, limit),
			GFP_NOWAIT | __GFP_NOWARN);
	if (!stats)
		return ERR_PTR(-ENOMEM);
	stats->total = 0;
	stats->count = 0;

	list_for_each_entry(tracker, &dir->list, head) {
		depot_stack_handle_t stack = tracker->alloc_stack_handle;
		int i;

		++stats->total;
		/* Find the slot already holding this stack, if any. */
		for (i = 0; i < stats->count; ++i)
			if (stats->stacks[i].stack_handle == stack)
				break;
		/* No match and no room left: count it, but don't record it. */
		if (i >= limit)
			continue;
		if (i >= stats->count) {
			stats->stacks[i].stack_handle = stack;
			stats->stacks[i].count = 0;
			++stats->count;
		}
		++stats->stacks[i].count;
	}

	return stats;
}
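
/*
 * Worked example (made-up stack handles): five live trackers whose
 * allocation stacks are A, A, B, A, C, aggregated with limit == 2,
 * yield total == 5 and stacks == { {A, 3}, {B, 1} }; the C tracker
 * only bumps ->total and is later reported as skipped.
 */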

/*
 * Print one report per distinct allocation stack, at most
 * @display_limit of them.  Callers must hold dir->lock.
 */
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ref_tracker_dir_stats *stats;
	unsigned int i = 0, skipped;
	depot_stack_handle_t stack;
	char *sbuf;

	lockdep_assert_held(&dir->lock);

	if (list_empty(&dir->list))
		return;

	stats = ref_tracker_get_stats(dir, display_limit);
	if (IS_ERR(stats)) {
		pr_err("%s@%pK: couldn't get stats, error %pe\n",
		       dir->name, dir, stats);
		return;
	}

	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);

	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
		stack = stats->stacks[i].stack_handle;
		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
			sbuf[0] = 0;
		/* sbuf may be NULL here; printk renders a NULL %s as "(null)". */
		pr_err("%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
		       stats->stacks[i].count, stats->total, sbuf);
		skipped -= stats->stacks[i].count;
	}

	/* Users whose stacks were dropped by the display_limit cap. */
	if (skipped)
		pr_err("%s@%pK skipped reports about %d/%d users.\n",
		       dir->name, dir, skipped, stats->total);

	kfree(sbuf);

	kfree(stats);
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);
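
/*
 * Illustrative report shape (hypothetical names and counts), pieced
 * together from the pr_fmt prefix and the pr_err formats above:
 *
 *	ref_tracker: netdev@00000000f00ba4ba has 3/5 users at
 *	    dev_hold+0x32/0x50
 *	    ...
 *	ref_tracker: netdev@00000000f00ba4ba skipped reports about 2/5 users.
 */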

/* Same report, for callers that do not already hold dir->lock. */
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	/* Quarantined trackers were already released; just reclaim the memory. */
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	/* Anything still live on dir->list is a leaked reference. */
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
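
/*
 * Teardown sketch (hypothetical "foo" caller, not part of this file):
 * the owner calls ref_tracker_dir_exit() from its release path, once
 * every reference should have been dropped:
 *
 *	static void foo_destroy(struct foo *f)
 *	{
 *		ref_tracker_dir_exit(&f->refs);	// WARNs and dumps leaked stacks
 *		kfree(f);
 *	}
 */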

int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	/* Callers opting out of tracking are only counted. */
	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	/* If the context may sleep anyway, insist on getting a tracker. */
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	/* Record who took the reference, skipping our own frame. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);
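
/*
 * Acquisition sketch (hypothetical "foo" caller): take the real
 * refcount first, then register a tracker for it:
 *
 *	static int foo_hold(struct foo *f, struct ref_tracker **trk)
 *	{
 *		refcount_inc(&f->refcnt);
 *		return ref_tracker_alloc(&f->refs, trk, GFP_KERNEL);
 *	}
 */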

int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	/* Pairs with an allocation that failed to get a tracker. */
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);

	spin_lock_irqsave(&dir->lock, flags);
	/* Double release: dump both the allocation and first-free stacks. */
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	/*
	 * Keep the tracker in the quarantine so a later double release
	 * can still be diagnosed; once the quarantine is full, recycle
	 * its oldest entry instead.
	 */
	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
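
/*
 * Release sketch, mirroring the hypothetical foo_hold() above: drop
 * the tracker first, then the real refcount:
 *
 *	static void foo_put(struct foo *f, struct ref_tracker **trk)
 *	{
 *		ref_tracker_free(&f->refs, trk);
 *		if (refcount_dec_and_test(&f->refcnt))
 *			foo_destroy(f);
 *	}
 */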
235