/* linux-6.15: lib/ref_tracker.c (revision 7a113ff6) */
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16

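/*
 * One ref_tracker is allocated per tracked reference.  It lives on
 * dir->list while the reference is outstanding and is moved to
 * dir->quarantine once released, keeping the allocation and free stack
 * traces around for double-free diagnostics.
 */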
struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

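/*
 * Print up to @display_limit outstanding references, together with the
 * stack trace recorded when each of them was acquired.  The caller must
 * hold dir->lock.
 */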
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ref_tracker *tracker;
	unsigned int i = 0;

	lockdep_assert_held(&dir->lock);

	list_for_each_entry(tracker, &dir->list, head) {
		if (i < display_limit) {
			pr_err("leaked reference.\n");
			if (tracker->alloc_stack_handle)
				stack_depot_print(tracker->alloc_stack_handle);
			i++;
		} else {
			break;
		}
	}
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);

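/* Like ref_tracker_dir_print_locked(), but acquires dir->lock itself. */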
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

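/*
 * Tear down a tracker directory: free the quarantined (already released)
 * trackers, then report anything still on dir->list as a leaked reference
 * and free it.  Unbalanced untracked/no_tracker counts are also warned
 * about.
 */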
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);

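/*
 * Acquire side: allocate a tracker, record the caller's stack trace in the
 * stack depot and link the tracker into dir->list.  A NULL @trackerp only
 * bumps the no_tracker count; an allocation failure falls back to the
 * untracked count so ref_tracker_free() stays balanced.
 */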
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);

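/*
 * Release side: record the freeing stack trace, mark the tracker dead and
 * move it to dir->quarantine so a later double free can report both call
 * sites.  Once the quarantine budget (dir->quarantine_avail) is used up,
 * each new entry pushes out and frees the oldest quarantined tracker.
 */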
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
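
/*
 * Minimal usage sketch (hypothetical "foo" object, not part of this file):
 * it assumes the companion declarations in <linux/ref_tracker.h>; the
 * three-argument ref_tracker_dir_init() shown below matches recent kernels
 * but may differ between versions.
 *
 *	struct foo {
 *		struct ref_tracker_dir dir;
 *	};
 *
 *	static void foo_init(struct foo *foo)
 *	{
 *		// keep up to 16 released trackers for double-free reports
 *		ref_tracker_dir_init(&foo->dir, 16, "foo");
 *	}
 *
 *	static int foo_hold(struct foo *foo, struct ref_tracker **trackerp)
 *	{
 *		// records the acquiring stack and links the tracker into dir->list
 *		return ref_tracker_alloc(&foo->dir, trackerp, GFP_KERNEL);
 *	}
 *
 *	static void foo_release(struct foo *foo, struct ref_tracker **trackerp)
 *	{
 *		// records the releasing stack and quarantines the tracker
 *		ref_tracker_free(&foo->dir, trackerp);
 *	}
 *
 *	static void foo_destroy(struct foo *foo)
 *	{
 *		// reports anything still held as a leaked reference
 *		ref_tracker_dir_exit(&foo->dir);
 *	}
 */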