xref: /linux-6.15/kernel/module/dups.c (revision 8660484e)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * kmod dups - the kernel module autoloader duplicate suppressor
 *
 * Copyright (C) 2023 Luis Chamberlain <[email protected]>
 */

#define pr_fmt(fmt)     "module: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
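/*
 * With the prefix above this knob is exposed as module.enable_dups_trace.
 * bool_enable_only means it can be flipped on at runtime but never back
 * off; when enabled, the duplicate warning below becomes a full WARN()
 * with a backtrace.
 */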
static bool enable_dups_trace = IS_ENABLED(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE);
module_param(enable_dups_trace, bool_enable_only, 0644);

/*
 * Protects the dup_kmod_reqs list; additions and removals are done
 * with RCU.
 */
static DEFINE_MUTEX(kmod_dup_mutex);
static LIST_HEAD(dup_kmod_reqs);

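/**
 * struct kmod_dup_req - tracks an in-flight request_module() call
 * @list: linkage into the dup_kmod_reqs list
 * @name: name of the module being requested
 * @first_req_done: completed once the first request's result is known
 * @complete_work: work item which wakes all duplicate waiters
 * @delete_work: delayed work which removes this entry once it has aged out
 * @dup_ret: return value of the first request, shared with duplicates
 */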
struct kmod_dup_req {
	struct list_head list;
	char name[MODULE_NAME_LEN];
	struct completion first_req_done;
	struct work_struct complete_work;
	struct delayed_work delete_work;
	int dup_ret;
};

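/*
 * Callers must either hold kmod_dup_mutex or be inside an RCU read-side
 * critical section: the list is walked with list_for_each_entry_rcu(),
 * with lockdep_is_held(&kmod_dup_mutex) as the lockdep condition. In
 * this file the lookup only ever runs with the mutex held.
 */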
static struct kmod_dup_req *kmod_dup_request_lookup(char *module_name)
{
	struct kmod_dup_req *kmod_req;

	list_for_each_entry_rcu(kmod_req, &dup_kmod_reqs, list,
				lockdep_is_held(&kmod_dup_mutex)) {
		if (strlen(kmod_req->name) == strlen(module_name) &&
		    !memcmp(kmod_req->name, module_name, strlen(module_name))) {
			return kmod_req;
		}
	}

	return NULL;
}

static void kmod_dup_request_delete(struct work_struct *work)
{
	struct kmod_dup_req *kmod_req;
	kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work);

	/*
	 * The typical situation is that a module was successfully loaded.
	 * In that case the module will already be present in userspace, so
	 * any new requests that come in after that will have userspace just
	 * return 0 right away, as it knows the module is loaded. There is
	 * still a small chance that new request_module() calls may come in
	 * right after we delete this entry. These heuristics are there to
	 * protect against finit_module() abuse for auto-loading; if modules
	 * are still trying to auto-load even though a module is already
	 * loaded, that's on them, and those inefficiencies should not be
	 * fixed by kmod. The inefficiency there amounts to a call to
	 * modprobe with modprobe just returning 0.
	 */
	mutex_lock(&kmod_dup_mutex);
	list_del_rcu(&kmod_req->list);
	synchronize_rcu();
	mutex_unlock(&kmod_dup_mutex);
	kfree(kmod_req);
}

static void kmod_dup_request_complete(struct work_struct *work)
{
	struct kmod_dup_req *kmod_req;

	kmod_req = container_of(work, struct kmod_dup_req, complete_work);

	/*
	 * This will ensure that the kernel lets all the waiters know
	 * it's time to check the return value. It's time to go home.
	 */
	complete_all(&kmod_req->first_req_done);

	/*
	 * Now that we have allowed prior request_module() calls to go on
	 * with life, let's schedule deleting this entry. We don't have
	 * to do it right away, but we *eventually* want to do it so that
	 * this doesn't linger forever, as it is just a boot optimization
	 * for possible abuses of vmalloc() incurred by finit_module()
	 * thrashing.
	 */
	queue_delayed_work(system_wq, &kmod_req->delete_work, 60 * HZ);
}

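/**
 * kmod_dup_request_exists_wait - check for an in-flight duplicate request
 * @module_name: name of the module being requested
 * @wait: true for request_module(), false for request_module_nowait()
 * @dup_ret: set to the result the duplicate caller should use
 *
 * Returns true if this request duplicates one already in flight; *dup_ret
 * then holds the value the caller should return. Returns false when no
 * duplicate was found: either this is the first such request (registered
 * for tracking only when @wait is true) or the tracking entry could not
 * be allocated.
 */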
bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
{
	struct kmod_dup_req *kmod_req, *new_kmod_req;
	int ret;

	/*
	 * Pre-allocate the entry in case we have to use it later,
	 * so we avoid allocating while holding the mutex.
	 */
	new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL);
	if (!new_kmod_req)
		return false;

	memcpy(new_kmod_req->name, module_name, strlen(module_name));
	INIT_WORK(&new_kmod_req->complete_work, kmod_dup_request_complete);
	INIT_DELAYED_WORK(&new_kmod_req->delete_work, kmod_dup_request_delete);
	init_completion(&new_kmod_req->first_req_done);

	mutex_lock(&kmod_dup_mutex);

	kmod_req = kmod_dup_request_lookup(module_name);
	if (!kmod_req) {
		/*
		 * If the first request that came through for a module
		 * was with request_module_nowait() we cannot wait for it
		 * and share its return value with other users which may
		 * have used request_module() and need a proper return value,
		 * so just skip using it as an anchor.
		 *
		 * If a prior request to this one came through with
		 * request_module() though, then a request_module_nowait()
		 * would benefit from duplicate detection.
		 */
		if (!wait) {
			kfree(new_kmod_req);
			pr_debug("New request_module_nowait() for %s -- cannot track duplicates for this request\n", module_name);
			mutex_unlock(&kmod_dup_mutex);
			return false;
		}

		/*
		 * There was no duplicate, just add the request so we can
		 * keep tabs on duplicates later.
		 */
		pr_debug("New request_module() for %s\n", module_name);
		list_add_rcu(&new_kmod_req->list, &dup_kmod_reqs);
		mutex_unlock(&kmod_dup_mutex);
		return false;
	}
	mutex_unlock(&kmod_dup_mutex);

	/* We are dealing with a duplicate request now */
	kfree(new_kmod_req);

	/*
	 * To fix these, try to use try_then_request_module() instead, as
	 * that will check if the component you are looking for is present
	 * or not. You could also just queue a single request to load the
	 * module once, instead of having each and every thing you need try
	 * to request the module.
	 *
	 * Duplicate request_module() calls can cause quite a bit of wasted
	 * vmalloc() space when racing with userspace.
	 */
	if (enable_dups_trace)
		WARN(1, "module-autoload: duplicate request for module %s\n", module_name);
	else
		pr_warn("module-autoload: duplicate request for module %s\n", module_name);

	if (!wait) {
		/*
		 * If request_module_nowait() was used then the user just
		 * wanted to issue the request, and if another module request
		 * with the same name was already on its way, we don't care
		 * for the return value either. Let duplicate
		 * request_module_nowait() calls bail out right away.
		 */
		*dup_ret = 0;
		return true;
	}

	/*
	 * If a duplicate request_module() was used they *may* care for
	 * the return value, so we have no other option but to wait for
	 * the first caller to complete. If the first caller used
	 * the request_module_nowait() call, subsequent callers will
	 * deal with the compromise of getting a successful call with this
	 * optimization enabled ...
	 */
	ret = wait_for_completion_state(&kmod_req->first_req_done,
					TASK_UNINTERRUPTIBLE | TASK_KILLABLE);
	if (ret) {
		*dup_ret = ret;
		return true;
	}

	/* Now the duplicate request has the exact same return value as the first request */
	*dup_ret = kmod_req->dup_ret;

	return true;
}

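/**
 * kmod_dup_request_announce - share a finished request's result with waiters
 * @module_name: name of the module the request was for
 * @ret: return value of the finished request
 *
 * If the module is being tracked on the duplicate list, record @ret and
 * queue the completion work so duplicate waiters are woken up and the
 * entry is eventually deleted.
 */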
void kmod_dup_request_announce(char *module_name, int ret)
{
	struct kmod_dup_req *kmod_req;

	mutex_lock(&kmod_dup_mutex);

	kmod_req = kmod_dup_request_lookup(module_name);
	if (!kmod_req)
		goto out;

	kmod_req->dup_ret = ret;

	/*
	 * If we complete() here we may allow duplicate threads
	 * to continue before the first one that submitted the
	 * request. We're also in no rush, and given that each and
	 * every bounce back to userspace is slow, we avoid that
	 * with a slight delay here. So queue up the completion
	 * and let duplicates suffer, they just have to wait a tad
	 * bit longer. There is no rush. But we also don't want to
	 * hold the caller up forever or introduce any boot delays.
	 */
	queue_work(system_wq, &kmod_req->complete_work);

out:
	mutex_unlock(&kmod_dup_mutex);
}
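
/*
 * Illustrative pairing of the two entry points above: a simplified
 * sketch of how a caller such as kernel/module/kmod.c might use them,
 * not the verbatim call sites:
 *
 *	if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret))
 *		return dup_ret;
 *
 *	ret = ...;	// invoke modprobe for module_name
 *	kmod_dup_request_announce(module_name, ret);
 *	return ret;
 */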
247