xref: /linux-6.15/net/devlink/core.c (revision 6ef8f7da)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2016 Jiri Pirko <[email protected]>
5  */
6 
7 #include <net/genetlink.h>
8 
9 #include "devl_internal.h"
10 
/* All devlink instances, indexed by devlink->index. XA_FLAGS_ALLOC enables
 * ID allocation (see xa_alloc_cyclic() in devlink_alloc_ns()).
 */
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
12 
13 void *devlink_priv(struct devlink *devlink)
14 {
15 	return &devlink->priv;
16 }
17 EXPORT_SYMBOL_GPL(devlink_priv);
18 
19 struct devlink *priv_to_devlink(void *priv)
20 {
21 	return container_of(priv, struct devlink, priv);
22 }
23 EXPORT_SYMBOL_GPL(priv_to_devlink);
24 
25 struct device *devlink_to_dev(const struct devlink *devlink)
26 {
27 	return devlink->dev;
28 }
29 EXPORT_SYMBOL_GPL(devlink_to_dev);
30 
31 struct net *devlink_net(const struct devlink *devlink)
32 {
33 	return read_pnet(&devlink->_net);
34 }
35 EXPORT_SYMBOL_GPL(devlink_net);
36 
/* Assert (under lockdep) that the per-instance devlink lock is held. */
void devl_assert_locked(struct devlink *devlink)
{
	lockdep_assert_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_assert_locked);
42 
#ifdef CONFIG_LOCKDEP
/* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected().
 * Only compiled in when lockdep is available; lock-held state cannot be
 * queried reliably otherwise.
 */
bool devl_lock_is_held(struct devlink *devlink)
{
	return lockdep_is_held(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock_is_held);
#endif
51 
/* Take the per-instance devlink lock. */
void devl_lock(struct devlink *devlink)
{
	mutex_lock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_lock);
57 
/* Try to take the per-instance lock without blocking.
 * Returns 1 if the lock was acquired, 0 if it is contended.
 */
int devl_trylock(struct devlink *devlink)
{
	return mutex_trylock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_trylock);
63 
/* Release the per-instance devlink lock. */
void devl_unlock(struct devlink *devlink)
{
	mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devl_unlock);
69 
70 /**
71  * devlink_try_get() - try to obtain a reference on a devlink instance
72  * @devlink: instance to reference
73  *
74  * Obtain a reference on a devlink instance. A reference on a devlink instance
75  * only implies that it's safe to take the instance lock. It does not imply
76  * that the instance is registered, use devl_is_registered() after taking
77  * the instance lock to check registration status.
78  */
79 struct devlink *__must_check devlink_try_get(struct devlink *devlink)
80 {
81 	if (refcount_inc_not_zero(&devlink->refcount))
82 		return devlink;
83 	return NULL;
84 }
85 
86 void devlink_put(struct devlink *devlink)
87 {
88 	if (refcount_dec_and_test(&devlink->refcount))
89 		kfree_rcu(devlink, rcu);
90 }
91 
92 struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
93 {
94 	struct devlink *devlink = NULL;
95 
96 	rcu_read_lock();
97 retry:
98 	devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
99 	if (!devlink)
100 		goto unlock;
101 
102 	if (!devlink_try_get(devlink))
103 		goto next;
104 	if (!net_eq(devlink_net(devlink), net)) {
105 		devlink_put(devlink);
106 		goto next;
107 	}
108 unlock:
109 	rcu_read_unlock();
110 	return devlink;
111 
112 next:
113 	(*indexp)++;
114 	goto retry;
115 }
116 
117 /**
118  *	devlink_set_features - Set devlink supported features
119  *
120  *	@devlink: devlink
121  *	@features: devlink support features
122  *
123  *	This interface allows us to set reload ops separatelly from
124  *	the devlink_alloc.
125  */
126 void devlink_set_features(struct devlink *devlink, u64 features)
127 {
128 	WARN_ON(features & DEVLINK_F_RELOAD &&
129 		!devlink_reload_supported(devlink->ops));
130 	devlink->features = features;
131 }
132 EXPORT_SYMBOL_GPL(devlink_set_features);
133 
134 /**
135  * devl_register - Register devlink instance
136  * @devlink: devlink
137  */
138 int devl_register(struct devlink *devlink)
139 {
140 	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
141 	devl_assert_locked(devlink);
142 
143 	xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
144 	devlink_notify_register(devlink);
145 
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(devl_register);
149 
/* Locked wrapper around devl_register() for callers that do not hold the
 * instance lock themselves.
 */
void devlink_register(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_register(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_register);
157 
158 /**
159  * devl_unregister - Unregister devlink instance
160  * @devlink: devlink
161  */
162 void devl_unregister(struct devlink *devlink)
163 {
164 	ASSERT_DEVLINK_REGISTERED(devlink);
165 	devl_assert_locked(devlink);
166 
167 	devlink_notify_unregister(devlink);
168 	xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
169 }
170 EXPORT_SYMBOL_GPL(devl_unregister);
171 
/* Locked wrapper around devl_unregister() for callers that do not hold the
 * instance lock themselves.
 */
void devlink_unregister(struct devlink *devlink)
{
	devl_lock(devlink);
	devl_unregister(devlink);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
179 
180 /**
181  *	devlink_alloc_ns - Allocate new devlink instance resources
182  *	in specific namespace
183  *
184  *	@ops: ops
185  *	@priv_size: size of user private data
186  *	@net: net namespace
187  *	@dev: parent device
188  *
189  *	Allocate new devlink instance resources, including devlink index
190  *	and name.
191  */
192 struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
193 				 size_t priv_size, struct net *net,
194 				 struct device *dev)
195 {
196 	struct devlink *devlink;
197 	static u32 last_id;
198 	int ret;
199 
200 	WARN_ON(!ops || !dev);
201 	if (!devlink_reload_actions_valid(ops))
202 		return NULL;
203 
204 	devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
205 	if (!devlink)
206 		return NULL;
207 
208 	ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
209 			      &last_id, GFP_KERNEL);
210 	if (ret < 0)
211 		goto err_xa_alloc;
212 
213 	devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
214 	ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
215 	if (ret)
216 		goto err_register_netdevice_notifier;
217 
218 	devlink->dev = dev;
219 	devlink->ops = ops;
220 	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
221 	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
222 	write_pnet(&devlink->_net, net);
223 	INIT_LIST_HEAD(&devlink->rate_list);
224 	INIT_LIST_HEAD(&devlink->linecard_list);
225 	INIT_LIST_HEAD(&devlink->sb_list);
226 	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
227 	INIT_LIST_HEAD(&devlink->resource_list);
228 	INIT_LIST_HEAD(&devlink->param_list);
229 	INIT_LIST_HEAD(&devlink->region_list);
230 	INIT_LIST_HEAD(&devlink->reporter_list);
231 	INIT_LIST_HEAD(&devlink->trap_list);
232 	INIT_LIST_HEAD(&devlink->trap_group_list);
233 	INIT_LIST_HEAD(&devlink->trap_policer_list);
234 	lockdep_register_key(&devlink->lock_key);
235 	mutex_init(&devlink->lock);
236 	lockdep_set_class(&devlink->lock, &devlink->lock_key);
237 	mutex_init(&devlink->reporters_lock);
238 	mutex_init(&devlink->linecards_lock);
239 	refcount_set(&devlink->refcount, 1);
240 
241 	return devlink;
242 
243 err_register_netdevice_notifier:
244 	xa_erase(&devlinks, devlink->index);
245 err_xa_alloc:
246 	kfree(devlink);
247 	return NULL;
248 }
249 EXPORT_SYMBOL_GPL(devlink_alloc_ns);
250 
251 /**
252  *	devlink_free - Free devlink instance resources
253  *
254  *	@devlink: devlink
255  */
256 void devlink_free(struct devlink *devlink)
257 {
258 	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
259 
260 	mutex_destroy(&devlink->linecards_lock);
261 	mutex_destroy(&devlink->reporters_lock);
262 	mutex_destroy(&devlink->lock);
263 	lockdep_unregister_key(&devlink->lock_key);
264 	WARN_ON(!list_empty(&devlink->trap_policer_list));
265 	WARN_ON(!list_empty(&devlink->trap_group_list));
266 	WARN_ON(!list_empty(&devlink->trap_list));
267 	WARN_ON(!list_empty(&devlink->reporter_list));
268 	WARN_ON(!list_empty(&devlink->region_list));
269 	WARN_ON(!list_empty(&devlink->param_list));
270 	WARN_ON(!list_empty(&devlink->resource_list));
271 	WARN_ON(!list_empty(&devlink->dpipe_table_list));
272 	WARN_ON(!list_empty(&devlink->sb_list));
273 	WARN_ON(!list_empty(&devlink->rate_list));
274 	WARN_ON(!list_empty(&devlink->linecard_list));
275 	WARN_ON(!xa_empty(&devlink->ports));
276 
277 	xa_destroy(&devlink->snapshot_ids);
278 	xa_destroy(&devlink->ports);
279 
280 	WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
281 						       &devlink->netdevice_nb));
282 
283 	xa_erase(&devlinks, devlink->index);
284 
285 	devlink_put(devlink);
286 }
287 EXPORT_SYMBOL_GPL(devlink_free);
288 
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
	struct devlink *devlink;
	u32 actions_performed;
	unsigned long index;
	int err;

	/* In case network namespace is getting destroyed, reload
	 * all devlink instances from this namespace into init_net.
	 */
	devlinks_xa_for_each_registered_get(net, index, devlink) {
		WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
		devl_lock(devlink);
		err = 0;
		/* Re-check under the lock: the instance may have been
		 * unregistered after the xarray walk found it.
		 */
		if (devl_is_registered(devlink))
			err = devlink_reload(devlink, &init_net,
					     DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
					     DEVLINK_RELOAD_LIMIT_UNSPEC,
					     &actions_performed, NULL);
		devl_unlock(devlink);
		/* Drop the reference taken by the _get iterator. */
		devlink_put(devlink);

		if (err && err != -EOPNOTSUPP)
			pr_warn("Failed to reload devlink instance into init_net\n");
	}
}
315 
/* Moves instances out of a dying namespace before it is torn down. */
static struct pernet_operations devlink_pernet_ops __net_initdata = {
	.pre_exit = devlink_pernet_pre_exit,
};
319 
320 static int __init devlink_init(void)
321 {
322 	int err;
323 
324 	err = genl_register_family(&devlink_nl_family);
325 	if (err)
326 		goto out;
327 	err = register_pernet_subsys(&devlink_pernet_ops);
328 
329 out:
330 	WARN_ON(err);
331 	return err;
332 }
333 
334 subsys_initcall(devlink_init);
335