xref: /linux-6.15/net/switchdev/switchdev.c (revision 91cf8ece)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
37ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
18850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1947f8328bSScott Feldman #include <linux/if_bridge.h>
207ea6eb3fSJiri Pirko #include <linux/list.h>
21793f4014SJiri Pirko #include <linux/workqueue.h>
2287aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
234f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
24007f790cSJiri Pirko #include <net/switchdev.h>
25007f790cSJiri Pirko 
/* FIFO of pending deferred switchdev operations; protected by deferred_lock. */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);
28793f4014SJiri Pirko 
29793f4014SJiri Pirko typedef void switchdev_deferred_func_t(struct net_device *dev,
30793f4014SJiri Pirko 				       const void *data);
31793f4014SJiri Pirko 
32793f4014SJiri Pirko struct switchdev_deferred_item {
33793f4014SJiri Pirko 	struct list_head list;
34793f4014SJiri Pirko 	struct net_device *dev;
35793f4014SJiri Pirko 	switchdev_deferred_func_t *func;
36793f4014SJiri Pirko 	unsigned long data[0];
37793f4014SJiri Pirko };
38793f4014SJiri Pirko 
39793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
40793f4014SJiri Pirko {
41793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
42793f4014SJiri Pirko 
43793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
44793f4014SJiri Pirko 	if (list_empty(&deferred)) {
45793f4014SJiri Pirko 		dfitem = NULL;
46793f4014SJiri Pirko 		goto unlock;
47793f4014SJiri Pirko 	}
48793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
49793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
50793f4014SJiri Pirko 	list_del(&dfitem->list);
51793f4014SJiri Pirko unlock:
52793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
53793f4014SJiri Pirko 	return dfitem;
54793f4014SJiri Pirko }
55793f4014SJiri Pirko 
56793f4014SJiri Pirko /**
57793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
58793f4014SJiri Pirko  *
59793f4014SJiri Pirko  *	Called to flush the ops currently queued in deferred ops queue.
60793f4014SJiri Pirko  *	rtnl_lock must be held.
61793f4014SJiri Pirko  */
62793f4014SJiri Pirko void switchdev_deferred_process(void)
63793f4014SJiri Pirko {
64793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
65793f4014SJiri Pirko 
66793f4014SJiri Pirko 	ASSERT_RTNL();
67793f4014SJiri Pirko 
68793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
69793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
70793f4014SJiri Pirko 		dev_put(dfitem->dev);
71793f4014SJiri Pirko 		kfree(dfitem);
72793f4014SJiri Pirko 	}
73793f4014SJiri Pirko }
74793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
75793f4014SJiri Pirko 
/* Workqueue handler: flush the deferred queue under rtnl_lock. */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
84793f4014SJiri Pirko 
85793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
86793f4014SJiri Pirko 				      const void *data, size_t data_len,
87793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
88793f4014SJiri Pirko {
89793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
90793f4014SJiri Pirko 
91793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
92793f4014SJiri Pirko 	if (!dfitem)
93793f4014SJiri Pirko 		return -ENOMEM;
94793f4014SJiri Pirko 	dfitem->dev = dev;
95793f4014SJiri Pirko 	dfitem->func = func;
96793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
97793f4014SJiri Pirko 	dev_hold(dev);
98793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
99793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
100793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
101793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
102793f4014SJiri Pirko 	return 0;
103793f4014SJiri Pirko }
104793f4014SJiri Pirko 
105d45224d6SFlorian Fainelli static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
106d45224d6SFlorian Fainelli 				      struct net_device *dev,
107f7fadf30SJiri Pirko 				      const struct switchdev_attr *attr,
1087ea6eb3fSJiri Pirko 				      struct switchdev_trans *trans)
1093094333dSScott Feldman {
110d45224d6SFlorian Fainelli 	int err;
111d45224d6SFlorian Fainelli 	int rc;
1123094333dSScott Feldman 
113d45224d6SFlorian Fainelli 	struct switchdev_notifier_port_attr_info attr_info = {
114d45224d6SFlorian Fainelli 		.attr = attr,
115d45224d6SFlorian Fainelli 		.trans = trans,
116d45224d6SFlorian Fainelli 		.handled = false,
117d45224d6SFlorian Fainelli 	};
1183094333dSScott Feldman 
119d45224d6SFlorian Fainelli 	rc = call_switchdev_blocking_notifiers(nt, dev,
120d45224d6SFlorian Fainelli 					       &attr_info.info, NULL);
121d45224d6SFlorian Fainelli 	err = notifier_to_errno(rc);
122d45224d6SFlorian Fainelli 	if (err) {
123d45224d6SFlorian Fainelli 		WARN_ON(!attr_info.handled);
1243094333dSScott Feldman 		return err;
1253094333dSScott Feldman 	}
1263094333dSScott Feldman 
127d45224d6SFlorian Fainelli 	if (!attr_info.handled)
128d45224d6SFlorian Fainelli 		return -EOPNOTSUPP;
129d45224d6SFlorian Fainelli 
130d45224d6SFlorian Fainelli 	return 0;
131d45224d6SFlorian Fainelli }
132d45224d6SFlorian Fainelli 
/* Apply an attribute immediately via the two-phase prepare/commit
 * transaction.  Returns the prepare-phase error, or the (warned-about)
 * commit-phase error.
 */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everythings was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	/* Commit failing after a successful prepare is a driver bug. */
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}
1650bc05d58SJiri Pirko 
1660bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
1670bc05d58SJiri Pirko 					     const void *data)
1680bc05d58SJiri Pirko {
1690bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
1700bc05d58SJiri Pirko 	int err;
1710bc05d58SJiri Pirko 
1720bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
1730bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
1740bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
1750bc05d58SJiri Pirko 			   err, attr->id);
1767ceb2afbSElad Raz 	if (attr->complete)
1777ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
1780bc05d58SJiri Pirko }
1790bc05d58SJiri Pirko 
/* Snapshot @attr into the deferred queue; it is applied later under
 * rtnl_lock by switchdev_port_attr_set_deferred().
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
1860bc05d58SJiri Pirko 
1870bc05d58SJiri Pirko /**
1880bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
1890bc05d58SJiri Pirko  *
1900bc05d58SJiri Pirko  *	@dev: port device
1910bc05d58SJiri Pirko  *	@attr: attribute to set
1920bc05d58SJiri Pirko  *
1930bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
1940bc05d58SJiri Pirko  *	system is not left in a partially updated state due to
1950bc05d58SJiri Pirko  *	failure from driver/device.
1960bc05d58SJiri Pirko  *
1970bc05d58SJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
1980bc05d58SJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
1990bc05d58SJiri Pirko  */
2000bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
2010bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
2020bc05d58SJiri Pirko {
2030bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
2040bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
2050bc05d58SJiri Pirko 	ASSERT_RTNL();
2060bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
2070bc05d58SJiri Pirko }
2083094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
2093094333dSScott Feldman 
210e258d919SScott Feldman static size_t switchdev_obj_size(const struct switchdev_obj *obj)
211e258d919SScott Feldman {
212e258d919SScott Feldman 	switch (obj->id) {
213e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
214e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_vlan);
2154d41e125SElad Raz 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2164d41e125SElad Raz 		return sizeof(struct switchdev_obj_port_mdb);
21747d5b6dbSAndrew Lunn 	case SWITCHDEV_OBJ_ID_HOST_MDB:
21847d5b6dbSAndrew Lunn 		return sizeof(struct switchdev_obj_port_mdb);
219e258d919SScott Feldman 	default:
220e258d919SScott Feldman 		BUG();
221e258d919SScott Feldman 	}
222e258d919SScott Feldman 	return 0;
223e258d919SScott Feldman }
224e258d919SScott Feldman 
225d17d9f5eSPetr Machata static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
226d17d9f5eSPetr Machata 				     struct net_device *dev,
227648b4a99SJiri Pirko 				     const struct switchdev_obj *obj,
22869b7320eSPetr Machata 				     struct switchdev_trans *trans,
22969b7320eSPetr Machata 				     struct netlink_ext_ack *extack)
230491d0f15SScott Feldman {
231d17d9f5eSPetr Machata 	int rc;
232d17d9f5eSPetr Machata 	int err;
233491d0f15SScott Feldman 
234d17d9f5eSPetr Machata 	struct switchdev_notifier_port_obj_info obj_info = {
235d17d9f5eSPetr Machata 		.obj = obj,
236d17d9f5eSPetr Machata 		.trans = trans,
237d17d9f5eSPetr Machata 		.handled = false,
238d17d9f5eSPetr Machata 	};
239491d0f15SScott Feldman 
240479c86dcSPetr Machata 	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
241d17d9f5eSPetr Machata 	err = notifier_to_errno(rc);
242d17d9f5eSPetr Machata 	if (err) {
243d17d9f5eSPetr Machata 		WARN_ON(!obj_info.handled);
244491d0f15SScott Feldman 		return err;
245491d0f15SScott Feldman 	}
246d17d9f5eSPetr Machata 	if (!obj_info.handled)
247d17d9f5eSPetr Machata 		return -EOPNOTSUPP;
248d17d9f5eSPetr Machata 	return 0;
249d17d9f5eSPetr Machata }
250491d0f15SScott Feldman 
/* Add an object immediately via the two-phase prepare/commit transaction.
 * Caller must hold rtnl_lock.
 */
static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err)
		return err;

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everythings was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	/* Commit failing after a successful prepare is a driver bug. */
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
2854d429c5dSJiri Pirko 
2864d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
2874d429c5dSJiri Pirko 					    const void *data)
2884d429c5dSJiri Pirko {
2894d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
2904d429c5dSJiri Pirko 	int err;
2914d429c5dSJiri Pirko 
29269b7320eSPetr Machata 	err = switchdev_port_obj_add_now(dev, obj, NULL);
2934d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
2944d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
2954d429c5dSJiri Pirko 			   err, obj->id);
2967ceb2afbSElad Raz 	if (obj->complete)
2977ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
2984d429c5dSJiri Pirko }
2994d429c5dSJiri Pirko 
/* Snapshot @obj (sized by its id) into the deferred queue; it is added
 * later under rtnl_lock by switchdev_port_obj_add_deferred().
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
306491d0f15SScott Feldman 
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack, passed to drivers on the immediate path
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
3314d429c5dSJiri Pirko 
/* Delete an object immediately.  Deletions are notified with a NULL
 * transaction and NULL extack.
 */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}
3384d429c5dSJiri Pirko 
3394d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
3404d429c5dSJiri Pirko 					    const void *data)
3414d429c5dSJiri Pirko {
3424d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
3434d429c5dSJiri Pirko 	int err;
3444d429c5dSJiri Pirko 
3454d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
3464d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
3474d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
3484d429c5dSJiri Pirko 			   err, obj->id);
3497ceb2afbSElad Raz 	if (obj->complete)
3507ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
3514d429c5dSJiri Pirko }
3524d429c5dSJiri Pirko 
/* Snapshot @obj (sized by its id) into the deferred queue; it is deleted
 * later under rtnl_lock by switchdev_port_obj_del_deferred().
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
3594d429c5dSJiri Pirko 
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
379491d0f15SScott Feldman 
/* Two notifier chains: an atomic chain (callable from atomic context) and
 * a blocking chain (notifiers may sleep).
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
38203bf0c28SJiri Pirko 
38303bf0c28SJiri Pirko /**
384ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
38503bf0c28SJiri Pirko  *	@nb: notifier_block
38603bf0c28SJiri Pirko  *
387ff5cf100SArkadi Sharshevsky  *	Register switch device notifier.
38803bf0c28SJiri Pirko  */
389ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
39003bf0c28SJiri Pirko {
391ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
39203bf0c28SJiri Pirko }
393ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
39403bf0c28SJiri Pirko 
39503bf0c28SJiri Pirko /**
396ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
39703bf0c28SJiri Pirko  *	@nb: notifier_block
39803bf0c28SJiri Pirko  *
39903bf0c28SJiri Pirko  *	Unregister switch device notifier.
40003bf0c28SJiri Pirko  */
401ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
40203bf0c28SJiri Pirko {
403ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
40403bf0c28SJiri Pirko }
405ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
40603bf0c28SJiri Pirko 
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack, stored in @info before delivery
 *
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
4248a44dbb2SRoopa Prabhu 
/* Register a notifier on the blocking switchdev chain. */
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
432a93e3b17SPetr Machata 
/* Unregister a notifier from the blocking switchdev chain. */
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
440a93e3b17SPetr Machata 
/* Call all notifiers on the blocking chain; @dev and @extack are stored
 * into @info before delivery.
 */
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
451a93e3b17SPetr Machata 
/* Recurse through @dev and its lower devices until one passes @check_cb,
 * then invoke @add_cb on it.  Returns -EOPNOTSUPP when no device in the
 * tree claimed the object; hard errors are propagated.
 */
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
			      extack);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
491f30f0601SPetr Machata int switchdev_handle_port_obj_add(struct net_device *dev,
492f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
493f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
494f30f0601SPetr Machata 			int (*add_cb)(struct net_device *dev,
495f30f0601SPetr Machata 				      const struct switchdev_obj *obj,
49669213513SPetr Machata 				      struct switchdev_trans *trans,
49769213513SPetr Machata 				      struct netlink_ext_ack *extack))
498f30f0601SPetr Machata {
499f30f0601SPetr Machata 	int err;
500f30f0601SPetr Machata 
501f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
502f30f0601SPetr Machata 					      add_cb);
503f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
504f30f0601SPetr Machata 		err = 0;
505f30f0601SPetr Machata 	return err;
506f30f0601SPetr Machata }
507f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
508f30f0601SPetr Machata 
/* Recurse through @dev and its lower devices until one passes @check_cb,
 * then invoke @del_cb on it.  Returns -EOPNOTSUPP when no device in the
 * tree claimed the object; hard errors are propagated.
 */
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return del_cb(dev, port_obj_info->obj);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
541f30f0601SPetr Machata 
542f30f0601SPetr Machata int switchdev_handle_port_obj_del(struct net_device *dev,
543f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
544f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
545f30f0601SPetr Machata 			int (*del_cb)(struct net_device *dev,
546f30f0601SPetr Machata 				      const struct switchdev_obj *obj))
547f30f0601SPetr Machata {
548f30f0601SPetr Machata 	int err;
549f30f0601SPetr Machata 
550f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
551f30f0601SPetr Machata 					      del_cb);
552f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
553f30f0601SPetr Machata 		err = 0;
554f30f0601SPetr Machata 	return err;
555f30f0601SPetr Machata }
556f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
5571cb33af1SFlorian Fainelli 
/* Recurse through @dev and its lower devices until one passes @check_cb,
 * then invoke @set_cb on it.  Returns -EOPNOTSUPP when no device in the
 * tree claimed the attribute; hard errors are propagated.
 */
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* handled is only checked on success by the notifier caller. */
		port_attr_info->handled = true;
		return set_cb(dev, port_attr_info->attr,
			      port_attr_info->trans);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
5911cb33af1SFlorian Fainelli 
5921cb33af1SFlorian Fainelli int switchdev_handle_port_attr_set(struct net_device *dev,
5931cb33af1SFlorian Fainelli 			struct switchdev_notifier_port_attr_info *port_attr_info,
5941cb33af1SFlorian Fainelli 			bool (*check_cb)(const struct net_device *dev),
5951cb33af1SFlorian Fainelli 			int (*set_cb)(struct net_device *dev,
5961cb33af1SFlorian Fainelli 				      const struct switchdev_attr *attr,
5971cb33af1SFlorian Fainelli 				      struct switchdev_trans *trans))
5981cb33af1SFlorian Fainelli {
5991cb33af1SFlorian Fainelli 	int err;
6001cb33af1SFlorian Fainelli 
6011cb33af1SFlorian Fainelli 	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
6021cb33af1SFlorian Fainelli 					       set_cb);
6031cb33af1SFlorian Fainelli 	if (err == -EOPNOTSUPP)
6041cb33af1SFlorian Fainelli 		err = 0;
6051cb33af1SFlorian Fainelli 	return err;
6061cb33af1SFlorian Fainelli }
6071cb33af1SFlorian Fainelli EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
608