xref: /linux-6.15/net/switchdev/switchdev.c (revision ea6754ae)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2007f790cSJiri Pirko /*
3007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
47ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
5f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
6007f790cSJiri Pirko  */
7007f790cSJiri Pirko 
8007f790cSJiri Pirko #include <linux/kernel.h>
9007f790cSJiri Pirko #include <linux/types.h>
10007f790cSJiri Pirko #include <linux/init.h>
1103bf0c28SJiri Pirko #include <linux/mutex.h>
1203bf0c28SJiri Pirko #include <linux/notifier.h>
13007f790cSJiri Pirko #include <linux/netdevice.h>
14850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1547f8328bSScott Feldman #include <linux/if_bridge.h>
167ea6eb3fSJiri Pirko #include <linux/list.h>
17793f4014SJiri Pirko #include <linux/workqueue.h>
1887aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
194f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
20007f790cSJiri Pirko #include <net/switchdev.h>
21007f790cSJiri Pirko 
/* Queue of deferred switchdev operations; entries are appended under
 * deferred_lock and drained by switchdev_deferred_process().
 */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

/* Callback invoked under rtnl_lock when a deferred item is processed;
 * @data points at the item's inline payload copy.
 */
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

/* One queued deferred operation. @data is a flexible-array copy of the
 * caller's attr/obj, sized by switchdev_deferred_enqueue(); the item
 * holds a reference on @dev until it is processed.
 */
struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
34793f4014SJiri Pirko 
35793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
36793f4014SJiri Pirko {
37793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
38793f4014SJiri Pirko 
39793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
40793f4014SJiri Pirko 	if (list_empty(&deferred)) {
41793f4014SJiri Pirko 		dfitem = NULL;
42793f4014SJiri Pirko 		goto unlock;
43793f4014SJiri Pirko 	}
44793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
45793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
46793f4014SJiri Pirko 	list_del(&dfitem->list);
47793f4014SJiri Pirko unlock:
48793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
49793f4014SJiri Pirko 	return dfitem;
50793f4014SJiri Pirko }
51793f4014SJiri Pirko 
52793f4014SJiri Pirko /**
53793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
54793f4014SJiri Pirko  *
55793f4014SJiri Pirko  *	Called to flush the ops currently queued in deferred ops queue.
56793f4014SJiri Pirko  *	rtnl_lock must be held.
57793f4014SJiri Pirko  */
58793f4014SJiri Pirko void switchdev_deferred_process(void)
59793f4014SJiri Pirko {
60793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
61793f4014SJiri Pirko 
62793f4014SJiri Pirko 	ASSERT_RTNL();
63793f4014SJiri Pirko 
64793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
65793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
66793f4014SJiri Pirko 		dev_put(dfitem->dev);
67793f4014SJiri Pirko 		kfree(dfitem);
68793f4014SJiri Pirko 	}
69793f4014SJiri Pirko }
70793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
71793f4014SJiri Pirko 
/* Workqueue callback: flush the deferred ops queue under rtnl_lock, as
 * required by switchdev_deferred_process().
 */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
80793f4014SJiri Pirko 
81793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
82793f4014SJiri Pirko 				      const void *data, size_t data_len,
83793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
84793f4014SJiri Pirko {
85793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
86793f4014SJiri Pirko 
87793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
88793f4014SJiri Pirko 	if (!dfitem)
89793f4014SJiri Pirko 		return -ENOMEM;
90793f4014SJiri Pirko 	dfitem->dev = dev;
91793f4014SJiri Pirko 	dfitem->func = func;
92793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
93793f4014SJiri Pirko 	dev_hold(dev);
94793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
95793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
96793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
97793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
98793f4014SJiri Pirko 	return 0;
99793f4014SJiri Pirko }
100793f4014SJiri Pirko 
101d45224d6SFlorian Fainelli static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
102d45224d6SFlorian Fainelli 				      struct net_device *dev,
103f7fadf30SJiri Pirko 				      const struct switchdev_attr *attr,
1047ea6eb3fSJiri Pirko 				      struct switchdev_trans *trans)
1053094333dSScott Feldman {
106d45224d6SFlorian Fainelli 	int err;
107d45224d6SFlorian Fainelli 	int rc;
1083094333dSScott Feldman 
109d45224d6SFlorian Fainelli 	struct switchdev_notifier_port_attr_info attr_info = {
110d45224d6SFlorian Fainelli 		.attr = attr,
111d45224d6SFlorian Fainelli 		.trans = trans,
112d45224d6SFlorian Fainelli 		.handled = false,
113d45224d6SFlorian Fainelli 	};
1143094333dSScott Feldman 
115d45224d6SFlorian Fainelli 	rc = call_switchdev_blocking_notifiers(nt, dev,
116d45224d6SFlorian Fainelli 					       &attr_info.info, NULL);
117d45224d6SFlorian Fainelli 	err = notifier_to_errno(rc);
118d45224d6SFlorian Fainelli 	if (err) {
119d45224d6SFlorian Fainelli 		WARN_ON(!attr_info.handled);
1203094333dSScott Feldman 		return err;
1213094333dSScott Feldman 	}
1223094333dSScott Feldman 
123d45224d6SFlorian Fainelli 	if (!attr_info.handled)
124d45224d6SFlorian Fainelli 		return -EOPNOTSUPP;
125d45224d6SFlorian Fainelli 
126d45224d6SFlorian Fainelli 	return 0;
127d45224d6SFlorian Fainelli }
128d45224d6SFlorian Fainelli 
/* Set an attribute synchronously using the two-phase (prepare/commit)
 * transaction model. Returns 0 or a negative errno from either phase.
 */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}
1610bc05d58SJiri Pirko 
1620bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
1630bc05d58SJiri Pirko 					     const void *data)
1640bc05d58SJiri Pirko {
1650bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
1660bc05d58SJiri Pirko 	int err;
1670bc05d58SJiri Pirko 
1680bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
1690bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
1700bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
1710bc05d58SJiri Pirko 			   err, attr->id);
1727ceb2afbSElad Raz 	if (attr->complete)
1737ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
1740bc05d58SJiri Pirko }
1750bc05d58SJiri Pirko 
/* Queue an attr set for deferred execution; the attr is copied by value
 * into the deferred item.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
1820bc05d58SJiri Pirko 
1830bc05d58SJiri Pirko /**
1840bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
1850bc05d58SJiri Pirko  *
1860bc05d58SJiri Pirko  *	@dev: port device
1870bc05d58SJiri Pirko  *	@attr: attribute to set
1880bc05d58SJiri Pirko  *
1890bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
1900bc05d58SJiri Pirko  *	system is not left in a partially updated state due to
1910bc05d58SJiri Pirko  *	failure from driver/device.
1920bc05d58SJiri Pirko  *
1930bc05d58SJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
1940bc05d58SJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
1950bc05d58SJiri Pirko  */
1960bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
1970bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
1980bc05d58SJiri Pirko {
1990bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
2000bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
2010bc05d58SJiri Pirko 	ASSERT_RTNL();
2020bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
2030bc05d58SJiri Pirko }
2043094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
2053094333dSScott Feldman 
206e258d919SScott Feldman static size_t switchdev_obj_size(const struct switchdev_obj *obj)
207e258d919SScott Feldman {
208e258d919SScott Feldman 	switch (obj->id) {
209e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
210e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_vlan);
2114d41e125SElad Raz 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2124d41e125SElad Raz 		return sizeof(struct switchdev_obj_port_mdb);
21347d5b6dbSAndrew Lunn 	case SWITCHDEV_OBJ_ID_HOST_MDB:
21447d5b6dbSAndrew Lunn 		return sizeof(struct switchdev_obj_port_mdb);
215e258d919SScott Feldman 	default:
216e258d919SScott Feldman 		BUG();
217e258d919SScott Feldman 	}
218e258d919SScott Feldman 	return 0;
219e258d919SScott Feldman }
220e258d919SScott Feldman 
221d17d9f5eSPetr Machata static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
222d17d9f5eSPetr Machata 				     struct net_device *dev,
223648b4a99SJiri Pirko 				     const struct switchdev_obj *obj,
22469b7320eSPetr Machata 				     struct switchdev_trans *trans,
22569b7320eSPetr Machata 				     struct netlink_ext_ack *extack)
226491d0f15SScott Feldman {
227d17d9f5eSPetr Machata 	int rc;
228d17d9f5eSPetr Machata 	int err;
229491d0f15SScott Feldman 
230d17d9f5eSPetr Machata 	struct switchdev_notifier_port_obj_info obj_info = {
231d17d9f5eSPetr Machata 		.obj = obj,
232d17d9f5eSPetr Machata 		.trans = trans,
233d17d9f5eSPetr Machata 		.handled = false,
234d17d9f5eSPetr Machata 	};
235491d0f15SScott Feldman 
236479c86dcSPetr Machata 	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
237d17d9f5eSPetr Machata 	err = notifier_to_errno(rc);
238d17d9f5eSPetr Machata 	if (err) {
239d17d9f5eSPetr Machata 		WARN_ON(!obj_info.handled);
240491d0f15SScott Feldman 		return err;
241491d0f15SScott Feldman 	}
242d17d9f5eSPetr Machata 	if (!obj_info.handled)
243d17d9f5eSPetr Machata 		return -EOPNOTSUPP;
244d17d9f5eSPetr Machata 	return 0;
245d17d9f5eSPetr Machata }
246491d0f15SScott Feldman 
/* Add an object synchronously using the two-phase (prepare/commit)
 * transaction model. Must be called under rtnl_lock.
 */
static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err)
		return err;

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
2814d429c5dSJiri Pirko 
2824d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
2834d429c5dSJiri Pirko 					    const void *data)
2844d429c5dSJiri Pirko {
2854d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
2864d429c5dSJiri Pirko 	int err;
2874d429c5dSJiri Pirko 
28869b7320eSPetr Machata 	err = switchdev_port_obj_add_now(dev, obj, NULL);
2894d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
2904d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
2914d429c5dSJiri Pirko 			   err, obj->id);
2927ceb2afbSElad Raz 	if (obj->complete)
2937ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
2944d429c5dSJiri Pirko }
2954d429c5dSJiri Pirko 
/* Queue an obj add for deferred execution; the concrete object is copied
 * by value (sized via switchdev_obj_size()) into the deferred item.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
302491d0f15SScott Feldman 
303491d0f15SScott Feldman /**
3044d429c5dSJiri Pirko  *	switchdev_port_obj_add - Add port object
305491d0f15SScott Feldman  *
306491d0f15SScott Feldman  *	@dev: port device
3074d429c5dSJiri Pirko  *	@obj: object to add
308c8af73f0SAndrew Lunn  *	@extack: netlink extended ack
3094d429c5dSJiri Pirko  *
3104d429c5dSJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
3114d429c5dSJiri Pirko  *	system is not left in a partially updated state due to
3124d429c5dSJiri Pirko  *	failure from driver/device.
3134d429c5dSJiri Pirko  *
3144d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
3154d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
316491d0f15SScott Feldman  */
3174d429c5dSJiri Pirko int switchdev_port_obj_add(struct net_device *dev,
31869b7320eSPetr Machata 			   const struct switchdev_obj *obj,
31969b7320eSPetr Machata 			   struct netlink_ext_ack *extack)
3204d429c5dSJiri Pirko {
3214d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
3224d429c5dSJiri Pirko 		return switchdev_port_obj_add_defer(dev, obj);
3234d429c5dSJiri Pirko 	ASSERT_RTNL();
32469b7320eSPetr Machata 	return switchdev_port_obj_add_now(dev, obj, extack);
3254d429c5dSJiri Pirko }
3264d429c5dSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
3274d429c5dSJiri Pirko 
/* Delete an object synchronously. Deletion is single-phase (no
 * prepare/commit transaction), so this is a direct notify.
 */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}
3344d429c5dSJiri Pirko 
3354d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
3364d429c5dSJiri Pirko 					    const void *data)
3374d429c5dSJiri Pirko {
3384d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
3394d429c5dSJiri Pirko 	int err;
3404d429c5dSJiri Pirko 
3414d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
3424d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
3434d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
3444d429c5dSJiri Pirko 			   err, obj->id);
3457ceb2afbSElad Raz 	if (obj->complete)
3467ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
3474d429c5dSJiri Pirko }
3484d429c5dSJiri Pirko 
/* Queue an obj delete for deferred execution; the concrete object is
 * copied by value (sized via switchdev_obj_size()) into the item.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
3554d429c5dSJiri Pirko 
3564d429c5dSJiri Pirko /**
3574d429c5dSJiri Pirko  *	switchdev_port_obj_del - Delete port object
3584d429c5dSJiri Pirko  *
3594d429c5dSJiri Pirko  *	@dev: port device
3604d429c5dSJiri Pirko  *	@obj: object to delete
3614d429c5dSJiri Pirko  *
3624d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
3634d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
3644d429c5dSJiri Pirko  */
3654d429c5dSJiri Pirko int switchdev_port_obj_del(struct net_device *dev,
3664d429c5dSJiri Pirko 			   const struct switchdev_obj *obj)
3674d429c5dSJiri Pirko {
3684d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
3694d429c5dSJiri Pirko 		return switchdev_port_obj_del_defer(dev, obj);
3704d429c5dSJiri Pirko 	ASSERT_RTNL();
3714d429c5dSJiri Pirko 	return switchdev_port_obj_del_now(dev, obj);
3724d429c5dSJiri Pirko }
373491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
374491d0f15SScott Feldman 
/* Two notifier chains: the atomic one may be called from atomic context
 * (e.g. FDB events); the blocking one carries port obj/attr requests.
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
37703bf0c28SJiri Pirko 
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier on the atomic chain.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
38903bf0c28SJiri Pirko 
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier from the atomic chain.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
40103bf0c28SJiri Pirko 
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all notifier blocks registered on the atomic switchdev chain.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
4198a44dbb2SRoopa Prabhu 
420a93e3b17SPetr Machata int register_switchdev_blocking_notifier(struct notifier_block *nb)
421a93e3b17SPetr Machata {
422a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
423a93e3b17SPetr Machata 
424a93e3b17SPetr Machata 	return blocking_notifier_chain_register(chain, nb);
425a93e3b17SPetr Machata }
426a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
427a93e3b17SPetr Machata 
428a93e3b17SPetr Machata int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
429a93e3b17SPetr Machata {
430a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
431a93e3b17SPetr Machata 
432a93e3b17SPetr Machata 	return blocking_notifier_chain_unregister(chain, nb);
433a93e3b17SPetr Machata }
434a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
435a93e3b17SPetr Machata 
436a93e3b17SPetr Machata int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
437479c86dcSPetr Machata 				      struct switchdev_notifier_info *info,
438479c86dcSPetr Machata 				      struct netlink_ext_ack *extack)
439a93e3b17SPetr Machata {
440a93e3b17SPetr Machata 	info->dev = dev;
441479c86dcSPetr Machata 	info->extack = extack;
442a93e3b17SPetr Machata 	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
443a93e3b17SPetr Machata 					    val, info);
444a93e3b17SPetr Machata }
445a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
446a93e3b17SPetr Machata 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, invoking @add_cb on the first match per branch.
 * Returns -EOPNOTSUPP if no device in the tree handled the object.
 */
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
			      extack);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not recurse into bridges sitting below this device. */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
488f30f0601SPetr Machata 
489f30f0601SPetr Machata int switchdev_handle_port_obj_add(struct net_device *dev,
490f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
491f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
492f30f0601SPetr Machata 			int (*add_cb)(struct net_device *dev,
493f30f0601SPetr Machata 				      const struct switchdev_obj *obj,
49469213513SPetr Machata 				      struct switchdev_trans *trans,
49569213513SPetr Machata 				      struct netlink_ext_ack *extack))
496f30f0601SPetr Machata {
497f30f0601SPetr Machata 	int err;
498f30f0601SPetr Machata 
499f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
500f30f0601SPetr Machata 					      add_cb);
501f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
502f30f0601SPetr Machata 		err = 0;
503f30f0601SPetr Machata 	return err;
504f30f0601SPetr Machata }
505f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
506f30f0601SPetr Machata 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, invoking @del_cb on the first match per branch.
 * Returns -EOPNOTSUPP if no device in the tree handled the object.
 */
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return del_cb(dev, port_obj_info->obj);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not recurse into bridges sitting below this device. */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
543f30f0601SPetr Machata int switchdev_handle_port_obj_del(struct net_device *dev,
544f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
545f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
546f30f0601SPetr Machata 			int (*del_cb)(struct net_device *dev,
547f30f0601SPetr Machata 				      const struct switchdev_obj *obj))
548f30f0601SPetr Machata {
549f30f0601SPetr Machata 	int err;
550f30f0601SPetr Machata 
551f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
552f30f0601SPetr Machata 					      del_cb);
553f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
554f30f0601SPetr Machata 		err = 0;
555f30f0601SPetr Machata 	return err;
556f30f0601SPetr Machata }
557f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
5581cb33af1SFlorian Fainelli 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, invoking @set_cb on the first match per branch.
 * Returns -EOPNOTSUPP if no device in the tree handled the attribute.
 */
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_attr_info->handled = true;
		return set_cb(dev, port_attr_info->attr,
			      port_attr_info->trans);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not recurse into bridges sitting below this device. */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
5951cb33af1SFlorian Fainelli 
5961cb33af1SFlorian Fainelli int switchdev_handle_port_attr_set(struct net_device *dev,
5971cb33af1SFlorian Fainelli 			struct switchdev_notifier_port_attr_info *port_attr_info,
5981cb33af1SFlorian Fainelli 			bool (*check_cb)(const struct net_device *dev),
5991cb33af1SFlorian Fainelli 			int (*set_cb)(struct net_device *dev,
6001cb33af1SFlorian Fainelli 				      const struct switchdev_attr *attr,
6011cb33af1SFlorian Fainelli 				      struct switchdev_trans *trans))
6021cb33af1SFlorian Fainelli {
6031cb33af1SFlorian Fainelli 	int err;
6041cb33af1SFlorian Fainelli 
6051cb33af1SFlorian Fainelli 	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
6061cb33af1SFlorian Fainelli 					       set_cb);
6071cb33af1SFlorian Fainelli 	if (err == -EOPNOTSUPP)
6081cb33af1SFlorian Fainelli 		err = 0;
6091cb33af1SFlorian Fainelli 	return err;
6101cb33af1SFlorian Fainelli }
6111cb33af1SFlorian Fainelli EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
612