xref: /linux-6.15/net/switchdev/switchdev.c (revision ffb68fc5)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2007f790cSJiri Pirko /*
3007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
47ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
5f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
6007f790cSJiri Pirko  */
7007f790cSJiri Pirko 
8007f790cSJiri Pirko #include <linux/kernel.h>
9007f790cSJiri Pirko #include <linux/types.h>
10007f790cSJiri Pirko #include <linux/init.h>
1103bf0c28SJiri Pirko #include <linux/mutex.h>
1203bf0c28SJiri Pirko #include <linux/notifier.h>
13007f790cSJiri Pirko #include <linux/netdevice.h>
14850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1547f8328bSScott Feldman #include <linux/if_bridge.h>
167ea6eb3fSJiri Pirko #include <linux/list.h>
17793f4014SJiri Pirko #include <linux/workqueue.h>
1887aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
194f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
20007f790cSJiri Pirko #include <net/switchdev.h>
21007f790cSJiri Pirko 
/* Ops queued for deferred execution; list protected by deferred_lock. */
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

/* Handler invoked when a deferred op is finally processed (under rtnl). */
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

/* One queued deferred op: target device, handler to run, and a copy of
 * the op's payload in the trailing flexible array.
 */
struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
34793f4014SJiri Pirko 
35793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
36793f4014SJiri Pirko {
37793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
38793f4014SJiri Pirko 
39793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
40793f4014SJiri Pirko 	if (list_empty(&deferred)) {
41793f4014SJiri Pirko 		dfitem = NULL;
42793f4014SJiri Pirko 		goto unlock;
43793f4014SJiri Pirko 	}
44793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
45793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
46793f4014SJiri Pirko 	list_del(&dfitem->list);
47793f4014SJiri Pirko unlock:
48793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
49793f4014SJiri Pirko 	return dfitem;
50793f4014SJiri Pirko }
51793f4014SJiri Pirko 
52793f4014SJiri Pirko /**
53793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
54793f4014SJiri Pirko  *
55793f4014SJiri Pirko  *	Called to flush the ops currently queued in deferred ops queue.
56793f4014SJiri Pirko  *	rtnl_lock must be held.
57793f4014SJiri Pirko  */
58793f4014SJiri Pirko void switchdev_deferred_process(void)
59793f4014SJiri Pirko {
60793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
61793f4014SJiri Pirko 
62793f4014SJiri Pirko 	ASSERT_RTNL();
63793f4014SJiri Pirko 
64793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
65793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
66793f4014SJiri Pirko 		dev_put(dfitem->dev);
67793f4014SJiri Pirko 		kfree(dfitem);
68793f4014SJiri Pirko 	}
69793f4014SJiri Pirko }
70793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
71793f4014SJiri Pirko 
/* Worker that drains the deferred ops queue; scheduled from
 * switchdev_deferred_enqueue().  Takes rtnl_lock because
 * switchdev_deferred_process() requires it.
 */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
80793f4014SJiri Pirko 
/* Queue @func to run later (under rtnl) for @dev, with a private copy of
 * @data.  Returns 0 on success or -ENOMEM.  GFP_ATOMIC allocation so this
 * is callable from atomic context.
 */
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	/* Copy the payload; the caller's buffer may be gone before the
	 * deferred op actually runs.
	 */
	memcpy(dfitem->data, data, data_len);
	/* Hold the device until the op has run; dropped in
	 * switchdev_deferred_process().
	 */
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
100793f4014SJiri Pirko 
101d45224d6SFlorian Fainelli static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
102d45224d6SFlorian Fainelli 				      struct net_device *dev,
103f7fadf30SJiri Pirko 				      const struct switchdev_attr *attr,
1047ea6eb3fSJiri Pirko 				      struct switchdev_trans *trans)
1053094333dSScott Feldman {
106d45224d6SFlorian Fainelli 	int err;
107d45224d6SFlorian Fainelli 	int rc;
1083094333dSScott Feldman 
109d45224d6SFlorian Fainelli 	struct switchdev_notifier_port_attr_info attr_info = {
110d45224d6SFlorian Fainelli 		.attr = attr,
111d45224d6SFlorian Fainelli 		.trans = trans,
112d45224d6SFlorian Fainelli 		.handled = false,
113d45224d6SFlorian Fainelli 	};
1143094333dSScott Feldman 
115d45224d6SFlorian Fainelli 	rc = call_switchdev_blocking_notifiers(nt, dev,
116d45224d6SFlorian Fainelli 					       &attr_info.info, NULL);
117d45224d6SFlorian Fainelli 	err = notifier_to_errno(rc);
118d45224d6SFlorian Fainelli 	if (err) {
119d45224d6SFlorian Fainelli 		WARN_ON(!attr_info.handled);
1203094333dSScott Feldman 		return err;
1213094333dSScott Feldman 	}
1223094333dSScott Feldman 
123d45224d6SFlorian Fainelli 	if (!attr_info.handled)
124d45224d6SFlorian Fainelli 		return -EOPNOTSUPP;
125d45224d6SFlorian Fainelli 
126d45224d6SFlorian Fainelli 	return 0;
127d45224d6SFlorian Fainelli }
128d45224d6SFlorian Fainelli 
1290bc05d58SJiri Pirko static int switchdev_port_attr_set_now(struct net_device *dev,
130f7fadf30SJiri Pirko 				       const struct switchdev_attr *attr)
1313094333dSScott Feldman {
1327ea6eb3fSJiri Pirko 	struct switchdev_trans trans;
1333094333dSScott Feldman 	int err;
1343094333dSScott Feldman 
1353094333dSScott Feldman 	/* Phase I: prepare for attr set. Driver/device should fail
1363094333dSScott Feldman 	 * here if there are going to be issues in the commit phase,
1373094333dSScott Feldman 	 * such as lack of resources or support.  The driver/device
1383094333dSScott Feldman 	 * should reserve resources needed for the commit phase here,
1393094333dSScott Feldman 	 * but should not commit the attr.
1403094333dSScott Feldman 	 */
1413094333dSScott Feldman 
142f623ab7fSJiri Pirko 	trans.ph_prepare = true;
143d45224d6SFlorian Fainelli 	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
144d45224d6SFlorian Fainelli 					 &trans);
14591cf8eceSFlorian Fainelli 	if (err)
1463094333dSScott Feldman 		return err;
1473094333dSScott Feldman 
1483094333dSScott Feldman 	/* Phase II: commit attr set.  This cannot fail as a fault
1493094333dSScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
1503094333dSScott Feldman 	 * because the driver said everythings was OK in phase I.
1513094333dSScott Feldman 	 */
1523094333dSScott Feldman 
153f623ab7fSJiri Pirko 	trans.ph_prepare = false;
154d45224d6SFlorian Fainelli 	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
155d45224d6SFlorian Fainelli 					 &trans);
156e9fdaec0SScott Feldman 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
157e9fdaec0SScott Feldman 	     dev->name, attr->id);
1583094333dSScott Feldman 
1593094333dSScott Feldman 	return err;
1603094333dSScott Feldman }
1610bc05d58SJiri Pirko 
1620bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
1630bc05d58SJiri Pirko 					     const void *data)
1640bc05d58SJiri Pirko {
1650bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
1660bc05d58SJiri Pirko 	int err;
1670bc05d58SJiri Pirko 
1680bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
1690bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
1700bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
1710bc05d58SJiri Pirko 			   err, attr->id);
1727ceb2afbSElad Raz 	if (attr->complete)
1737ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
1740bc05d58SJiri Pirko }
1750bc05d58SJiri Pirko 
1760bc05d58SJiri Pirko static int switchdev_port_attr_set_defer(struct net_device *dev,
1770bc05d58SJiri Pirko 					 const struct switchdev_attr *attr)
1780bc05d58SJiri Pirko {
1790bc05d58SJiri Pirko 	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
1800bc05d58SJiri Pirko 					  switchdev_port_attr_set_deferred);
1810bc05d58SJiri Pirko }
1820bc05d58SJiri Pirko 
1830bc05d58SJiri Pirko /**
1840bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
1850bc05d58SJiri Pirko  *
1860bc05d58SJiri Pirko  *	@dev: port device
1870bc05d58SJiri Pirko  *	@attr: attribute to set
1880bc05d58SJiri Pirko  *
1890bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
1900bc05d58SJiri Pirko  *	system is not left in a partially updated state due to
1910bc05d58SJiri Pirko  *	failure from driver/device.
1920bc05d58SJiri Pirko  *
1930bc05d58SJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
1940bc05d58SJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
1950bc05d58SJiri Pirko  */
1960bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
1970bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
1980bc05d58SJiri Pirko {
1990bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
2000bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
2010bc05d58SJiri Pirko 	ASSERT_RTNL();
2020bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
2030bc05d58SJiri Pirko }
2043094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
2053094333dSScott Feldman 
206e258d919SScott Feldman static size_t switchdev_obj_size(const struct switchdev_obj *obj)
207e258d919SScott Feldman {
208e258d919SScott Feldman 	switch (obj->id) {
209e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
210e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_vlan);
2114d41e125SElad Raz 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2124d41e125SElad Raz 		return sizeof(struct switchdev_obj_port_mdb);
21347d5b6dbSAndrew Lunn 	case SWITCHDEV_OBJ_ID_HOST_MDB:
21447d5b6dbSAndrew Lunn 		return sizeof(struct switchdev_obj_port_mdb);
215e258d919SScott Feldman 	default:
216e258d919SScott Feldman 		BUG();
217e258d919SScott Feldman 	}
218e258d919SScott Feldman 	return 0;
219e258d919SScott Feldman }
220e258d919SScott Feldman 
221d17d9f5eSPetr Machata static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
222d17d9f5eSPetr Machata 				     struct net_device *dev,
223648b4a99SJiri Pirko 				     const struct switchdev_obj *obj,
22469b7320eSPetr Machata 				     struct netlink_ext_ack *extack)
225491d0f15SScott Feldman {
226d17d9f5eSPetr Machata 	int rc;
227d17d9f5eSPetr Machata 	int err;
228491d0f15SScott Feldman 
229d17d9f5eSPetr Machata 	struct switchdev_notifier_port_obj_info obj_info = {
230d17d9f5eSPetr Machata 		.obj = obj,
231d17d9f5eSPetr Machata 		.handled = false,
232d17d9f5eSPetr Machata 	};
233491d0f15SScott Feldman 
234479c86dcSPetr Machata 	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
235d17d9f5eSPetr Machata 	err = notifier_to_errno(rc);
236d17d9f5eSPetr Machata 	if (err) {
237d17d9f5eSPetr Machata 		WARN_ON(!obj_info.handled);
238491d0f15SScott Feldman 		return err;
239491d0f15SScott Feldman 	}
240d17d9f5eSPetr Machata 	if (!obj_info.handled)
241d17d9f5eSPetr Machata 		return -EOPNOTSUPP;
242d17d9f5eSPetr Machata 	return 0;
243d17d9f5eSPetr Machata }
244491d0f15SScott Feldman 
2454d429c5dSJiri Pirko static int switchdev_port_obj_add_now(struct net_device *dev,
24669b7320eSPetr Machata 				      const struct switchdev_obj *obj,
24769b7320eSPetr Machata 				      struct netlink_ext_ack *extack)
248491d0f15SScott Feldman {
249491d0f15SScott Feldman 	ASSERT_RTNL();
250491d0f15SScott Feldman 
251*ffb68fc5SVladimir Oltean 	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
252*ffb68fc5SVladimir Oltean 					 dev, obj, extack);
253491d0f15SScott Feldman }
2544d429c5dSJiri Pirko 
2554d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
2564d429c5dSJiri Pirko 					    const void *data)
2574d429c5dSJiri Pirko {
2584d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
2594d429c5dSJiri Pirko 	int err;
2604d429c5dSJiri Pirko 
26169b7320eSPetr Machata 	err = switchdev_port_obj_add_now(dev, obj, NULL);
2624d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
2634d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
2644d429c5dSJiri Pirko 			   err, obj->id);
2657ceb2afbSElad Raz 	if (obj->complete)
2667ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
2674d429c5dSJiri Pirko }
2684d429c5dSJiri Pirko 
2694d429c5dSJiri Pirko static int switchdev_port_obj_add_defer(struct net_device *dev,
2704d429c5dSJiri Pirko 					const struct switchdev_obj *obj)
2714d429c5dSJiri Pirko {
272e258d919SScott Feldman 	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
2734d429c5dSJiri Pirko 					  switchdev_port_obj_add_deferred);
2744d429c5dSJiri Pirko }
275491d0f15SScott Feldman 
276491d0f15SScott Feldman /**
2774d429c5dSJiri Pirko  *	switchdev_port_obj_add - Add port object
278491d0f15SScott Feldman  *
279491d0f15SScott Feldman  *	@dev: port device
2804d429c5dSJiri Pirko  *	@obj: object to add
281c8af73f0SAndrew Lunn  *	@extack: netlink extended ack
2824d429c5dSJiri Pirko  *
2834d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
2844d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
285491d0f15SScott Feldman  */
2864d429c5dSJiri Pirko int switchdev_port_obj_add(struct net_device *dev,
28769b7320eSPetr Machata 			   const struct switchdev_obj *obj,
28869b7320eSPetr Machata 			   struct netlink_ext_ack *extack)
2894d429c5dSJiri Pirko {
2904d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
2914d429c5dSJiri Pirko 		return switchdev_port_obj_add_defer(dev, obj);
2924d429c5dSJiri Pirko 	ASSERT_RTNL();
29369b7320eSPetr Machata 	return switchdev_port_obj_add_now(dev, obj, extack);
2944d429c5dSJiri Pirko }
2954d429c5dSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
2964d429c5dSJiri Pirko 
2974d429c5dSJiri Pirko static int switchdev_port_obj_del_now(struct net_device *dev,
298648b4a99SJiri Pirko 				      const struct switchdev_obj *obj)
299491d0f15SScott Feldman {
300d17d9f5eSPetr Machata 	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
301*ffb68fc5SVladimir Oltean 					 dev, obj, NULL);
302491d0f15SScott Feldman }
3034d429c5dSJiri Pirko 
3044d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
3054d429c5dSJiri Pirko 					    const void *data)
3064d429c5dSJiri Pirko {
3074d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
3084d429c5dSJiri Pirko 	int err;
3094d429c5dSJiri Pirko 
3104d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
3114d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
3124d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
3134d429c5dSJiri Pirko 			   err, obj->id);
3147ceb2afbSElad Raz 	if (obj->complete)
3157ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
3164d429c5dSJiri Pirko }
3174d429c5dSJiri Pirko 
3184d429c5dSJiri Pirko static int switchdev_port_obj_del_defer(struct net_device *dev,
3194d429c5dSJiri Pirko 					const struct switchdev_obj *obj)
3204d429c5dSJiri Pirko {
321e258d919SScott Feldman 	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
3224d429c5dSJiri Pirko 					  switchdev_port_obj_del_deferred);
3234d429c5dSJiri Pirko }
3244d429c5dSJiri Pirko 
3254d429c5dSJiri Pirko /**
3264d429c5dSJiri Pirko  *	switchdev_port_obj_del - Delete port object
3274d429c5dSJiri Pirko  *
3284d429c5dSJiri Pirko  *	@dev: port device
3294d429c5dSJiri Pirko  *	@obj: object to delete
3304d429c5dSJiri Pirko  *
3314d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
3324d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
3334d429c5dSJiri Pirko  */
3344d429c5dSJiri Pirko int switchdev_port_obj_del(struct net_device *dev,
3354d429c5dSJiri Pirko 			   const struct switchdev_obj *obj)
3364d429c5dSJiri Pirko {
3374d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
3384d429c5dSJiri Pirko 		return switchdev_port_obj_del_defer(dev, obj);
3394d429c5dSJiri Pirko 	ASSERT_RTNL();
3404d429c5dSJiri Pirko 	return switchdev_port_obj_del_now(dev, obj);
3414d429c5dSJiri Pirko }
342491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
343491d0f15SScott Feldman 
/* Atomic chain for notifiers that may run in atomic context; blocking
 * chain for process-context notifications (port obj/attr ops).
 */
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
34603bf0c28SJiri Pirko 
34703bf0c28SJiri Pirko /**
348ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
34903bf0c28SJiri Pirko  *	@nb: notifier_block
35003bf0c28SJiri Pirko  *
351ff5cf100SArkadi Sharshevsky  *	Register switch device notifier.
35203bf0c28SJiri Pirko  */
353ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
35403bf0c28SJiri Pirko {
355ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
35603bf0c28SJiri Pirko }
357ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
35803bf0c28SJiri Pirko 
35903bf0c28SJiri Pirko /**
360ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
36103bf0c28SJiri Pirko  *	@nb: notifier_block
36203bf0c28SJiri Pirko  *
36303bf0c28SJiri Pirko  *	Unregister switch device notifier.
36403bf0c28SJiri Pirko  */
365ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
36603bf0c28SJiri Pirko {
367ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
36803bf0c28SJiri Pirko }
369ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
37003bf0c28SJiri Pirko 
37103bf0c28SJiri Pirko /**
372ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
37303bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
37403bf0c28SJiri Pirko  *	@dev: port device
37503bf0c28SJiri Pirko  *	@info: notifier information data
376ea6754aeSTian Tao  *	@extack: netlink extended ack
377ff5cf100SArkadi Sharshevsky  *	Call all network notifier blocks.
37803bf0c28SJiri Pirko  */
379ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
3806685987cSPetr Machata 			     struct switchdev_notifier_info *info,
3816685987cSPetr Machata 			     struct netlink_ext_ack *extack)
38203bf0c28SJiri Pirko {
38303bf0c28SJiri Pirko 	info->dev = dev;
3846685987cSPetr Machata 	info->extack = extack;
385ff5cf100SArkadi Sharshevsky 	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
38603bf0c28SJiri Pirko }
387ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
3888a44dbb2SRoopa Prabhu 
389a93e3b17SPetr Machata int register_switchdev_blocking_notifier(struct notifier_block *nb)
390a93e3b17SPetr Machata {
391a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
392a93e3b17SPetr Machata 
393a93e3b17SPetr Machata 	return blocking_notifier_chain_register(chain, nb);
394a93e3b17SPetr Machata }
395a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
396a93e3b17SPetr Machata 
397a93e3b17SPetr Machata int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
398a93e3b17SPetr Machata {
399a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
400a93e3b17SPetr Machata 
401a93e3b17SPetr Machata 	return blocking_notifier_chain_unregister(chain, nb);
402a93e3b17SPetr Machata }
403a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
404a93e3b17SPetr Machata 
405a93e3b17SPetr Machata int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
406479c86dcSPetr Machata 				      struct switchdev_notifier_info *info,
407479c86dcSPetr Machata 				      struct netlink_ext_ack *extack)
408a93e3b17SPetr Machata {
409a93e3b17SPetr Machata 	info->dev = dev;
410479c86dcSPetr Machata 	info->extack = extack;
411a93e3b17SPetr Machata 	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
412a93e3b17SPetr Machata 					    val, info);
413a93e3b17SPetr Machata }
414a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
415a93e3b17SPetr Machata 
416f30f0601SPetr Machata static int __switchdev_handle_port_obj_add(struct net_device *dev,
417f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
418f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
419f30f0601SPetr Machata 			int (*add_cb)(struct net_device *dev,
420f30f0601SPetr Machata 				      const struct switchdev_obj *obj,
42169213513SPetr Machata 				      struct netlink_ext_ack *extack))
422f30f0601SPetr Machata {
42369213513SPetr Machata 	struct netlink_ext_ack *extack;
424f30f0601SPetr Machata 	struct net_device *lower_dev;
425f30f0601SPetr Machata 	struct list_head *iter;
426f30f0601SPetr Machata 	int err = -EOPNOTSUPP;
427f30f0601SPetr Machata 
42869213513SPetr Machata 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
42969213513SPetr Machata 
430f30f0601SPetr Machata 	if (check_cb(dev)) {
431f30f0601SPetr Machata 		/* This flag is only checked if the return value is success. */
432f30f0601SPetr Machata 		port_obj_info->handled = true;
433*ffb68fc5SVladimir Oltean 		return add_cb(dev, port_obj_info->obj, extack);
434f30f0601SPetr Machata 	}
435f30f0601SPetr Machata 
436f30f0601SPetr Machata 	/* Switch ports might be stacked under e.g. a LAG. Ignore the
437f30f0601SPetr Machata 	 * unsupported devices, another driver might be able to handle them. But
438f30f0601SPetr Machata 	 * propagate to the callers any hard errors.
439f30f0601SPetr Machata 	 *
440f30f0601SPetr Machata 	 * If the driver does its own bookkeeping of stacked ports, it's not
441f30f0601SPetr Machata 	 * necessary to go through this helper.
442f30f0601SPetr Machata 	 */
443f30f0601SPetr Machata 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
44407c6f980SRussell King 		if (netif_is_bridge_master(lower_dev))
44507c6f980SRussell King 			continue;
44607c6f980SRussell King 
447f30f0601SPetr Machata 		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
448f30f0601SPetr Machata 						      check_cb, add_cb);
449f30f0601SPetr Machata 		if (err && err != -EOPNOTSUPP)
450f30f0601SPetr Machata 			return err;
451f30f0601SPetr Machata 	}
452f30f0601SPetr Machata 
453f30f0601SPetr Machata 	return err;
454f30f0601SPetr Machata }
455f30f0601SPetr Machata 
456f30f0601SPetr Machata int switchdev_handle_port_obj_add(struct net_device *dev,
457f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
458f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
459f30f0601SPetr Machata 			int (*add_cb)(struct net_device *dev,
460f30f0601SPetr Machata 				      const struct switchdev_obj *obj,
46169213513SPetr Machata 				      struct netlink_ext_ack *extack))
462f30f0601SPetr Machata {
463f30f0601SPetr Machata 	int err;
464f30f0601SPetr Machata 
465f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
466f30f0601SPetr Machata 					      add_cb);
467f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
468f30f0601SPetr Machata 		err = 0;
469f30f0601SPetr Machata 	return err;
470f30f0601SPetr Machata }
471f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
472f30f0601SPetr Machata 
473f30f0601SPetr Machata static int __switchdev_handle_port_obj_del(struct net_device *dev,
474f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
475f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
476f30f0601SPetr Machata 			int (*del_cb)(struct net_device *dev,
477f30f0601SPetr Machata 				      const struct switchdev_obj *obj))
478f30f0601SPetr Machata {
479f30f0601SPetr Machata 	struct net_device *lower_dev;
480f30f0601SPetr Machata 	struct list_head *iter;
481f30f0601SPetr Machata 	int err = -EOPNOTSUPP;
482f30f0601SPetr Machata 
483f30f0601SPetr Machata 	if (check_cb(dev)) {
484f30f0601SPetr Machata 		/* This flag is only checked if the return value is success. */
485f30f0601SPetr Machata 		port_obj_info->handled = true;
486f30f0601SPetr Machata 		return del_cb(dev, port_obj_info->obj);
487f30f0601SPetr Machata 	}
488f30f0601SPetr Machata 
489f30f0601SPetr Machata 	/* Switch ports might be stacked under e.g. a LAG. Ignore the
490f30f0601SPetr Machata 	 * unsupported devices, another driver might be able to handle them. But
491f30f0601SPetr Machata 	 * propagate to the callers any hard errors.
492f30f0601SPetr Machata 	 *
493f30f0601SPetr Machata 	 * If the driver does its own bookkeeping of stacked ports, it's not
494f30f0601SPetr Machata 	 * necessary to go through this helper.
495f30f0601SPetr Machata 	 */
496f30f0601SPetr Machata 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
49707c6f980SRussell King 		if (netif_is_bridge_master(lower_dev))
49807c6f980SRussell King 			continue;
49907c6f980SRussell King 
500f30f0601SPetr Machata 		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
501f30f0601SPetr Machata 						      check_cb, del_cb);
502f30f0601SPetr Machata 		if (err && err != -EOPNOTSUPP)
503f30f0601SPetr Machata 			return err;
504f30f0601SPetr Machata 	}
505f30f0601SPetr Machata 
506f30f0601SPetr Machata 	return err;
507f30f0601SPetr Machata }
508f30f0601SPetr Machata 
509f30f0601SPetr Machata int switchdev_handle_port_obj_del(struct net_device *dev,
510f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
511f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
512f30f0601SPetr Machata 			int (*del_cb)(struct net_device *dev,
513f30f0601SPetr Machata 				      const struct switchdev_obj *obj))
514f30f0601SPetr Machata {
515f30f0601SPetr Machata 	int err;
516f30f0601SPetr Machata 
517f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
518f30f0601SPetr Machata 					      del_cb);
519f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
520f30f0601SPetr Machata 		err = 0;
521f30f0601SPetr Machata 	return err;
522f30f0601SPetr Machata }
523f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
5241cb33af1SFlorian Fainelli 
5251cb33af1SFlorian Fainelli static int __switchdev_handle_port_attr_set(struct net_device *dev,
5261cb33af1SFlorian Fainelli 			struct switchdev_notifier_port_attr_info *port_attr_info,
5271cb33af1SFlorian Fainelli 			bool (*check_cb)(const struct net_device *dev),
5281cb33af1SFlorian Fainelli 			int (*set_cb)(struct net_device *dev,
5291cb33af1SFlorian Fainelli 				      const struct switchdev_attr *attr,
5301cb33af1SFlorian Fainelli 				      struct switchdev_trans *trans))
5311cb33af1SFlorian Fainelli {
5321cb33af1SFlorian Fainelli 	struct net_device *lower_dev;
5331cb33af1SFlorian Fainelli 	struct list_head *iter;
5341cb33af1SFlorian Fainelli 	int err = -EOPNOTSUPP;
5351cb33af1SFlorian Fainelli 
5361cb33af1SFlorian Fainelli 	if (check_cb(dev)) {
5371cb33af1SFlorian Fainelli 		port_attr_info->handled = true;
5381cb33af1SFlorian Fainelli 		return set_cb(dev, port_attr_info->attr,
5391cb33af1SFlorian Fainelli 			      port_attr_info->trans);
5401cb33af1SFlorian Fainelli 	}
5411cb33af1SFlorian Fainelli 
5421cb33af1SFlorian Fainelli 	/* Switch ports might be stacked under e.g. a LAG. Ignore the
5431cb33af1SFlorian Fainelli 	 * unsupported devices, another driver might be able to handle them. But
5441cb33af1SFlorian Fainelli 	 * propagate to the callers any hard errors.
5451cb33af1SFlorian Fainelli 	 *
5461cb33af1SFlorian Fainelli 	 * If the driver does its own bookkeeping of stacked ports, it's not
5471cb33af1SFlorian Fainelli 	 * necessary to go through this helper.
5481cb33af1SFlorian Fainelli 	 */
5491cb33af1SFlorian Fainelli 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
55007c6f980SRussell King 		if (netif_is_bridge_master(lower_dev))
55107c6f980SRussell King 			continue;
55207c6f980SRussell King 
5531cb33af1SFlorian Fainelli 		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
5541cb33af1SFlorian Fainelli 						       check_cb, set_cb);
5551cb33af1SFlorian Fainelli 		if (err && err != -EOPNOTSUPP)
5561cb33af1SFlorian Fainelli 			return err;
5571cb33af1SFlorian Fainelli 	}
5581cb33af1SFlorian Fainelli 
5591cb33af1SFlorian Fainelli 	return err;
5601cb33af1SFlorian Fainelli }
5611cb33af1SFlorian Fainelli 
5621cb33af1SFlorian Fainelli int switchdev_handle_port_attr_set(struct net_device *dev,
5631cb33af1SFlorian Fainelli 			struct switchdev_notifier_port_attr_info *port_attr_info,
5641cb33af1SFlorian Fainelli 			bool (*check_cb)(const struct net_device *dev),
5651cb33af1SFlorian Fainelli 			int (*set_cb)(struct net_device *dev,
5661cb33af1SFlorian Fainelli 				      const struct switchdev_attr *attr,
5671cb33af1SFlorian Fainelli 				      struct switchdev_trans *trans))
5681cb33af1SFlorian Fainelli {
5691cb33af1SFlorian Fainelli 	int err;
5701cb33af1SFlorian Fainelli 
5711cb33af1SFlorian Fainelli 	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
5721cb33af1SFlorian Fainelli 					       set_cb);
5731cb33af1SFlorian Fainelli 	if (err == -EOPNOTSUPP)
5741cb33af1SFlorian Fainelli 		err = 0;
5751cb33af1SFlorian Fainelli 	return err;
5761cb33af1SFlorian Fainelli }
5771cb33af1SFlorian Fainelli EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
578