xref: /linux-6.15/net/switchdev/switchdev.c (revision cf6def51)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2007f790cSJiri Pirko /*
3007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
47ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
5f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
6007f790cSJiri Pirko  */
7007f790cSJiri Pirko 
8007f790cSJiri Pirko #include <linux/kernel.h>
9007f790cSJiri Pirko #include <linux/types.h>
10007f790cSJiri Pirko #include <linux/init.h>
1103bf0c28SJiri Pirko #include <linux/mutex.h>
1203bf0c28SJiri Pirko #include <linux/notifier.h>
13007f790cSJiri Pirko #include <linux/netdevice.h>
14850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1547f8328bSScott Feldman #include <linux/if_bridge.h>
167ea6eb3fSJiri Pirko #include <linux/list.h>
17793f4014SJiri Pirko #include <linux/workqueue.h>
1887aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
194f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
20007f790cSJiri Pirko #include <net/switchdev.h>
21007f790cSJiri Pirko 
22793f4014SJiri Pirko static LIST_HEAD(deferred);
23793f4014SJiri Pirko static DEFINE_SPINLOCK(deferred_lock);
24793f4014SJiri Pirko 
/* Signature of a deferred op handler; invoked later under rtnl_lock with the
 * device and a pointer to the copied argument blob.
 */
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

/* One entry on the deferred ops queue. The caller's argument (e.g. a
 * struct switchdev_attr or switchdev_obj) is copied into the trailing
 * flexible array so it outlives the caller's stack frame.
 */
struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
34793f4014SJiri Pirko 
35793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
36793f4014SJiri Pirko {
37793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
38793f4014SJiri Pirko 
39793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
40793f4014SJiri Pirko 	if (list_empty(&deferred)) {
41793f4014SJiri Pirko 		dfitem = NULL;
42793f4014SJiri Pirko 		goto unlock;
43793f4014SJiri Pirko 	}
44793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
45793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
46793f4014SJiri Pirko 	list_del(&dfitem->list);
47793f4014SJiri Pirko unlock:
48793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
49793f4014SJiri Pirko 	return dfitem;
50793f4014SJiri Pirko }
51793f4014SJiri Pirko 
52793f4014SJiri Pirko /**
53793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
54793f4014SJiri Pirko  *
55793f4014SJiri Pirko  *	Called to flush the ops currently queued in deferred ops queue.
56793f4014SJiri Pirko  *	rtnl_lock must be held.
57793f4014SJiri Pirko  */
58793f4014SJiri Pirko void switchdev_deferred_process(void)
59793f4014SJiri Pirko {
60793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
61793f4014SJiri Pirko 
62793f4014SJiri Pirko 	ASSERT_RTNL();
63793f4014SJiri Pirko 
64793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
65793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
66793f4014SJiri Pirko 		dev_put(dfitem->dev);
67793f4014SJiri Pirko 		kfree(dfitem);
68793f4014SJiri Pirko 	}
69793f4014SJiri Pirko }
70793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
71793f4014SJiri Pirko 
/* Workqueue callback that drains the deferred ops queue; takes rtnl_lock
 * because switchdev_deferred_process() requires it to be held.
 */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
80793f4014SJiri Pirko 
81793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
82793f4014SJiri Pirko 				      const void *data, size_t data_len,
83793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
84793f4014SJiri Pirko {
85793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
86793f4014SJiri Pirko 
87793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
88793f4014SJiri Pirko 	if (!dfitem)
89793f4014SJiri Pirko 		return -ENOMEM;
90793f4014SJiri Pirko 	dfitem->dev = dev;
91793f4014SJiri Pirko 	dfitem->func = func;
92793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
93793f4014SJiri Pirko 	dev_hold(dev);
94793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
95793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
96793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
97793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
98793f4014SJiri Pirko 	return 0;
99793f4014SJiri Pirko }
100793f4014SJiri Pirko 
101d45224d6SFlorian Fainelli static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
102d45224d6SFlorian Fainelli 				      struct net_device *dev,
103f7fadf30SJiri Pirko 				      const struct switchdev_attr *attr,
1047ea6eb3fSJiri Pirko 				      struct switchdev_trans *trans)
1053094333dSScott Feldman {
106d45224d6SFlorian Fainelli 	int err;
107d45224d6SFlorian Fainelli 	int rc;
1083094333dSScott Feldman 
109d45224d6SFlorian Fainelli 	struct switchdev_notifier_port_attr_info attr_info = {
110d45224d6SFlorian Fainelli 		.attr = attr,
111d45224d6SFlorian Fainelli 		.trans = trans,
112d45224d6SFlorian Fainelli 		.handled = false,
113d45224d6SFlorian Fainelli 	};
1143094333dSScott Feldman 
115d45224d6SFlorian Fainelli 	rc = call_switchdev_blocking_notifiers(nt, dev,
116d45224d6SFlorian Fainelli 					       &attr_info.info, NULL);
117d45224d6SFlorian Fainelli 	err = notifier_to_errno(rc);
118d45224d6SFlorian Fainelli 	if (err) {
119d45224d6SFlorian Fainelli 		WARN_ON(!attr_info.handled);
1203094333dSScott Feldman 		return err;
1213094333dSScott Feldman 	}
1223094333dSScott Feldman 
123d45224d6SFlorian Fainelli 	if (!attr_info.handled)
124d45224d6SFlorian Fainelli 		return -EOPNOTSUPP;
125d45224d6SFlorian Fainelli 
126d45224d6SFlorian Fainelli 	return 0;
127d45224d6SFlorian Fainelli }
128d45224d6SFlorian Fainelli 
/* Apply @attr to @dev synchronously using the two-phase prepare/commit
 * transaction model. Returns 0 on success, -EOPNOTSUPP if no notifier
 * handled the attribute, or a negative errno from the prepare phase.
 */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	if (err)
		return err;

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everythings was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					 &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);

	return err;
}
1610bc05d58SJiri Pirko 
1620bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
1630bc05d58SJiri Pirko 					     const void *data)
1640bc05d58SJiri Pirko {
1650bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
1660bc05d58SJiri Pirko 	int err;
1670bc05d58SJiri Pirko 
1680bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
1690bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
1700bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
1710bc05d58SJiri Pirko 			   err, attr->id);
1727ceb2afbSElad Raz 	if (attr->complete)
1737ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
1740bc05d58SJiri Pirko }
1750bc05d58SJiri Pirko 
/* Queue a copy of @attr for later application in process context under
 * rtnl_lock; sizeof(*attr) bytes are copied into the queue item.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
1820bc05d58SJiri Pirko 
1830bc05d58SJiri Pirko /**
1840bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
1850bc05d58SJiri Pirko  *
1860bc05d58SJiri Pirko  *	@dev: port device
1870bc05d58SJiri Pirko  *	@attr: attribute to set
1880bc05d58SJiri Pirko  *
1890bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
1900bc05d58SJiri Pirko  *	system is not left in a partially updated state due to
1910bc05d58SJiri Pirko  *	failure from driver/device.
1920bc05d58SJiri Pirko  *
1930bc05d58SJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
1940bc05d58SJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
1950bc05d58SJiri Pirko  */
1960bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
1970bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
1980bc05d58SJiri Pirko {
1990bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
2000bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
2010bc05d58SJiri Pirko 	ASSERT_RTNL();
2020bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
2030bc05d58SJiri Pirko }
2043094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
2053094333dSScott Feldman 
206e258d919SScott Feldman static size_t switchdev_obj_size(const struct switchdev_obj *obj)
207e258d919SScott Feldman {
208e258d919SScott Feldman 	switch (obj->id) {
209e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
210e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_vlan);
2114d41e125SElad Raz 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2124d41e125SElad Raz 		return sizeof(struct switchdev_obj_port_mdb);
21347d5b6dbSAndrew Lunn 	case SWITCHDEV_OBJ_ID_HOST_MDB:
21447d5b6dbSAndrew Lunn 		return sizeof(struct switchdev_obj_port_mdb);
215e258d919SScott Feldman 	default:
216e258d919SScott Feldman 		BUG();
217e258d919SScott Feldman 	}
218e258d919SScott Feldman 	return 0;
219e258d919SScott Feldman }
220e258d919SScott Feldman 
221d17d9f5eSPetr Machata static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
222d17d9f5eSPetr Machata 				     struct net_device *dev,
223648b4a99SJiri Pirko 				     const struct switchdev_obj *obj,
22469b7320eSPetr Machata 				     struct netlink_ext_ack *extack)
225491d0f15SScott Feldman {
226d17d9f5eSPetr Machata 	int rc;
227d17d9f5eSPetr Machata 	int err;
228491d0f15SScott Feldman 
229d17d9f5eSPetr Machata 	struct switchdev_notifier_port_obj_info obj_info = {
230d17d9f5eSPetr Machata 		.obj = obj,
231d17d9f5eSPetr Machata 		.handled = false,
232d17d9f5eSPetr Machata 	};
233491d0f15SScott Feldman 
234479c86dcSPetr Machata 	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
235d17d9f5eSPetr Machata 	err = notifier_to_errno(rc);
236d17d9f5eSPetr Machata 	if (err) {
237d17d9f5eSPetr Machata 		WARN_ON(!obj_info.handled);
238491d0f15SScott Feldman 		return err;
239491d0f15SScott Feldman 	}
240d17d9f5eSPetr Machata 	if (!obj_info.handled)
241d17d9f5eSPetr Machata 		return -EOPNOTSUPP;
242d17d9f5eSPetr Machata 	return 0;
243d17d9f5eSPetr Machata }
244491d0f15SScott Feldman 
2454d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
2464d429c5dSJiri Pirko 					    const void *data)
2474d429c5dSJiri Pirko {
2484d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
2494d429c5dSJiri Pirko 	int err;
2504d429c5dSJiri Pirko 
251*cf6def51SVladimir Oltean 	ASSERT_RTNL();
252*cf6def51SVladimir Oltean 	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
253*cf6def51SVladimir Oltean 					dev, obj, NULL);
2544d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
2554d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
2564d429c5dSJiri Pirko 			   err, obj->id);
2577ceb2afbSElad Raz 	if (obj->complete)
2587ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
2594d429c5dSJiri Pirko }
2604d429c5dSJiri Pirko 
/* Queue a copy of @obj (sized per its id) for later addition in process
 * context under rtnl_lock.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
267491d0f15SScott Feldman 
268491d0f15SScott Feldman /**
2694d429c5dSJiri Pirko  *	switchdev_port_obj_add - Add port object
270491d0f15SScott Feldman  *
271491d0f15SScott Feldman  *	@dev: port device
2724d429c5dSJiri Pirko  *	@obj: object to add
273c8af73f0SAndrew Lunn  *	@extack: netlink extended ack
2744d429c5dSJiri Pirko  *
2754d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
2764d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
277491d0f15SScott Feldman  */
2784d429c5dSJiri Pirko int switchdev_port_obj_add(struct net_device *dev,
27969b7320eSPetr Machata 			   const struct switchdev_obj *obj,
28069b7320eSPetr Machata 			   struct netlink_ext_ack *extack)
2814d429c5dSJiri Pirko {
2824d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
2834d429c5dSJiri Pirko 		return switchdev_port_obj_add_defer(dev, obj);
2844d429c5dSJiri Pirko 	ASSERT_RTNL();
285*cf6def51SVladimir Oltean 	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
286*cf6def51SVladimir Oltean 					 dev, obj, extack);
2874d429c5dSJiri Pirko }
2884d429c5dSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
2894d429c5dSJiri Pirko 
/* Synchronously notify listeners to delete @obj from @dev; no extack is
 * propagated on the delete path.
 */
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}
2964d429c5dSJiri Pirko 
2974d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
2984d429c5dSJiri Pirko 					    const void *data)
2994d429c5dSJiri Pirko {
3004d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
3014d429c5dSJiri Pirko 	int err;
3024d429c5dSJiri Pirko 
3034d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
3044d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
3054d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
3064d429c5dSJiri Pirko 			   err, obj->id);
3077ceb2afbSElad Raz 	if (obj->complete)
3087ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
3094d429c5dSJiri Pirko }
3104d429c5dSJiri Pirko 
/* Queue a copy of @obj (sized per its id) for later deletion in process
 * context under rtnl_lock.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
3174d429c5dSJiri Pirko 
3184d429c5dSJiri Pirko /**
3194d429c5dSJiri Pirko  *	switchdev_port_obj_del - Delete port object
3204d429c5dSJiri Pirko  *
3214d429c5dSJiri Pirko  *	@dev: port device
3224d429c5dSJiri Pirko  *	@obj: object to delete
3234d429c5dSJiri Pirko  *
3244d429c5dSJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
3254d429c5dSJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
3264d429c5dSJiri Pirko  */
3274d429c5dSJiri Pirko int switchdev_port_obj_del(struct net_device *dev,
3284d429c5dSJiri Pirko 			   const struct switchdev_obj *obj)
3294d429c5dSJiri Pirko {
3304d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
3314d429c5dSJiri Pirko 		return switchdev_port_obj_del_defer(dev, obj);
3324d429c5dSJiri Pirko 	ASSERT_RTNL();
3334d429c5dSJiri Pirko 	return switchdev_port_obj_del_now(dev, obj);
3344d429c5dSJiri Pirko }
335491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
336491d0f15SScott Feldman 
337ff5cf100SArkadi Sharshevsky static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
338a93e3b17SPetr Machata static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
33903bf0c28SJiri Pirko 
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier. This chain is atomic; callbacks
 *	must not sleep.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
35103bf0c28SJiri Pirko 
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
36303bf0c28SJiri Pirko 
36403bf0c28SJiri Pirko /**
365ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
36603bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
36703bf0c28SJiri Pirko  *	@dev: port device
36803bf0c28SJiri Pirko  *	@info: notifier information data
369ea6754aeSTian Tao  *	@extack: netlink extended ack
370ff5cf100SArkadi Sharshevsky  *	Call all network notifier blocks.
37103bf0c28SJiri Pirko  */
372ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
3736685987cSPetr Machata 			     struct switchdev_notifier_info *info,
3746685987cSPetr Machata 			     struct netlink_ext_ack *extack)
37503bf0c28SJiri Pirko {
37603bf0c28SJiri Pirko 	info->dev = dev;
3776685987cSPetr Machata 	info->extack = extack;
378ff5cf100SArkadi Sharshevsky 	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
37903bf0c28SJiri Pirko }
380ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
3818a44dbb2SRoopa Prabhu 
382a93e3b17SPetr Machata int register_switchdev_blocking_notifier(struct notifier_block *nb)
383a93e3b17SPetr Machata {
384a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
385a93e3b17SPetr Machata 
386a93e3b17SPetr Machata 	return blocking_notifier_chain_register(chain, nb);
387a93e3b17SPetr Machata }
388a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
389a93e3b17SPetr Machata 
390a93e3b17SPetr Machata int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
391a93e3b17SPetr Machata {
392a93e3b17SPetr Machata 	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
393a93e3b17SPetr Machata 
394a93e3b17SPetr Machata 	return blocking_notifier_chain_unregister(chain, nb);
395a93e3b17SPetr Machata }
396a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
397a93e3b17SPetr Machata 
398a93e3b17SPetr Machata int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
399479c86dcSPetr Machata 				      struct switchdev_notifier_info *info,
400479c86dcSPetr Machata 				      struct netlink_ext_ack *extack)
401a93e3b17SPetr Machata {
402a93e3b17SPetr Machata 	info->dev = dev;
403479c86dcSPetr Machata 	info->extack = extack;
404a93e3b17SPetr Machata 	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
405a93e3b17SPetr Machata 					    val, info);
406a93e3b17SPetr Machata }
407a93e3b17SPetr Machata EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
408a93e3b17SPetr Machata 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, and call @add_cb on each. Bridge masters found among
 * lower devices are skipped. Returns -EOPNOTSUPP if no device matched,
 * otherwise the first hard error or 0.
 */
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return add_cb(dev, port_obj_info->obj, extack);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
448f30f0601SPetr Machata 
449f30f0601SPetr Machata int switchdev_handle_port_obj_add(struct net_device *dev,
450f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
451f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
452f30f0601SPetr Machata 			int (*add_cb)(struct net_device *dev,
453f30f0601SPetr Machata 				      const struct switchdev_obj *obj,
45469213513SPetr Machata 				      struct netlink_ext_ack *extack))
455f30f0601SPetr Machata {
456f30f0601SPetr Machata 	int err;
457f30f0601SPetr Machata 
458f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
459f30f0601SPetr Machata 					      add_cb);
460f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
461f30f0601SPetr Machata 		err = 0;
462f30f0601SPetr Machata 	return err;
463f30f0601SPetr Machata }
464f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
465f30f0601SPetr Machata 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, and call @del_cb on each. Bridge masters found among
 * lower devices are skipped. Returns -EOPNOTSUPP if no device matched,
 * otherwise the first hard error or 0.
 */
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return del_cb(dev, port_obj_info->obj);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
501f30f0601SPetr Machata 
502f30f0601SPetr Machata int switchdev_handle_port_obj_del(struct net_device *dev,
503f30f0601SPetr Machata 			struct switchdev_notifier_port_obj_info *port_obj_info,
504f30f0601SPetr Machata 			bool (*check_cb)(const struct net_device *dev),
505f30f0601SPetr Machata 			int (*del_cb)(struct net_device *dev,
506f30f0601SPetr Machata 				      const struct switchdev_obj *obj))
507f30f0601SPetr Machata {
508f30f0601SPetr Machata 	int err;
509f30f0601SPetr Machata 
510f30f0601SPetr Machata 	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
511f30f0601SPetr Machata 					      del_cb);
512f30f0601SPetr Machata 	if (err == -EOPNOTSUPP)
513f30f0601SPetr Machata 		err = 0;
514f30f0601SPetr Machata 	return err;
515f30f0601SPetr Machata }
516f30f0601SPetr Machata EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
5171cb33af1SFlorian Fainelli 
/* Recursively walk @dev and its lower devices looking for ports that
 * @check_cb accepts, and call @set_cb on each. Bridge masters found among
 * lower devices are skipped. Returns -EOPNOTSUPP if no device matched,
 * otherwise the first hard error or 0.
 */
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct switchdev_trans *trans))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_attr_info->handled = true;
		return set_cb(dev, port_attr_info->attr,
			      port_attr_info->trans);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
5541cb33af1SFlorian Fainelli 
5551cb33af1SFlorian Fainelli int switchdev_handle_port_attr_set(struct net_device *dev,
5561cb33af1SFlorian Fainelli 			struct switchdev_notifier_port_attr_info *port_attr_info,
5571cb33af1SFlorian Fainelli 			bool (*check_cb)(const struct net_device *dev),
5581cb33af1SFlorian Fainelli 			int (*set_cb)(struct net_device *dev,
5591cb33af1SFlorian Fainelli 				      const struct switchdev_attr *attr,
5601cb33af1SFlorian Fainelli 				      struct switchdev_trans *trans))
5611cb33af1SFlorian Fainelli {
5621cb33af1SFlorian Fainelli 	int err;
5631cb33af1SFlorian Fainelli 
5641cb33af1SFlorian Fainelli 	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
5651cb33af1SFlorian Fainelli 					       set_cb);
5661cb33af1SFlorian Fainelli 	if (err == -EOPNOTSUPP)
5671cb33af1SFlorian Fainelli 		err = 0;
5681cb33af1SFlorian Fainelli 	return err;
5691cb33af1SFlorian Fainelli }
5701cb33af1SFlorian Fainelli EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
571