// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
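
/* Example (illustrative sketch, not part of this file): a caller that has
 * queued deferred switchdev operations is expected to flush them under
 * rtnl_lock before the target device can go away.  foo_port_teardown() is
 * a hypothetical function used only for illustration.
 *
 *	static void foo_port_teardown(struct net_device *dev)
 *	{
 *		rtnl_lock();
 *		switchdev_deferred_process();
 *		// no deferred ops reference dev beyond this point
 *		rtnl_unlock();
 *	}
 */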

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
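
/* Example (illustrative sketch, not part of this file): an upper device
 * such as a bridge could ask the underlying switch port to move to the
 * forwarding STP state along these lines.  BR_STATE_FORWARDING comes from
 * linux/if_bridge.h; error handling is omitted for brevity.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr, NULL);
 */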

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
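
/* Example (illustrative sketch, not part of this file): adding VLAN 100
 * as PVID/untagged on a switch port.  The struct switchdev_obj_port_vlan
 * layout is the one from net/switchdev.h of this kernel generation;
 * check the header you build against.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.vid = 100,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj, extack);
 */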

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
 *	and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
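
/* Example (illustrative sketch, not part of this file): removing the VLAN
 * added in the switchdev_port_obj_add() example above reuses the same
 * object description; only the notifier type differs internally.
 *
 *	err = switchdev_port_obj_del(dev, &vlan.obj);
 */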

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
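
/* Example (illustrative sketch, not part of this file): a switch driver
 * typically registers one atomic notifier block and dispatches on the
 * event type.  The "foo_" names are hypothetical.
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			// atomic context; defer hardware access to a workqueue
 *			return foo_schedule_fdb_work(dev, event, ptr);
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */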

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
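
/* Example (illustrative sketch, not part of this file): a driver that
 * learned a MAC address in hardware can notify interested upper layers
 * (e.g. the bridge) through the atomic chain.  Only the fdb_info fields
 * shown here are relied upon; see struct switchdev_notifier_fdb_info in
 * net/switchdev.h for the full set.
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = mac,	// points at a 6-byte MAC address
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, port_dev,
 *				 &fdb_info.info, NULL);
 */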

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

static int __switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* Handle FDB entries on foreign interfaces as FDB entries
		 * towards the software bridge.
		 */
		if (foreign_dev_check_cb && foreign_dev_check_cb(dev, orig_dev)) {
			struct net_device *br = netdev_master_upper_dev_get_rcu(dev);

			if (!br || !netif_is_bridge_master(br))
				return 0;

			/* No point in handling FDB entries on a foreign bridge */
			if (foreign_dev_check_cb(dev, br))
				return 0;

			return __switchdev_handle_fdb_add_to_device(br, orig_dev,
								    fdb_info, check_cb,
								    foreign_dev_check_cb,
								    add_cb, lag_add_cb);
		}

		return add_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* If we passed over the foreign check, it means that the LAG interface
	 * is offloaded.
	 */
	if (netif_is_lag_master(dev)) {
		if (!lag_add_cb)
			return -EOPNOTSUPP;

		return lag_add_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev,
							   fdb_info, check_cb,
							   foreign_dev_check_cb,
							   add_cb, lag_add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   add_cb, lag_add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device);
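
/* Example (illustrative sketch, not part of this file): the helper above
 * is meant to be called from a driver's FDB handling path (typically from
 * process context, after deferring out of the atomic notifier), with
 * callbacks along these lines.  All "foo_" names are hypothetical.
 *
 *	static bool foo_dev_check(const struct net_device *dev)
 *	{
 *		return dev->netdev_ops == &foo_netdev_ops;
 *	}
 *
 *	static bool foo_foreign_dev_check(const struct net_device *dev,
 *					  const struct net_device *foreign_dev)
 *	{
 *		// true when foreign_dev is not a port of the same switch as dev
 *		return !foo_dev_check(foreign_dev);
 *	}
 *
 *	static int foo_fdb_add(struct net_device *dev,
 *			       const struct net_device *orig_dev, const void *ctx,
 *			       const struct switchdev_notifier_fdb_info *fdb_info)
 *	{
 *		return foo_hw_fdb_add(netdev_priv(dev), fdb_info->addr,
 *				      fdb_info->vid);
 *	}
 *
 *	err = switchdev_handle_fdb_add_to_device(dev, fdb_info,
 *						 foo_dev_check,
 *						 foo_foreign_dev_check,
 *						 foo_fdb_add, NULL);
 */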

static int __switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* Handle FDB entries on foreign interfaces as FDB entries
		 * towards the software bridge.
		 */
		if (foreign_dev_check_cb && foreign_dev_check_cb(dev, orig_dev)) {
			struct net_device *br = netdev_master_upper_dev_get_rcu(dev);

			if (!br || !netif_is_bridge_master(br))
				return 0;

			/* No point in handling FDB entries on a foreign bridge */
			if (foreign_dev_check_cb(dev, br))
				return 0;

			return __switchdev_handle_fdb_del_to_device(br, orig_dev,
								    fdb_info, check_cb,
								    foreign_dev_check_cb,
								    del_cb, lag_del_cb);
		}

		return del_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* If we passed over the foreign check, it means that the LAG interface
	 * is offloaded.
	 */
	if (netif_is_lag_master(dev)) {
		if (!lag_del_cb)
			return -EOPNOTSUPP;

		return lag_del_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev,
							   fdb_info, check_cb,
							   foreign_dev_check_cb,
							   del_cb, lag_del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   del_cb, lag_del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
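
/* Example (illustrative sketch, not part of this file): a driver's
 * blocking notifier handler usually funnels SWITCHDEV_PORT_OBJ_ADD events
 * through this helper so that stacked devices (LAG, bridge) are walked
 * for it.  The "foo_" names are hypothetical; foo_dev_check() is as in
 * the earlier FDB sketch.
 *
 *	static int foo_port_obj_add(struct net_device *dev, const void *ctx,
 *				    const struct switchdev_obj *obj,
 *				    struct netlink_ext_ack *extack)
 *	{
 *		if (obj->id != SWITCHDEV_OBJ_ID_PORT_VLAN)
 *			return -EOPNOTSUPP;
 *
 *		return foo_vlan_add(netdev_priv(dev),
 *				    SWITCHDEV_OBJ_PORT_VLAN(obj), extack);
 *	}
 *
 *	static int foo_switchdev_blocking_event(struct notifier_block *nb,
 *						unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */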

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
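
/* Example (illustrative sketch, not part of this file): the attr_set
 * helper is used the same way as the object helpers above, from the
 * SWITCHDEV_PORT_ATTR_SET case of a blocking notifier handler such as
 * the one sketched after switchdev_handle_port_obj_add().  The "foo_"
 * names are hypothetical.
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */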