/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 *	switchdev_port_attr_get - Get port attribute
 *
 *	@dev: port device
 *	@attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port.  Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
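
/* Illustrative sketch only (not part of this file): a hypothetical caller
 * could use switchdev_port_attr_get() with SWITCHDEV_ATTR_PORT_PARENT_ID
 * to check whether two netdevs are ports of the same offload device, much
 * like switchdev_get_dev_by_nhs() does further down.  The helper name is
 * made up.
 */
static bool __maybe_unused example_same_switch(struct net_device *a,
					       struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr b_attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	/* Ports belong to the same switch iff their parent IDs match. */
	return a_attr.ppid.id_len == b_attr.ppid.id_len &&
	       !memcmp(a_attr.ppid.id, b_attr.ppid.id, a_attr.ppid.id_len);
}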

static int __switchdev_port_attr_set(struct net_device *dev,
				     struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr);
		if (err)
			break;
	}

	return err;
}

struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};

static void switchdev_port_attr_set_work(struct work_struct *work)
{
	struct switchdev_attr_set_work *asw =
		container_of(work, struct switchdev_attr_set_work, work);
	int err;

	rtnl_lock();
	err = switchdev_port_attr_set(asw->dev, &asw->attr);
	BUG_ON(err);
	rtnl_unlock();

	dev_put(asw->dev);
	kfree(work);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 struct switchdev_attr *attr)
{
	struct switchdev_attr_set_work *asw;

	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
	if (!asw)
		return -ENOMEM;

	INIT_WORK(&asw->work, switchdev_port_attr_set_work);

	dev_hold(dev);
	asw->dev = dev;
	memcpy(&asw->attr, attr, sizeof(asw->attr));

	schedule_work(&asw->work);

	return 0;
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	int err;

	if (!rtnl_is_locked()) {
		/* Running the prepare-commit transaction across stacked
		 * devices requires that nothing moves, so if rtnl_lock is
		 * not held, schedule a worker thread to hold rtnl_lock
		 * while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	attr->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_attr_set(dev, attr);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		attr->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_attr_set(dev, attr);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	attr->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_attr_set(dev, attr);
	BUG_ON(err);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
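
/* Illustrative sketch only (not part of this file): a port driver's
 * switchdev_port_attr_set op is expected to honor the transaction phases
 * driven above.  The function below is hypothetical; hardware access is
 * elided and only the bridge-flags attribute is handled.
 */
static int __maybe_unused example_drv_port_attr_set(struct net_device *dev,
						    struct switchdev_attr *attr)
{
	if (attr->id != SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS)
		return -EOPNOTSUPP;

	switch (attr->trans) {
	case SWITCHDEV_TRANS_PREPARE:
		/* Validate attr->brport_flags and reserve resources,
		 * but don't touch the hardware yet.  Fail here if the
		 * commit could not succeed.
		 */
		return 0;
	case SWITCHDEV_TRANS_ABORT:
		/* Release anything reserved in the prepare phase. */
		return 0;
	case SWITCHDEV_TRANS_COMMIT:
		/* Program the hardware; must not fail after a
		 * successful prepare.
		 */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}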

static int __switchdev_port_obj_add(struct net_device *dev,
				    struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held.
 */
int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	obj->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_obj_add(dev, obj);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		obj->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_obj_add(dev, obj);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	obj->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_obj_add(dev, obj);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
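
/* Illustrative sketch only (not part of this file): a hypothetical caller
 * (the bridge AF_SPEC path in switchdev_port_br_afspec() below does the
 * real work) could add a single VLAN to a port like this.  The function
 * name and VLAN values are made up.
 */
static int __maybe_unused example_port_add_vlan(struct net_device *dev)
{
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
		.vlan = {
			.flags = BRIDGE_VLAN_INFO_UNTAGGED,
			.vid_start = 100,
			.vid_end = 100,
		},
	};

	/* Caller must hold rtnl_lock; the core then runs the
	 * prepare/commit transaction against the port driver(s).
	 */
	return switchdev_port_obj_add(dev, &obj);
}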

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 */
int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier. This should be used by code
 *	which needs to monitor events happening in a particular device.
 *	Return values are the same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 *	Return values are the same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *
 *	Call all switchdev notifier blocks. This should be called by a driver
 *	when it needs to propagate a hardware event.
 *	Return values are the same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
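
/* Illustrative sketch only (not part of this file): code interested in
 * switchdev events (the bridge, for example) could register a handler
 * like the hypothetical one below.  Callbacks run with switchdev_mutex
 * held, so they may sleep but must not call back into this chain.
 */
static int example_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct switchdev_notifier_info *info = ptr;

	netdev_dbg(info->dev, "switchdev event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_switchdev_nb __maybe_unused = {
	.notifier_call = example_switchdev_event,
};

/* A hypothetical user would then, from process context, call
 * register_switchdev_notifier(&example_switchdev_nb) and later
 * unregister_switchdev_notifier() on teardown.
 */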

/**
 *	switchdev_port_bridge_getlink - Get bridge port attributes
 *
 *	@skb: netlink skb to fill with the bridge port info
 *	@pid: netlink port ID of the requester
 *	@seq: netlink sequence number
 *	@dev: port device
 *	@filter_mask: filter mask passed in on the getlink request
 *	@nlflags: netlink flags passed in (NLM_F_*)
 *
 *	Called for SELF on rtnl_bridge_getlink to get bridge port
 *	attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.brport_flags, mask, nlflags);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.brport_flags |= brport_flag;
	else
		attr.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		obj.vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_end = vinfo->vid;
			if (obj.vlan.vid_end <= obj.vlan.vid_start)
				return -EINVAL;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		} else {
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
			obj.vlan.vid_end = vinfo->vid;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		}
	}

	return 0;
}

/**
 *	switchdev_port_bridge_setlink - Set bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_setlink to set bridge port
 *	attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 *	switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_dellink to delete bridge port
 *	attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
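
/* Illustrative sketch only (not part of this file): a switch port driver
 * typically wires the three bridge helpers above directly into its
 * net_device_ops, so that bridge netlink requests on the port are
 * translated into switchdev attr/obj operations.  The ops name is
 * hypothetical and all other ndo fields are omitted.
 */
static const struct net_device_ops example_port_netdev_ops __maybe_unused = {
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};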

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch. */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0) {
			if (prev_attr.ppid.id_len != attr.ppid.id_len)
				return NULL;
			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
				   attr.ppid.id_len))
				return NULL;
		}

		prev_attr = attr;
	}

	return dev;
}

/**
 *	switchdev_fib_ipv4_add - Add IPv4 route entry to switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@nlflags: netlink flags passed in (NLM_F_*)
 *	@tb_id: route table ID
 *
 *	Add IPv4 route entry to switch device.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj fib_obj = {
		.id = SWITCHDEV_OBJ_IPV4_FIB,
		.ipv4_fib = {
			.dst = htonl(dst),
			.dst_len = dst_len,
			.fi = fi,
			.tos = tos,
			.type = type,
			.nlflags = nlflags,
			.tb_id = tb_id,
		},
	};
	struct net_device *dev;
	int err = 0;

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_add(dev, &fib_obj);
	if (!err)
		fi->fib_flags |= RTNH_F_EXTERNAL;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
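
/* Illustrative sketch only (not part of this file): in a hypothetical
 * port driver, the FIB object built above arrives via the driver's
 * switchdev_port_obj_add op.  The fields mirror the fib_obj initializer
 * in switchdev_fib_ipv4_add(); hardware programming is elided.
 */
static int __maybe_unused example_drv_obj_add_ipv4_fib(struct net_device *dev,
							struct switchdev_obj *obj)
{
	if (obj->id != SWITCHDEV_OBJ_IPV4_FIB)
		return -EOPNOTSUPP;

	/* obj->ipv4_fib.dst and .dst_len describe the prefix, .fi carries
	 * the nexthop information, and .tb_id selects the routing table.
	 * A real driver would install the route when obj->trans is
	 * SWITCHDEV_TRANS_COMMIT and only validate it during PREPARE.
	 */
	return 0;
}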

/**
 *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@tb_id: route table ID
 *
 *	Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj fib_obj = {
		.id = SWITCHDEV_OBJ_IPV4_FIB,
		.ipv4_fib = {
			.dst = htonl(dst),
			.dst_len = dst_len,
			.fi = fi,
			.tos = tos,
			.type = type,
			.nlflags = 0,
			.tb_id = tb_id,
		},
	};
	struct net_device *dev;
	int err = 0;

	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_del(dev, &fib_obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_EXTERNAL;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);

/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for the
	 * entire net by flushing offload device(s) of all IPv4
	 * routes, and mark IPv4 fib offloading broken from this
	 * point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);