xref: /linux-6.15/net/switchdev/switchdev.c (revision 58c2cb16)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
3007f790cSJiri Pirko  * Copyright (c) 2014 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
1847f8328bSScott Feldman #include <linux/if_bridge.h>
195e8d9049SScott Feldman #include <net/ip_fib.h>
20007f790cSJiri Pirko #include <net/switchdev.h>
21007f790cSJiri Pirko 
22007f790cSJiri Pirko /**
233094333dSScott Feldman  *	switchdev_port_attr_get - Get port attribute
243094333dSScott Feldman  *
253094333dSScott Feldman  *	@dev: port device
263094333dSScott Feldman  *	@attr: attribute to get
273094333dSScott Feldman  */
283094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
293094333dSScott Feldman {
303094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
313094333dSScott Feldman 	struct net_device *lower_dev;
323094333dSScott Feldman 	struct list_head *iter;
333094333dSScott Feldman 	struct switchdev_attr first = {
343094333dSScott Feldman 		.id = SWITCHDEV_ATTR_UNDEFINED
353094333dSScott Feldman 	};
363094333dSScott Feldman 	int err = -EOPNOTSUPP;
373094333dSScott Feldman 
383094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
393094333dSScott Feldman 		return ops->switchdev_port_attr_get(dev, attr);
403094333dSScott Feldman 
413094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
423094333dSScott Feldman 		return err;
433094333dSScott Feldman 
443094333dSScott Feldman 	/* Switch device port(s) may be stacked under
453094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to get attr on
463094333dSScott Feldman 	 * each port.  Return -ENODATA if attr values don't
473094333dSScott Feldman 	 * compare equal across ports.
483094333dSScott Feldman 	 */
493094333dSScott Feldman 
503094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
513094333dSScott Feldman 		err = switchdev_port_attr_get(lower_dev, attr);
523094333dSScott Feldman 		if (err)
533094333dSScott Feldman 			break;
543094333dSScott Feldman 		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
553094333dSScott Feldman 			first = *attr;
563094333dSScott Feldman 		else if (memcmp(&first, attr, sizeof(*attr)))
573094333dSScott Feldman 			return -ENODATA;
583094333dSScott Feldman 	}
593094333dSScott Feldman 
603094333dSScott Feldman 	return err;
613094333dSScott Feldman }
623094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
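
/* Illustrative sketch (not part of this file): a hypothetical "foo" driver
 * answers the call above from its switchdev_ops.  All foo_* names are
 * invented for the example; only the switchdev structures and attr IDs
 * are real.
 *
 *	static int foo_port_attr_get(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *foo_port = netdev_priv(dev);
 *
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_PORT_PARENT_ID:
 *			// every port of the same ASIC reports the same ID
 *			attr->ppid.id_len = sizeof(foo_port->switch_id);
 *			memcpy(attr->ppid.id, &foo_port->switch_id,
 *			       attr->ppid.id_len);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	static const struct switchdev_ops foo_switchdev_ops = {
 *		.switchdev_port_attr_get	= foo_port_attr_get,
 *	};
 */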
633094333dSScott Feldman 
643094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev,
653094333dSScott Feldman 				     struct switchdev_attr *attr)
663094333dSScott Feldman {
673094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
683094333dSScott Feldman 	struct net_device *lower_dev;
693094333dSScott Feldman 	struct list_head *iter;
703094333dSScott Feldman 	int err = -EOPNOTSUPP;
713094333dSScott Feldman 
723094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_set)
733094333dSScott Feldman 		return ops->switchdev_port_attr_set(dev, attr);
743094333dSScott Feldman 
753094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
763094333dSScott Feldman 		return err;
773094333dSScott Feldman 
783094333dSScott Feldman 	/* Switch device port(s) may be stacked under
793094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to set attr on
803094333dSScott Feldman 	 * each port.
813094333dSScott Feldman 	 */
823094333dSScott Feldman 
833094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
843094333dSScott Feldman 		err = __switchdev_port_attr_set(lower_dev, attr);
853094333dSScott Feldman 		if (err)
863094333dSScott Feldman 			break;
873094333dSScott Feldman 	}
883094333dSScott Feldman 
893094333dSScott Feldman 	return err;
903094333dSScott Feldman }
913094333dSScott Feldman 
923094333dSScott Feldman struct switchdev_attr_set_work {
933094333dSScott Feldman 	struct work_struct work;
943094333dSScott Feldman 	struct net_device *dev;
953094333dSScott Feldman 	struct switchdev_attr attr;
963094333dSScott Feldman };
973094333dSScott Feldman 
983094333dSScott Feldman static void switchdev_port_attr_set_work(struct work_struct *work)
993094333dSScott Feldman {
1003094333dSScott Feldman 	struct switchdev_attr_set_work *asw =
1013094333dSScott Feldman 		container_of(work, struct switchdev_attr_set_work, work);
1023094333dSScott Feldman 	int err;
1033094333dSScott Feldman 
1043094333dSScott Feldman 	rtnl_lock();
1053094333dSScott Feldman 	err = switchdev_port_attr_set(asw->dev, &asw->attr);
1063094333dSScott Feldman 	BUG_ON(err);
1073094333dSScott Feldman 	rtnl_unlock();
1083094333dSScott Feldman 
1093094333dSScott Feldman 	dev_put(asw->dev);
1103094333dSScott Feldman 	kfree(work);
1113094333dSScott Feldman }
1123094333dSScott Feldman 
1133094333dSScott Feldman static int switchdev_port_attr_set_defer(struct net_device *dev,
1143094333dSScott Feldman 					 struct switchdev_attr *attr)
1153094333dSScott Feldman {
1163094333dSScott Feldman 	struct switchdev_attr_set_work *asw;
1173094333dSScott Feldman 
1183094333dSScott Feldman 	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
1193094333dSScott Feldman 	if (!asw)
1203094333dSScott Feldman 		return -ENOMEM;
1213094333dSScott Feldman 
1223094333dSScott Feldman 	INIT_WORK(&asw->work, switchdev_port_attr_set_work);
1233094333dSScott Feldman 
1243094333dSScott Feldman 	dev_hold(dev);
1253094333dSScott Feldman 	asw->dev = dev;
1263094333dSScott Feldman 	memcpy(&asw->attr, attr, sizeof(asw->attr));
1273094333dSScott Feldman 
1283094333dSScott Feldman 	schedule_work(&asw->work);
1293094333dSScott Feldman 
1303094333dSScott Feldman 	return 0;
1313094333dSScott Feldman }
1323094333dSScott Feldman 
1333094333dSScott Feldman /**
1343094333dSScott Feldman  *	switchdev_port_attr_set - Set port attribute
1353094333dSScott Feldman  *
1363094333dSScott Feldman  *	@dev: port device
1373094333dSScott Feldman  *	@attr: attribute to set
1383094333dSScott Feldman  *
1393094333dSScott Feldman  *	Use a 2-phase prepare-commit transaction model to ensure
1403094333dSScott Feldman  *	system is not left in a partially updated state due to
1413094333dSScott Feldman  *	failure from driver/device.
1423094333dSScott Feldman  */
1433094333dSScott Feldman int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
1443094333dSScott Feldman {
1453094333dSScott Feldman 	int err;
1463094333dSScott Feldman 
1473094333dSScott Feldman 	if (!rtnl_is_locked()) {
1483094333dSScott Feldman 		/* Running a prepare-commit transaction across stacked
1493094333dSScott Feldman 		 * devices requires that nothing move, so if rtnl_lock is
1503094333dSScott Feldman 		 * not held, schedule a worker thread to hold rtnl_lock
1513094333dSScott Feldman 		 * while setting the attr.
1523094333dSScott Feldman 		 */
1533094333dSScott Feldman 
1543094333dSScott Feldman 		return switchdev_port_attr_set_defer(dev, attr);
1553094333dSScott Feldman 	}
1563094333dSScott Feldman 
1573094333dSScott Feldman 	/* Phase I: prepare for attr set. Driver/device should fail
1583094333dSScott Feldman 	 * here if there are going to be issues in the commit phase,
1593094333dSScott Feldman 	 * such as lack of resources or support.  The driver/device
1603094333dSScott Feldman 	 * should reserve resources needed for the commit phase here,
1613094333dSScott Feldman 	 * but should not commit the attr.
1623094333dSScott Feldman 	 */
1633094333dSScott Feldman 
1643094333dSScott Feldman 	attr->trans = SWITCHDEV_TRANS_PREPARE;
1653094333dSScott Feldman 	err = __switchdev_port_attr_set(dev, attr);
1663094333dSScott Feldman 	if (err) {
1673094333dSScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
1683094333dSScott Feldman 		 * resources reserved in the prepare phase are
1693094333dSScott Feldman 		 * released.
1703094333dSScott Feldman 		 */
1713094333dSScott Feldman 
1723094333dSScott Feldman 		attr->trans = SWITCHDEV_TRANS_ABORT;
1733094333dSScott Feldman 		__switchdev_port_attr_set(dev, attr);
1743094333dSScott Feldman 
1753094333dSScott Feldman 		return err;
1763094333dSScott Feldman 	}
1773094333dSScott Feldman 
1783094333dSScott Feldman 	/* Phase II: commit attr set.  This cannot fail as a fault
1793094333dSScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
1803094333dSScott Feldman 	 * because the driver said everything was OK in phase I.
1813094333dSScott Feldman 	 */
1823094333dSScott Feldman 
1833094333dSScott Feldman 	attr->trans = SWITCHDEV_TRANS_COMMIT;
1843094333dSScott Feldman 	err = __switchdev_port_attr_set(dev, attr);
1853094333dSScott Feldman 	BUG_ON(err);
1863094333dSScott Feldman 
1873094333dSScott Feldman 	return err;
1883094333dSScott Feldman }
1893094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
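
/* Illustrative sketch (not part of this file): how a driver's
 * switchdev_port_attr_set op is expected to split its work across the
 * transaction phases driven above.  The foo_* helpers are invented; the
 * point is that PREPARE only checks/reserves, ABORT releases, and COMMIT
 * must not fail.
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *foo_port = netdev_priv(dev);
 *
 *		if (attr->id != SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS)
 *			return -EOPNOTSUPP;
 *
 *		switch (attr->trans) {
 *		case SWITCHDEV_TRANS_PREPARE:
 *			// validate and reserve, but don't touch hardware
 *			return foo_brport_flags_check(foo_port,
 *						      attr->brport_flags);
 *		case SWITCHDEV_TRANS_ABORT:
 *			foo_brport_flags_unreserve(foo_port);
 *			return 0;
 *		case SWITCHDEV_TRANS_COMMIT:
 *			// PREPARE said OK, so this may not fail
 *			foo_brport_flags_apply(foo_port, attr->brport_flags);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */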
1903094333dSScott Feldman 
191491d0f15SScott Feldman static int __switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
192491d0f15SScott Feldman {
193491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
194491d0f15SScott Feldman 	struct net_device *lower_dev;
195491d0f15SScott Feldman 	struct list_head *iter;
196491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
197491d0f15SScott Feldman 
198491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_add)
199491d0f15SScott Feldman 		return ops->switchdev_port_obj_add(dev, obj);
200491d0f15SScott Feldman 
201491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
202491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to add object on
203491d0f15SScott Feldman 	 * each port.
204491d0f15SScott Feldman 	 */
205491d0f15SScott Feldman 
206491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
207491d0f15SScott Feldman 		err = __switchdev_port_obj_add(lower_dev, obj);
208491d0f15SScott Feldman 		if (err)
209491d0f15SScott Feldman 			break;
210491d0f15SScott Feldman 	}
211491d0f15SScott Feldman 
212491d0f15SScott Feldman 	return err;
213491d0f15SScott Feldman }
214491d0f15SScott Feldman 
215491d0f15SScott Feldman /**
216491d0f15SScott Feldman  *	switchdev_port_obj_add - Add port object
217491d0f15SScott Feldman  *
218491d0f15SScott Feldman  *	@dev: port device
219491d0f15SScott Feldman  *	@obj: object to add
220491d0f15SScott Feldman  *
221491d0f15SScott Feldman  *	Use a 2-phase prepare-commit transaction model to ensure
222491d0f15SScott Feldman  *	system is not left in a partially updated state due to
223491d0f15SScott Feldman  *	failure from driver/device.
224491d0f15SScott Feldman  *
225491d0f15SScott Feldman  *	rtnl_lock must be held.
226491d0f15SScott Feldman  */
227491d0f15SScott Feldman int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
228491d0f15SScott Feldman {
229491d0f15SScott Feldman 	int err;
230491d0f15SScott Feldman 
231491d0f15SScott Feldman 	ASSERT_RTNL();
232491d0f15SScott Feldman 
233491d0f15SScott Feldman 	/* Phase I: prepare for obj add. Driver/device should fail
234491d0f15SScott Feldman 	 * here if there are going to be issues in the commit phase,
235491d0f15SScott Feldman 	 * such as lack of resources or support.  The driver/device
236491d0f15SScott Feldman 	 * should reserve resources needed for the commit phase here,
237491d0f15SScott Feldman 	 * but should not commit the obj.
238491d0f15SScott Feldman 	 */
239491d0f15SScott Feldman 
240491d0f15SScott Feldman 	obj->trans = SWITCHDEV_TRANS_PREPARE;
241491d0f15SScott Feldman 	err = __switchdev_port_obj_add(dev, obj);
242491d0f15SScott Feldman 	if (err) {
243491d0f15SScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
244491d0f15SScott Feldman 		 * resources reserved in the prepare phase are
245491d0f15SScott Feldman 		 * released.
246491d0f15SScott Feldman 		 */
247491d0f15SScott Feldman 
248491d0f15SScott Feldman 		obj->trans = SWITCHDEV_TRANS_ABORT;
249491d0f15SScott Feldman 		__switchdev_port_obj_add(dev, obj);
250491d0f15SScott Feldman 
251491d0f15SScott Feldman 		return err;
252491d0f15SScott Feldman 	}
253491d0f15SScott Feldman 
254491d0f15SScott Feldman 	/* Phase II: commit obj add.  This cannot fail as a fault
255491d0f15SScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
256491d0f15SScott Feldman 	 * because the driver said everything was OK in phase I.
257491d0f15SScott Feldman 	 */
258491d0f15SScott Feldman 
259491d0f15SScott Feldman 	obj->trans = SWITCHDEV_TRANS_COMMIT;
260491d0f15SScott Feldman 	err = __switchdev_port_obj_add(dev, obj);
261491d0f15SScott Feldman 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
262491d0f15SScott Feldman 
263491d0f15SScott Feldman 	return err;
264491d0f15SScott Feldman }
265491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
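
/* Illustrative sketch (not part of this file): a caller holding rtnl_lock
 * can program a range of VLANs on a port with the object layout already
 * used by switchdev_port_br_afspec() below.  The error handling shown is
 * only one reasonable choice.
 *
 *	struct switchdev_obj obj = {
 *		.id = SWITCHDEV_OBJ_PORT_VLAN,
 *		.vlan = {
 *			.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *			.vid_start = 10,
 *			.vid_end = 20,
 *		},
 *	};
 *	int err;
 *
 *	err = switchdev_port_obj_add(dev, &obj);
 *	if (err && err != -EOPNOTSUPP)
 *		netdev_err(dev, "failed to add VLANs 10-20\n");
 */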
266491d0f15SScott Feldman 
267491d0f15SScott Feldman /**
268491d0f15SScott Feldman  *	switchdev_port_obj_del - Delete port object
269491d0f15SScott Feldman  *
270491d0f15SScott Feldman  *	@dev: port device
271491d0f15SScott Feldman  *	@obj: object to delete
272491d0f15SScott Feldman  */
273491d0f15SScott Feldman int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
274491d0f15SScott Feldman {
275491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
276491d0f15SScott Feldman 	struct net_device *lower_dev;
277491d0f15SScott Feldman 	struct list_head *iter;
278491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
279491d0f15SScott Feldman 
280491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_del)
281491d0f15SScott Feldman 		return ops->switchdev_port_obj_del(dev, obj);
282491d0f15SScott Feldman 
283491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
284491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to delete object on
285491d0f15SScott Feldman 	 * each port.
286491d0f15SScott Feldman 	 */
287491d0f15SScott Feldman 
288491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
289491d0f15SScott Feldman 		err = switchdev_port_obj_del(lower_dev, obj);
290491d0f15SScott Feldman 		if (err)
291491d0f15SScott Feldman 			break;
292491d0f15SScott Feldman 	}
293491d0f15SScott Feldman 
294491d0f15SScott Feldman 	return err;
295491d0f15SScott Feldman }
296491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
297491d0f15SScott Feldman 
298ebb9a03aSJiri Pirko static DEFINE_MUTEX(switchdev_mutex);
299ebb9a03aSJiri Pirko static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
30003bf0c28SJiri Pirko 
30103bf0c28SJiri Pirko /**
302ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
30303bf0c28SJiri Pirko  *	@nb: notifier_block
30403bf0c28SJiri Pirko  *
30503bf0c28SJiri Pirko  *	Register switch device notifier. This should be used by code
30603bf0c28SJiri Pirko  *	which needs to monitor events happening in a particular device.
30703bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_register().
30803bf0c28SJiri Pirko  */
309ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
31003bf0c28SJiri Pirko {
31103bf0c28SJiri Pirko 	int err;
31203bf0c28SJiri Pirko 
313ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
314ebb9a03aSJiri Pirko 	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
315ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
31603bf0c28SJiri Pirko 	return err;
31703bf0c28SJiri Pirko }
318ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
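
/* Illustrative sketch (not part of this file): a consumer of hardware
 * events (the bridge does this for learned FDB entries) registers a
 * notifier block like the one below.  The bar_* names are invented; the
 * event values and struct switchdev_notifier_fdb_info layout are assumed
 * from the current switchdev headers.
 *
 *	static int bar_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_fdb_info *fdb_info;
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD:
 *			fdb_info = ptr;
 *			// fdb_info->info.dev is the originating port netdev
 *			bar_fdb_sync_add(fdb_info->info.dev,
 *					 fdb_info->addr, fdb_info->vid);
 *			break;
 *		case SWITCHDEV_FDB_DEL:
 *			fdb_info = ptr;
 *			bar_fdb_sync_del(fdb_info->info.dev,
 *					 fdb_info->addr, fdb_info->vid);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block bar_switchdev_notifier = {
 *		.notifier_call = bar_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&bar_switchdev_notifier);
 */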
31903bf0c28SJiri Pirko 
32003bf0c28SJiri Pirko /**
321ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
32203bf0c28SJiri Pirko  *	@nb: notifier_block
32303bf0c28SJiri Pirko  *
32403bf0c28SJiri Pirko  *	Unregister switch device notifier.
32503bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_unregister().
32603bf0c28SJiri Pirko  */
327ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
32803bf0c28SJiri Pirko {
32903bf0c28SJiri Pirko 	int err;
33003bf0c28SJiri Pirko 
331ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
332ebb9a03aSJiri Pirko 	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
333ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
33403bf0c28SJiri Pirko 	return err;
33503bf0c28SJiri Pirko }
336ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
33703bf0c28SJiri Pirko 
33803bf0c28SJiri Pirko /**
339ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
34003bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
34103bf0c28SJiri Pirko  *	@dev: port device
34203bf0c28SJiri Pirko  *	@info: notifier information data
34303bf0c28SJiri Pirko  *
34403bf0c28SJiri Pirko  *	Call all switchdev notifier blocks. This should be called by a driver
34503bf0c28SJiri Pirko  *	when it needs to propagate a hardware event.
34603bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_call_chain().
34703bf0c28SJiri Pirko  */
348ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
349ebb9a03aSJiri Pirko 			     struct switchdev_notifier_info *info)
35003bf0c28SJiri Pirko {
35103bf0c28SJiri Pirko 	int err;
35203bf0c28SJiri Pirko 
35303bf0c28SJiri Pirko 	info->dev = dev;
354ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
355ebb9a03aSJiri Pirko 	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
356ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
35703bf0c28SJiri Pirko 	return err;
35803bf0c28SJiri Pirko }
359ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
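
/* Illustrative sketch (not part of this file): the driver side of the
 * notifier above.  A port driver that just learned a MAC in hardware
 * could report it roughly like this; the learned_mac/learned_vid/port_dev
 * variables are invented, the fdb_info fields are assumed from
 * struct switchdev_notifier_fdb_info.
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = learned_mac,
 *		.vid = learned_vid,
 *	};
 *
 *	rtnl_lock();
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev, &fdb_info.info);
 *	rtnl_unlock();
 */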
3608a44dbb2SRoopa Prabhu 
3618793d0a6SScott Feldman /**
3628793d0a6SScott Feldman  *	switchdev_port_bridge_getlink - Get bridge port attributes
3638793d0a6SScott Feldman  *
3648793d0a6SScott Feldman  *	@dev: port device
3658793d0a6SScott Feldman  *
3668793d0a6SScott Feldman  *	Called for SELF on rtnl_bridge_getlink to get bridge port
3678793d0a6SScott Feldman  *	attributes.
3688793d0a6SScott Feldman  */
3698793d0a6SScott Feldman int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3708793d0a6SScott Feldman 				  struct net_device *dev, u32 filter_mask,
3718793d0a6SScott Feldman 				  int nlflags)
3728793d0a6SScott Feldman {
3738793d0a6SScott Feldman 	struct switchdev_attr attr = {
3748793d0a6SScott Feldman 		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
3758793d0a6SScott Feldman 	};
3768793d0a6SScott Feldman 	u16 mode = BRIDGE_MODE_UNDEF;
3778793d0a6SScott Feldman 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
3788793d0a6SScott Feldman 	int err;
3798793d0a6SScott Feldman 
3808793d0a6SScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
3818793d0a6SScott Feldman 	if (err)
3828793d0a6SScott Feldman 		return err;
3838793d0a6SScott Feldman 
3848793d0a6SScott Feldman 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
3858793d0a6SScott Feldman 				       attr.brport_flags, mask, nlflags);
3868793d0a6SScott Feldman }
3878793d0a6SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
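
/* Illustrative sketch (not part of this file): drivers normally don't call
 * the helper above directly; they point the corresponding netdev op at it
 * (foo_netdev_ops is invented):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *	};
 */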
3888793d0a6SScott Feldman 
38947f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev,
39047f8328bSScott Feldman 				     struct nlattr *nlattr,
39147f8328bSScott Feldman 				     unsigned long brport_flag)
39247f8328bSScott Feldman {
39347f8328bSScott Feldman 	struct switchdev_attr attr = {
39447f8328bSScott Feldman 		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
39547f8328bSScott Feldman 	};
39647f8328bSScott Feldman 	u8 flag = nla_get_u8(nlattr);
39747f8328bSScott Feldman 	int err;
39847f8328bSScott Feldman 
39947f8328bSScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
40047f8328bSScott Feldman 	if (err)
40147f8328bSScott Feldman 		return err;
40247f8328bSScott Feldman 
40347f8328bSScott Feldman 	if (flag)
40447f8328bSScott Feldman 		attr.brport_flags |= brport_flag;
40547f8328bSScott Feldman 	else
40647f8328bSScott Feldman 		attr.brport_flags &= ~brport_flag;
40747f8328bSScott Feldman 
40847f8328bSScott Feldman 	return switchdev_port_attr_set(dev, &attr);
40947f8328bSScott Feldman }
41047f8328bSScott Feldman 
41147f8328bSScott Feldman static const struct nla_policy
41247f8328bSScott Feldman switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
41347f8328bSScott Feldman 	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
41447f8328bSScott Feldman 	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
41547f8328bSScott Feldman 	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
41647f8328bSScott Feldman 	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
41747f8328bSScott Feldman 	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
41847f8328bSScott Feldman 	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
41947f8328bSScott Feldman 	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
42047f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
42147f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
42247f8328bSScott Feldman 	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
42347f8328bSScott Feldman };
42447f8328bSScott Feldman 
42547f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
42647f8328bSScott Feldman 					      struct nlattr *protinfo)
42747f8328bSScott Feldman {
42847f8328bSScott Feldman 	struct nlattr *attr;
42947f8328bSScott Feldman 	int rem;
43047f8328bSScott Feldman 	int err;
43147f8328bSScott Feldman 
43247f8328bSScott Feldman 	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
43347f8328bSScott Feldman 				  switchdev_port_bridge_policy);
43447f8328bSScott Feldman 	if (err)
43547f8328bSScott Feldman 		return err;
43647f8328bSScott Feldman 
43747f8328bSScott Feldman 	nla_for_each_nested(attr, protinfo, rem) {
43847f8328bSScott Feldman 		switch (nla_type(attr)) {
43947f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING:
44047f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
44147f8328bSScott Feldman 							BR_LEARNING);
44247f8328bSScott Feldman 			break;
44347f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING_SYNC:
44447f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
44547f8328bSScott Feldman 							BR_LEARNING_SYNC);
44647f8328bSScott Feldman 			break;
44747f8328bSScott Feldman 		default:
44847f8328bSScott Feldman 			err = -EOPNOTSUPP;
44947f8328bSScott Feldman 			break;
45047f8328bSScott Feldman 		}
45147f8328bSScott Feldman 		if (err)
45247f8328bSScott Feldman 			return err;
45347f8328bSScott Feldman 	}
45447f8328bSScott Feldman 
45547f8328bSScott Feldman 	return 0;
45647f8328bSScott Feldman }
45747f8328bSScott Feldman 
45847f8328bSScott Feldman static int switchdev_port_br_afspec(struct net_device *dev,
45947f8328bSScott Feldman 				    struct nlattr *afspec,
46047f8328bSScott Feldman 				    int (*f)(struct net_device *dev,
46147f8328bSScott Feldman 					     struct switchdev_obj *obj))
46247f8328bSScott Feldman {
46347f8328bSScott Feldman 	struct nlattr *attr;
46447f8328bSScott Feldman 	struct bridge_vlan_info *vinfo;
46547f8328bSScott Feldman 	struct switchdev_obj obj = {
46647f8328bSScott Feldman 		.id = SWITCHDEV_OBJ_PORT_VLAN,
46747f8328bSScott Feldman 	};
46847f8328bSScott Feldman 	int rem;
46947f8328bSScott Feldman 	int err;
47047f8328bSScott Feldman 
47147f8328bSScott Feldman 	nla_for_each_nested(attr, afspec, rem) {
47247f8328bSScott Feldman 		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
47347f8328bSScott Feldman 			continue;
47447f8328bSScott Feldman 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
47547f8328bSScott Feldman 			return -EINVAL;
47647f8328bSScott Feldman 		vinfo = nla_data(attr);
47747f8328bSScott Feldman 		obj.vlan.flags = vinfo->flags;
47847f8328bSScott Feldman 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
47947f8328bSScott Feldman 			if (obj.vlan.vid_start)
48047f8328bSScott Feldman 				return -EINVAL;
48147f8328bSScott Feldman 			obj.vlan.vid_start = vinfo->vid;
48247f8328bSScott Feldman 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
48347f8328bSScott Feldman 			if (!obj.vlan.vid_start)
48447f8328bSScott Feldman 				return -EINVAL;
48547f8328bSScott Feldman 			obj.vlan.vid_end = vinfo->vid;
48647f8328bSScott Feldman 			if (obj.vlan.vid_end <= obj.vlan.vid_start)
48747f8328bSScott Feldman 				return -EINVAL;
48847f8328bSScott Feldman 			err = f(dev, &obj);
48947f8328bSScott Feldman 			if (err)
49047f8328bSScott Feldman 				return err;
49147f8328bSScott Feldman 			memset(&obj.vlan, 0, sizeof(obj.vlan));
49247f8328bSScott Feldman 		} else {
49347f8328bSScott Feldman 			if (obj.vlan.vid_start)
49447f8328bSScott Feldman 				return -EINVAL;
49547f8328bSScott Feldman 			obj.vlan.vid_start = vinfo->vid;
49647f8328bSScott Feldman 			obj.vlan.vid_end = vinfo->vid;
49747f8328bSScott Feldman 			err = f(dev, &obj);
49847f8328bSScott Feldman 			if (err)
49947f8328bSScott Feldman 				return err;
50047f8328bSScott Feldman 			memset(&obj.vlan, 0, sizeof(obj.vlan));
50147f8328bSScott Feldman 		}
50247f8328bSScott Feldman 	}
50347f8328bSScott Feldman 
50447f8328bSScott Feldman 	return 0;
50547f8328bSScott Feldman }
50647f8328bSScott Feldman 
5078a44dbb2SRoopa Prabhu /**
50847f8328bSScott Feldman  *	switchdev_port_bridge_setlink - Set bridge port attributes
5098a44dbb2SRoopa Prabhu  *
5108a44dbb2SRoopa Prabhu  *	@dev: port device
51147f8328bSScott Feldman  *	@nlh: netlink header
51247f8328bSScott Feldman  *	@flags: netlink flags
5138a44dbb2SRoopa Prabhu  *
51447f8328bSScott Feldman  *	Called for SELF on rtnl_bridge_setlink to set bridge port
51547f8328bSScott Feldman  *	attributes.
5168a44dbb2SRoopa Prabhu  */
517ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev,
5188a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
5198a44dbb2SRoopa Prabhu {
52047f8328bSScott Feldman 	struct nlattr *protinfo;
52147f8328bSScott Feldman 	struct nlattr *afspec;
52247f8328bSScott Feldman 	int err = 0;
5238a44dbb2SRoopa Prabhu 
52447f8328bSScott Feldman 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
52547f8328bSScott Feldman 				   IFLA_PROTINFO);
52647f8328bSScott Feldman 	if (protinfo) {
52747f8328bSScott Feldman 		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
52847f8328bSScott Feldman 		if (err)
52947f8328bSScott Feldman 			return err;
53047f8328bSScott Feldman 	}
5318a44dbb2SRoopa Prabhu 
53247f8328bSScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
53347f8328bSScott Feldman 				 IFLA_AF_SPEC);
53447f8328bSScott Feldman 	if (afspec)
53547f8328bSScott Feldman 		err = switchdev_port_br_afspec(dev, afspec,
53647f8328bSScott Feldman 					       switchdev_port_obj_add);
5378a44dbb2SRoopa Prabhu 
53847f8328bSScott Feldman 	return err;
5398a44dbb2SRoopa Prabhu }
540ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
5418a44dbb2SRoopa Prabhu 
5428a44dbb2SRoopa Prabhu /**
5435c34e022SScott Feldman  *	switchdev_port_bridge_dellink - Delete bridge port attributes
5448a44dbb2SRoopa Prabhu  *
5458a44dbb2SRoopa Prabhu  *	@dev: port device
5465c34e022SScott Feldman  *	@nlh: netlink header
5475c34e022SScott Feldman  *	@flags: netlink flags
5488a44dbb2SRoopa Prabhu  *
5495c34e022SScott Feldman  *	Called for SELF on rtnl_bridge_dellink to delete bridge port
5505c34e022SScott Feldman  *	attributes.
5518a44dbb2SRoopa Prabhu  */
552ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev,
5538a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
5548a44dbb2SRoopa Prabhu {
5555c34e022SScott Feldman 	struct nlattr *afspec;
5568a44dbb2SRoopa Prabhu 
5575c34e022SScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
5585c34e022SScott Feldman 				 IFLA_AF_SPEC);
5595c34e022SScott Feldman 	if (afspec)
5605c34e022SScott Feldman 		return switchdev_port_br_afspec(dev, afspec,
5615c34e022SScott Feldman 						switchdev_port_obj_del);
5625c34e022SScott Feldman 
5638a44dbb2SRoopa Prabhu 	return 0;
5648a44dbb2SRoopa Prabhu }
565ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
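
/* Illustrative sketch (not part of this file): as with getlink above, the
 * set/del helpers are meant to be wired straight into a driver's netdev
 * ops (foo_netdev_ops is invented):
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};
 */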
5668a44dbb2SRoopa Prabhu 
567ebb9a03aSJiri Pirko static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
568b5d6fbdeSScott Feldman {
5699d47c0a2SJiri Pirko 	const struct switchdev_ops *ops = dev->switchdev_ops;
570b5d6fbdeSScott Feldman 	struct net_device *lower_dev;
571b5d6fbdeSScott Feldman 	struct net_device *port_dev;
572b5d6fbdeSScott Feldman 	struct list_head *iter;
573b5d6fbdeSScott Feldman 
574b5d6fbdeSScott Feldman 	/* Recursively search down until we find a sw port dev.
575f8e20a9fSScott Feldman 	 * (A sw port dev supports switchdev_port_attr_get).
576b5d6fbdeSScott Feldman 	 */
577b5d6fbdeSScott Feldman 
578f8e20a9fSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
579b5d6fbdeSScott Feldman 		return dev;
580b5d6fbdeSScott Feldman 
581b5d6fbdeSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
582ebb9a03aSJiri Pirko 		port_dev = switchdev_get_lowest_dev(lower_dev);
583b5d6fbdeSScott Feldman 		if (port_dev)
584b5d6fbdeSScott Feldman 			return port_dev;
585b5d6fbdeSScott Feldman 	}
586b5d6fbdeSScott Feldman 
587b5d6fbdeSScott Feldman 	return NULL;
588b5d6fbdeSScott Feldman }
589b5d6fbdeSScott Feldman 
590ebb9a03aSJiri Pirko static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
591b5d6fbdeSScott Feldman {
592f8e20a9fSScott Feldman 	struct switchdev_attr attr = {
593f8e20a9fSScott Feldman 		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
594f8e20a9fSScott Feldman 	};
595f8e20a9fSScott Feldman 	struct switchdev_attr prev_attr;
596b5d6fbdeSScott Feldman 	struct net_device *dev = NULL;
597b5d6fbdeSScott Feldman 	int nhsel;
598b5d6fbdeSScott Feldman 
599b5d6fbdeSScott Feldman 	/* For this route, all nexthop devs must be on the same switch. */
600b5d6fbdeSScott Feldman 
601b5d6fbdeSScott Feldman 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
602b5d6fbdeSScott Feldman 		const struct fib_nh *nh = &fi->fib_nh[nhsel];
603b5d6fbdeSScott Feldman 
604b5d6fbdeSScott Feldman 		if (!nh->nh_dev)
605b5d6fbdeSScott Feldman 			return NULL;
606b5d6fbdeSScott Feldman 
607ebb9a03aSJiri Pirko 		dev = switchdev_get_lowest_dev(nh->nh_dev);
608b5d6fbdeSScott Feldman 		if (!dev)
609b5d6fbdeSScott Feldman 			return NULL;
610b5d6fbdeSScott Feldman 
611f8e20a9fSScott Feldman 		if (switchdev_port_attr_get(dev, &attr))
612b5d6fbdeSScott Feldman 			return NULL;
613b5d6fbdeSScott Feldman 
614b5d6fbdeSScott Feldman 		if (nhsel > 0) {
615f8e20a9fSScott Feldman 			if (prev_attr.ppid.id_len != attr.ppid.id_len)
616b5d6fbdeSScott Feldman 				return NULL;
617f8e20a9fSScott Feldman 			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
618f8e20a9fSScott Feldman 				   attr.ppid.id_len))
619b5d6fbdeSScott Feldman 				return NULL;
620b5d6fbdeSScott Feldman 		}
621b5d6fbdeSScott Feldman 
622f8e20a9fSScott Feldman 		prev_attr = attr;
623b5d6fbdeSScott Feldman 	}
624b5d6fbdeSScott Feldman 
625b5d6fbdeSScott Feldman 	return dev;
626b5d6fbdeSScott Feldman }
627b5d6fbdeSScott Feldman 
6285e8d9049SScott Feldman /**
629ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_add - Add IPv4 route entry to switch
6305e8d9049SScott Feldman  *
6315e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
6325e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
6335e8d9049SScott Feldman  *	@fi: route FIB info structure
6345e8d9049SScott Feldman  *	@tos: route TOS
6355e8d9049SScott Feldman  *	@type: route type
636f8f21471SScott Feldman  *	@nlflags: netlink flags passed in (NLM_F_*)
6375e8d9049SScott Feldman  *	@tb_id: route table ID
6385e8d9049SScott Feldman  *
6395e8d9049SScott Feldman  *	Add IPv4 route entry to switch device.
6405e8d9049SScott Feldman  */
641ebb9a03aSJiri Pirko int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
642f8f21471SScott Feldman 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
6435e8d9049SScott Feldman {
644*58c2cb16SScott Feldman 	struct switchdev_obj fib_obj = {
645*58c2cb16SScott Feldman 		.id = SWITCHDEV_OBJ_IPV4_FIB,
646*58c2cb16SScott Feldman 		.ipv4_fib = {
647*58c2cb16SScott Feldman 			.dst = htonl(dst),
648*58c2cb16SScott Feldman 			.dst_len = dst_len,
649*58c2cb16SScott Feldman 			.fi = fi,
650*58c2cb16SScott Feldman 			.tos = tos,
651*58c2cb16SScott Feldman 			.type = type,
652*58c2cb16SScott Feldman 			.nlflags = nlflags,
653*58c2cb16SScott Feldman 			.tb_id = tb_id,
654*58c2cb16SScott Feldman 		},
655*58c2cb16SScott Feldman 	};
656b5d6fbdeSScott Feldman 	struct net_device *dev;
657b5d6fbdeSScott Feldman 	int err = 0;
658b5d6fbdeSScott Feldman 
6598e05fd71SScott Feldman 	/* Don't offload route if using custom ip rules or if
6608e05fd71SScott Feldman 	 * IPv4 FIB offloading has been disabled completely.
6618e05fd71SScott Feldman 	 */
6628e05fd71SScott Feldman 
663e1315db1SScott Feldman #ifdef CONFIG_IP_MULTIPLE_TABLES
664e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_has_custom_rules)
665e1315db1SScott Feldman 		return 0;
666e1315db1SScott Feldman #endif
667e1315db1SScott Feldman 
668e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_offload_disabled)
669104616e7SScott Feldman 		return 0;
670104616e7SScott Feldman 
671ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
672b5d6fbdeSScott Feldman 	if (!dev)
6735e8d9049SScott Feldman 		return 0;
674b5d6fbdeSScott Feldman 
675*58c2cb16SScott Feldman 	err = switchdev_port_obj_add(dev, &fib_obj);
676b5d6fbdeSScott Feldman 	if (!err)
677b5d6fbdeSScott Feldman 		fi->fib_flags |= RTNH_F_EXTERNAL;
678b5d6fbdeSScott Feldman 
679b5d6fbdeSScott Feldman 	return err;
6805e8d9049SScott Feldman }
681ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
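
/* Illustrative sketch (not part of this file): the driver side of the FIB
 * object built above.  It arrives through switchdev_port_obj_add() and is
 * handled in the driver's switchdev_port_obj_add op; the foo_fib4_*
 * helpers are invented placeholders for the hardware-specific work.
 *
 *	static int foo_port_obj_add(struct net_device *dev,
 *				    struct switchdev_obj *obj)
 *	{
 *		switch (obj->id) {
 *		case SWITCHDEV_OBJ_IPV4_FIB:
 *			switch (obj->trans) {
 *			case SWITCHDEV_TRANS_PREPARE:
 *				// only check for / reserve a route slot
 *				return foo_fib4_reserve(dev, &obj->ipv4_fib);
 *			case SWITCHDEV_TRANS_ABORT:
 *				foo_fib4_unreserve(dev, &obj->ipv4_fib);
 *				return 0;
 *			case SWITCHDEV_TRANS_COMMIT:
 *				return foo_fib4_install(dev, &obj->ipv4_fib);
 *			default:
 *				return -EOPNOTSUPP;
 *			}
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */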
6825e8d9049SScott Feldman 
6835e8d9049SScott Feldman /**
684ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
6855e8d9049SScott Feldman  *
6865e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
6875e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
6885e8d9049SScott Feldman  *	@fi: route FIB info structure
6895e8d9049SScott Feldman  *	@tos: route TOS
6905e8d9049SScott Feldman  *	@type: route type
6915e8d9049SScott Feldman  *	@tb_id: route table ID
6925e8d9049SScott Feldman  *
6935e8d9049SScott Feldman  *	Delete IPv4 route entry from switch device.
6945e8d9049SScott Feldman  */
695ebb9a03aSJiri Pirko int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
6965e8d9049SScott Feldman 			   u8 tos, u8 type, u32 tb_id)
6975e8d9049SScott Feldman {
698*58c2cb16SScott Feldman 	struct switchdev_obj fib_obj = {
699*58c2cb16SScott Feldman 		.id = SWITCHDEV_OBJ_IPV4_FIB,
700*58c2cb16SScott Feldman 		.ipv4_fib = {
701*58c2cb16SScott Feldman 			.dst = htonl(dst),
702*58c2cb16SScott Feldman 			.dst_len = dst_len,
703*58c2cb16SScott Feldman 			.fi = fi,
704*58c2cb16SScott Feldman 			.tos = tos,
705*58c2cb16SScott Feldman 			.type = type,
706*58c2cb16SScott Feldman 			.nlflags = 0,
707*58c2cb16SScott Feldman 			.tb_id = tb_id,
708*58c2cb16SScott Feldman 		},
709*58c2cb16SScott Feldman 	};
710b5d6fbdeSScott Feldman 	struct net_device *dev;
711b5d6fbdeSScott Feldman 	int err = 0;
712b5d6fbdeSScott Feldman 
713b5d6fbdeSScott Feldman 	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
7145e8d9049SScott Feldman 		return 0;
715b5d6fbdeSScott Feldman 
716ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
717b5d6fbdeSScott Feldman 	if (!dev)
718b5d6fbdeSScott Feldman 		return 0;
719b5d6fbdeSScott Feldman 
720*58c2cb16SScott Feldman 	err = switchdev_port_obj_del(dev, &fib_obj);
721b5d6fbdeSScott Feldman 	if (!err)
722b5d6fbdeSScott Feldman 		fi->fib_flags &= ~RTNH_F_EXTERNAL;
723b5d6fbdeSScott Feldman 
724b5d6fbdeSScott Feldman 	return err;
7255e8d9049SScott Feldman }
726ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
7278e05fd71SScott Feldman 
7288e05fd71SScott Feldman /**
729ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
7308e05fd71SScott Feldman  *
7318e05fd71SScott Feldman  *	@fi: route FIB info structure
7328e05fd71SScott Feldman  */
733ebb9a03aSJiri Pirko void switchdev_fib_ipv4_abort(struct fib_info *fi)
7348e05fd71SScott Feldman {
7358e05fd71SScott Feldman 	/* There was a problem installing this route to the offload
7368e05fd71SScott Feldman 	 * device.  For now, until we come up with more refined
7378e05fd71SScott Feldman 	 * policy handling, abruptly end IPv4 fib offloading for
7398e05fd71SScott Feldman 	 * the entire net by flushing offload device(s) of all
7398e05fd71SScott Feldman 	 * IPv4 routes, and mark IPv4 fib offloading broken from
7408e05fd71SScott Feldman 	 * this point forward.
7418e05fd71SScott Feldman 	 */
7428e05fd71SScott Feldman 
7438e05fd71SScott Feldman 	fib_flush_external(fi->fib_net);
7448e05fd71SScott Feldman 	fi->fib_net->ipv4.fib_offload_disabled = true;
7458e05fd71SScott Feldman }
746ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
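
/* Illustrative sketch (not part of this file): a driver that hits a
 * hardware failure while installing a route in the COMMIT phase (which it
 * must not fail with an error return) can instead fall back to software
 * forwarding for the whole net.  foo_hw_fib4_install() and fib4 are
 * invented; the fib_info pointer comes from the SWITCHDEV_OBJ_IPV4_FIB
 * object.
 *
 *	err = foo_hw_fib4_install(foo_port, fib4);
 *	if (err)
 *		switchdev_fib_ipv4_abort(fib4->fi);
 */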
747