xref: /linux-6.15/net/switchdev/switchdev.c (revision 47f8328b)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
3007f790cSJiri Pirko  * Copyright (c) 2014 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
18*47f8328bSScott Feldman #include <linux/if_bridge.h>
195e8d9049SScott Feldman #include <net/ip_fib.h>
20007f790cSJiri Pirko #include <net/switchdev.h>
21007f790cSJiri Pirko 
22007f790cSJiri Pirko /**
233094333dSScott Feldman  *	switchdev_port_attr_get - Get port attribute
243094333dSScott Feldman  *
253094333dSScott Feldman  *	@dev: port device
263094333dSScott Feldman  *	@attr: attribute to get
273094333dSScott Feldman  */
283094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
293094333dSScott Feldman {
303094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
313094333dSScott Feldman 	struct net_device *lower_dev;
323094333dSScott Feldman 	struct list_head *iter;
333094333dSScott Feldman 	struct switchdev_attr first = {
343094333dSScott Feldman 		.id = SWITCHDEV_ATTR_UNDEFINED
353094333dSScott Feldman 	};
363094333dSScott Feldman 	int err = -EOPNOTSUPP;
373094333dSScott Feldman 
383094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
393094333dSScott Feldman 		return ops->switchdev_port_attr_get(dev, attr);
403094333dSScott Feldman 
413094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
423094333dSScott Feldman 		return err;
433094333dSScott Feldman 
443094333dSScott Feldman 	/* Switch device port(s) may be stacked under
453094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to get attr on
463094333dSScott Feldman 	 * each port.  Return -ENODATA if attr values don't
473094333dSScott Feldman 	 * compare across ports.
483094333dSScott Feldman 	 */
493094333dSScott Feldman 
503094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
513094333dSScott Feldman 		err = switchdev_port_attr_get(lower_dev, attr);
523094333dSScott Feldman 		if (err)
533094333dSScott Feldman 			break;
543094333dSScott Feldman 		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
553094333dSScott Feldman 			first = *attr;
563094333dSScott Feldman 		else if (memcmp(&first, attr, sizeof(*attr)))
573094333dSScott Feldman 			return -ENODATA;
583094333dSScott Feldman 	}
593094333dSScott Feldman 
603094333dSScott Feldman 	return err;
613094333dSScott Feldman }
623094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
633094333dSScott Feldman 
643094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev,
653094333dSScott Feldman 				     struct switchdev_attr *attr)
663094333dSScott Feldman {
673094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
683094333dSScott Feldman 	struct net_device *lower_dev;
693094333dSScott Feldman 	struct list_head *iter;
703094333dSScott Feldman 	int err = -EOPNOTSUPP;
713094333dSScott Feldman 
723094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_set)
733094333dSScott Feldman 		return ops->switchdev_port_attr_set(dev, attr);
743094333dSScott Feldman 
753094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
763094333dSScott Feldman 		return err;
773094333dSScott Feldman 
783094333dSScott Feldman 	/* Switch device port(s) may be stacked under
793094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to set attr on
803094333dSScott Feldman 	 * each port.
813094333dSScott Feldman 	 */
823094333dSScott Feldman 
833094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
843094333dSScott Feldman 		err = __switchdev_port_attr_set(lower_dev, attr);
853094333dSScott Feldman 		if (err)
863094333dSScott Feldman 			break;
873094333dSScott Feldman 	}
883094333dSScott Feldman 
893094333dSScott Feldman 	return err;
903094333dSScott Feldman }
913094333dSScott Feldman 
/* Deferred attribute-set request: carries a copy of the attr and a
 * reference on @dev (taken via dev_hold) until the worker can grab
 * rtnl_lock and apply it.  Freed by the worker.
 */
struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};
973094333dSScott Feldman 
983094333dSScott Feldman static void switchdev_port_attr_set_work(struct work_struct *work)
993094333dSScott Feldman {
1003094333dSScott Feldman 	struct switchdev_attr_set_work *asw =
1013094333dSScott Feldman 		container_of(work, struct switchdev_attr_set_work, work);
1023094333dSScott Feldman 	int err;
1033094333dSScott Feldman 
1043094333dSScott Feldman 	rtnl_lock();
1053094333dSScott Feldman 	err = switchdev_port_attr_set(asw->dev, &asw->attr);
1063094333dSScott Feldman 	BUG_ON(err);
1073094333dSScott Feldman 	rtnl_unlock();
1083094333dSScott Feldman 
1093094333dSScott Feldman 	dev_put(asw->dev);
1103094333dSScott Feldman 	kfree(work);
1113094333dSScott Feldman }
1123094333dSScott Feldman 
1133094333dSScott Feldman static int switchdev_port_attr_set_defer(struct net_device *dev,
1143094333dSScott Feldman 					 struct switchdev_attr *attr)
1153094333dSScott Feldman {
1163094333dSScott Feldman 	struct switchdev_attr_set_work *asw;
1173094333dSScott Feldman 
1183094333dSScott Feldman 	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
1193094333dSScott Feldman 	if (!asw)
1203094333dSScott Feldman 		return -ENOMEM;
1213094333dSScott Feldman 
1223094333dSScott Feldman 	INIT_WORK(&asw->work, switchdev_port_attr_set_work);
1233094333dSScott Feldman 
1243094333dSScott Feldman 	dev_hold(dev);
1253094333dSScott Feldman 	asw->dev = dev;
1263094333dSScott Feldman 	memcpy(&asw->attr, attr, sizeof(asw->attr));
1273094333dSScott Feldman 
1283094333dSScott Feldman 	schedule_work(&asw->work);
1293094333dSScott Feldman 
1303094333dSScott Feldman 	return 0;
1313094333dSScott Feldman }
1323094333dSScott Feldman 
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	NOTE(review): if rtnl_lock is not held, the set is deferred to
 *	a worker and this returns 0 immediately — the caller gets no
 *	error feedback; a failure in the deferred set is fatal (BUG_ON
 *	in the worker).
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	int err;

	if (!rtnl_is_locked()) {
		/* Running prepare-commit transaction across stacked
		 * devices requires nothing moves, so if rtnl_lock is
		 * not held, schedule a worker thread to hold rtnl_lock
		 * while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	attr->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_attr_set(dev, attr);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		attr->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_attr_set(dev, attr);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everythings was OK in phase I.
	 */

	attr->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_attr_set(dev, attr);
	BUG_ON(err);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
1903094333dSScott Feldman 
191491d0f15SScott Feldman int __switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
192491d0f15SScott Feldman {
193491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
194491d0f15SScott Feldman 	struct net_device *lower_dev;
195491d0f15SScott Feldman 	struct list_head *iter;
196491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
197491d0f15SScott Feldman 
198491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_add)
199491d0f15SScott Feldman 		return ops->switchdev_port_obj_add(dev, obj);
200491d0f15SScott Feldman 
201491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
202491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to add object on
203491d0f15SScott Feldman 	 * each port.
204491d0f15SScott Feldman 	 */
205491d0f15SScott Feldman 
206491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
207491d0f15SScott Feldman 		err = __switchdev_port_obj_add(lower_dev, obj);
208491d0f15SScott Feldman 		if (err)
209491d0f15SScott Feldman 			break;
210491d0f15SScott Feldman 	}
211491d0f15SScott Feldman 
212491d0f15SScott Feldman 	return err;
213491d0f15SScott Feldman }
214491d0f15SScott Feldman 
215491d0f15SScott Feldman /**
216491d0f15SScott Feldman  *	switchdev_port_obj_add - Add port object
217491d0f15SScott Feldman  *
218491d0f15SScott Feldman  *	@dev: port device
219491d0f15SScott Feldman  *	@obj: object to add
220491d0f15SScott Feldman  *
221491d0f15SScott Feldman  *	Use a 2-phase prepare-commit transaction model to ensure
222491d0f15SScott Feldman  *	system is not left in a partially updated state due to
223491d0f15SScott Feldman  *	failure from driver/device.
224491d0f15SScott Feldman  *
225491d0f15SScott Feldman  *	rtnl_lock must be held.
226491d0f15SScott Feldman  */
227491d0f15SScott Feldman int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
228491d0f15SScott Feldman {
229491d0f15SScott Feldman 	int err;
230491d0f15SScott Feldman 
231491d0f15SScott Feldman 	ASSERT_RTNL();
232491d0f15SScott Feldman 
233491d0f15SScott Feldman 	/* Phase I: prepare for obj add. Driver/device should fail
234491d0f15SScott Feldman 	 * here if there are going to be issues in the commit phase,
235491d0f15SScott Feldman 	 * such as lack of resources or support.  The driver/device
236491d0f15SScott Feldman 	 * should reserve resources needed for the commit phase here,
237491d0f15SScott Feldman 	 * but should not commit the obj.
238491d0f15SScott Feldman 	 */
239491d0f15SScott Feldman 
240491d0f15SScott Feldman 	obj->trans = SWITCHDEV_TRANS_PREPARE;
241491d0f15SScott Feldman 	err = __switchdev_port_obj_add(dev, obj);
242491d0f15SScott Feldman 	if (err) {
243491d0f15SScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
244491d0f15SScott Feldman 		 * resources reserved in the prepare phase are
245491d0f15SScott Feldman 		 * released.
246491d0f15SScott Feldman 		 */
247491d0f15SScott Feldman 
248491d0f15SScott Feldman 		obj->trans = SWITCHDEV_TRANS_ABORT;
249491d0f15SScott Feldman 		__switchdev_port_obj_add(dev, obj);
250491d0f15SScott Feldman 
251491d0f15SScott Feldman 		return err;
252491d0f15SScott Feldman 	}
253491d0f15SScott Feldman 
254491d0f15SScott Feldman 	/* Phase II: commit obj add.  This cannot fail as a fault
255491d0f15SScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
256491d0f15SScott Feldman 	 * because the driver said everythings was OK in phase I.
257491d0f15SScott Feldman 	 */
258491d0f15SScott Feldman 
259491d0f15SScott Feldman 	obj->trans = SWITCHDEV_TRANS_COMMIT;
260491d0f15SScott Feldman 	err = __switchdev_port_obj_add(dev, obj);
261491d0f15SScott Feldman 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
262491d0f15SScott Feldman 
263491d0f15SScott Feldman 	return err;
264491d0f15SScott Feldman }
265491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
266491d0f15SScott Feldman 
267491d0f15SScott Feldman /**
268491d0f15SScott Feldman  *	switchdev_port_obj_del - Delete port object
269491d0f15SScott Feldman  *
270491d0f15SScott Feldman  *	@dev: port device
271491d0f15SScott Feldman  *	@obj: object to delete
272491d0f15SScott Feldman  */
273491d0f15SScott Feldman int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
274491d0f15SScott Feldman {
275491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
276491d0f15SScott Feldman 	struct net_device *lower_dev;
277491d0f15SScott Feldman 	struct list_head *iter;
278491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
279491d0f15SScott Feldman 
280491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_del)
281491d0f15SScott Feldman 		return ops->switchdev_port_obj_del(dev, obj);
282491d0f15SScott Feldman 
283491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
284491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to delete object on
285491d0f15SScott Feldman 	 * each port.
286491d0f15SScott Feldman 	 */
287491d0f15SScott Feldman 
288491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
289491d0f15SScott Feldman 		err = switchdev_port_obj_del(lower_dev, obj);
290491d0f15SScott Feldman 		if (err)
291491d0f15SScott Feldman 			break;
292491d0f15SScott Feldman 	}
293491d0f15SScott Feldman 
294491d0f15SScott Feldman 	return err;
295491d0f15SScott Feldman }
296491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
297491d0f15SScott Feldman 
/* Notifier chain for switchdev events.  A raw chain is used with
 * switchdev_mutex providing the serialization externally (register,
 * unregister and call paths below all take the mutex).
 */
static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
30003bf0c28SJiri Pirko 
30103bf0c28SJiri Pirko /**
302ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
30303bf0c28SJiri Pirko  *	@nb: notifier_block
30403bf0c28SJiri Pirko  *
30503bf0c28SJiri Pirko  *	Register switch device notifier. This should be used by code
30603bf0c28SJiri Pirko  *	which needs to monitor events happening in particular device.
30703bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_register().
30803bf0c28SJiri Pirko  */
309ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
31003bf0c28SJiri Pirko {
31103bf0c28SJiri Pirko 	int err;
31203bf0c28SJiri Pirko 
313ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
314ebb9a03aSJiri Pirko 	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
315ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
31603bf0c28SJiri Pirko 	return err;
31703bf0c28SJiri Pirko }
318ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
31903bf0c28SJiri Pirko 
32003bf0c28SJiri Pirko /**
321ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
32203bf0c28SJiri Pirko  *	@nb: notifier_block
32303bf0c28SJiri Pirko  *
32403bf0c28SJiri Pirko  *	Unregister switch device notifier.
32503bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_unregister().
32603bf0c28SJiri Pirko  */
327ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
32803bf0c28SJiri Pirko {
32903bf0c28SJiri Pirko 	int err;
33003bf0c28SJiri Pirko 
331ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
332ebb9a03aSJiri Pirko 	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
333ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
33403bf0c28SJiri Pirko 	return err;
33503bf0c28SJiri Pirko }
336ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
33703bf0c28SJiri Pirko 
33803bf0c28SJiri Pirko /**
339ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
34003bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
34103bf0c28SJiri Pirko  *	@dev: port device
34203bf0c28SJiri Pirko  *	@info: notifier information data
34303bf0c28SJiri Pirko  *
34403bf0c28SJiri Pirko  *	Call all network notifier blocks. This should be called by driver
34503bf0c28SJiri Pirko  *	when it needs to propagate hardware event.
34603bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_call_chain().
34703bf0c28SJiri Pirko  */
348ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
349ebb9a03aSJiri Pirko 			     struct switchdev_notifier_info *info)
35003bf0c28SJiri Pirko {
35103bf0c28SJiri Pirko 	int err;
35203bf0c28SJiri Pirko 
35303bf0c28SJiri Pirko 	info->dev = dev;
354ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
355ebb9a03aSJiri Pirko 	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
356ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
35703bf0c28SJiri Pirko 	return err;
35803bf0c28SJiri Pirko }
359ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
3608a44dbb2SRoopa Prabhu 
361*47f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev,
362*47f8328bSScott Feldman 				     struct nlattr *nlattr,
363*47f8328bSScott Feldman 				     unsigned long brport_flag)
364*47f8328bSScott Feldman {
365*47f8328bSScott Feldman 	struct switchdev_attr attr = {
366*47f8328bSScott Feldman 		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
367*47f8328bSScott Feldman 	};
368*47f8328bSScott Feldman 	u8 flag = nla_get_u8(nlattr);
369*47f8328bSScott Feldman 	int err;
370*47f8328bSScott Feldman 
371*47f8328bSScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
372*47f8328bSScott Feldman 	if (err)
373*47f8328bSScott Feldman 		return err;
374*47f8328bSScott Feldman 
375*47f8328bSScott Feldman 	if (flag)
376*47f8328bSScott Feldman 		attr.brport_flags |= brport_flag;
377*47f8328bSScott Feldman 	else
378*47f8328bSScott Feldman 		attr.brport_flags &= ~brport_flag;
379*47f8328bSScott Feldman 
380*47f8328bSScott Feldman 	return switchdev_port_attr_set(dev, &attr);
381*47f8328bSScott Feldman }
382*47f8328bSScott Feldman 
/* Validation policy for the IFLA_PROTINFO nest of bridge port
 * attributes.  The whole nest is validated up front even though only
 * LEARNING and LEARNING_SYNC are acted upon below.
 */
static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};
396*47f8328bSScott Feldman 
397*47f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
398*47f8328bSScott Feldman 					      struct nlattr *protinfo)
399*47f8328bSScott Feldman {
400*47f8328bSScott Feldman 	struct nlattr *attr;
401*47f8328bSScott Feldman 	int rem;
402*47f8328bSScott Feldman 	int err;
403*47f8328bSScott Feldman 
404*47f8328bSScott Feldman 	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
405*47f8328bSScott Feldman 				  switchdev_port_bridge_policy);
406*47f8328bSScott Feldman 	if (err)
407*47f8328bSScott Feldman 		return err;
408*47f8328bSScott Feldman 
409*47f8328bSScott Feldman 	nla_for_each_nested(attr, protinfo, rem) {
410*47f8328bSScott Feldman 		switch (nla_type(attr)) {
411*47f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING:
412*47f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
413*47f8328bSScott Feldman 							BR_LEARNING);
414*47f8328bSScott Feldman 			break;
415*47f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING_SYNC:
416*47f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
417*47f8328bSScott Feldman 							BR_LEARNING_SYNC);
418*47f8328bSScott Feldman 			break;
419*47f8328bSScott Feldman 		default:
420*47f8328bSScott Feldman 			err = -EOPNOTSUPP;
421*47f8328bSScott Feldman 			break;
422*47f8328bSScott Feldman 		}
423*47f8328bSScott Feldman 		if (err)
424*47f8328bSScott Feldman 			return err;
425*47f8328bSScott Feldman 	}
426*47f8328bSScott Feldman 
427*47f8328bSScott Feldman 	return 0;
428*47f8328bSScott Feldman }
429*47f8328bSScott Feldman 
/* Walk the IFLA_AF_SPEC nest and invoke @f (object add or del) once
 * per VLAN or VLAN range.  A range arrives as a RANGE_BEGIN entry
 * followed by a RANGE_END entry; obj.vlan.vid_start carries the
 * pending range start between loop iterations and is reset after
 * each call to @f.
 */
static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
	};
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		obj.vlan.flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			/* A second RANGE_BEGIN before the range was
			 * closed is malformed input.
			 */
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			/* RANGE_END without a preceding RANGE_BEGIN,
			 * or a non-increasing range, is malformed.
			 */
			if (!obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_end = vinfo->vid;
			if (obj.vlan.vid_end <= obj.vlan.vid_start)
				return -EINVAL;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		} else {
			/* Single VLAN: start == end, but an unfinished
			 * range must not still be pending.
			 */
			if (obj.vlan.vid_start)
				return -EINVAL;
			obj.vlan.vid_start = vinfo->vid;
			obj.vlan.vid_end = vinfo->vid;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(&obj.vlan, 0, sizeof(obj.vlan));
		}
	}

	return 0;
}
478*47f8328bSScott Feldman 
4798a44dbb2SRoopa Prabhu /**
480*47f8328bSScott Feldman  *	switchdev_port_bridge_setlink - Set bridge port attributes
4818a44dbb2SRoopa Prabhu  *
4828a44dbb2SRoopa Prabhu  *	@dev: port device
483*47f8328bSScott Feldman  *	@nlh: netlink header
484*47f8328bSScott Feldman  *	@flags: netlink flags
4858a44dbb2SRoopa Prabhu  *
486*47f8328bSScott Feldman  *	Called for SELF on rtnl_bridge_setlink to set bridge port
487*47f8328bSScott Feldman  *	attributes.
4888a44dbb2SRoopa Prabhu  */
489ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev,
4908a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
4918a44dbb2SRoopa Prabhu {
492*47f8328bSScott Feldman 	struct nlattr *protinfo;
493*47f8328bSScott Feldman 	struct nlattr *afspec;
494*47f8328bSScott Feldman 	int err = 0;
4958a44dbb2SRoopa Prabhu 
496*47f8328bSScott Feldman 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
497*47f8328bSScott Feldman 				   IFLA_PROTINFO);
498*47f8328bSScott Feldman 	if (protinfo) {
499*47f8328bSScott Feldman 		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
500*47f8328bSScott Feldman 		if (err)
501*47f8328bSScott Feldman 			return err;
502*47f8328bSScott Feldman 	}
5038a44dbb2SRoopa Prabhu 
504*47f8328bSScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
505*47f8328bSScott Feldman 				 IFLA_AF_SPEC);
506*47f8328bSScott Feldman 	if (afspec)
507*47f8328bSScott Feldman 		err = switchdev_port_br_afspec(dev, afspec,
508*47f8328bSScott Feldman 					       switchdev_port_obj_add);
5098a44dbb2SRoopa Prabhu 
510*47f8328bSScott Feldman 	return err;
5118a44dbb2SRoopa Prabhu }
512ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
5138a44dbb2SRoopa Prabhu 
5148a44dbb2SRoopa Prabhu /**
515ebb9a03aSJiri Pirko  *	switchdev_port_bridge_dellink - Notify switch device port of bridge
5168a44dbb2SRoopa Prabhu  *	port attribute delete
5178a44dbb2SRoopa Prabhu  *
5188a44dbb2SRoopa Prabhu  *	@dev: port device
5198a44dbb2SRoopa Prabhu  *	@nlh: netlink msg with bridge port attributes
5208a44dbb2SRoopa Prabhu  *	@flags: bridge setlink flags
5218a44dbb2SRoopa Prabhu  *
5228a44dbb2SRoopa Prabhu  *	Notify switch device port of bridge port attribute delete
5238a44dbb2SRoopa Prabhu  */
524ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev,
5258a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
5268a44dbb2SRoopa Prabhu {
5278a44dbb2SRoopa Prabhu 	const struct net_device_ops *ops = dev->netdev_ops;
5288a44dbb2SRoopa Prabhu 
5298a44dbb2SRoopa Prabhu 	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
5308a44dbb2SRoopa Prabhu 		return 0;
5318a44dbb2SRoopa Prabhu 
5328a44dbb2SRoopa Prabhu 	if (!ops->ndo_bridge_dellink)
5338a44dbb2SRoopa Prabhu 		return -EOPNOTSUPP;
5348a44dbb2SRoopa Prabhu 
5358a44dbb2SRoopa Prabhu 	return ops->ndo_bridge_dellink(dev, nlh, flags);
5368a44dbb2SRoopa Prabhu }
537ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
5388a44dbb2SRoopa Prabhu 
5398a44dbb2SRoopa Prabhu /**
540ebb9a03aSJiri Pirko  *	ndo_dflt_switchdev_port_bridge_setlink - default ndo bridge setlink
5418a44dbb2SRoopa Prabhu  *						 op for master devices
5428a44dbb2SRoopa Prabhu  *
5438a44dbb2SRoopa Prabhu  *	@dev: port device
5448a44dbb2SRoopa Prabhu  *	@nlh: netlink msg with bridge port attributes
5458a44dbb2SRoopa Prabhu  *	@flags: bridge setlink flags
5468a44dbb2SRoopa Prabhu  *
5478a44dbb2SRoopa Prabhu  *	Notify master device slaves of bridge port attributes
5488a44dbb2SRoopa Prabhu  */
549ebb9a03aSJiri Pirko int ndo_dflt_switchdev_port_bridge_setlink(struct net_device *dev,
5508a44dbb2SRoopa Prabhu 					   struct nlmsghdr *nlh, u16 flags)
5518a44dbb2SRoopa Prabhu {
5528a44dbb2SRoopa Prabhu 	struct net_device *lower_dev;
5538a44dbb2SRoopa Prabhu 	struct list_head *iter;
5548a44dbb2SRoopa Prabhu 	int ret = 0, err = 0;
5558a44dbb2SRoopa Prabhu 
5568a44dbb2SRoopa Prabhu 	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
5578a44dbb2SRoopa Prabhu 		return ret;
5588a44dbb2SRoopa Prabhu 
5598a44dbb2SRoopa Prabhu 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
560ebb9a03aSJiri Pirko 		err = switchdev_port_bridge_setlink(lower_dev, nlh, flags);
5618a44dbb2SRoopa Prabhu 		if (err && err != -EOPNOTSUPP)
5628a44dbb2SRoopa Prabhu 			ret = err;
5638a44dbb2SRoopa Prabhu 	}
5648a44dbb2SRoopa Prabhu 
5658a44dbb2SRoopa Prabhu 	return ret;
5668a44dbb2SRoopa Prabhu }
567ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_setlink);
5688a44dbb2SRoopa Prabhu 
5698a44dbb2SRoopa Prabhu /**
570ebb9a03aSJiri Pirko  *	ndo_dflt_switchdev_port_bridge_dellink - default ndo bridge dellink
5718a44dbb2SRoopa Prabhu  *						 op for master devices
5728a44dbb2SRoopa Prabhu  *
5738a44dbb2SRoopa Prabhu  *	@dev: port device
5748a44dbb2SRoopa Prabhu  *	@nlh: netlink msg with bridge port attributes
5758a44dbb2SRoopa Prabhu  *	@flags: bridge dellink flags
5768a44dbb2SRoopa Prabhu  *
5778a44dbb2SRoopa Prabhu  *	Notify master device slaves of bridge port attribute deletes
5788a44dbb2SRoopa Prabhu  */
579ebb9a03aSJiri Pirko int ndo_dflt_switchdev_port_bridge_dellink(struct net_device *dev,
5808a44dbb2SRoopa Prabhu 					   struct nlmsghdr *nlh, u16 flags)
5818a44dbb2SRoopa Prabhu {
5828a44dbb2SRoopa Prabhu 	struct net_device *lower_dev;
5838a44dbb2SRoopa Prabhu 	struct list_head *iter;
5848a44dbb2SRoopa Prabhu 	int ret = 0, err = 0;
5858a44dbb2SRoopa Prabhu 
5868a44dbb2SRoopa Prabhu 	if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
5878a44dbb2SRoopa Prabhu 		return ret;
5888a44dbb2SRoopa Prabhu 
5898a44dbb2SRoopa Prabhu 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
590ebb9a03aSJiri Pirko 		err = switchdev_port_bridge_dellink(lower_dev, nlh, flags);
5918a44dbb2SRoopa Prabhu 		if (err && err != -EOPNOTSUPP)
5928a44dbb2SRoopa Prabhu 			ret = err;
5938a44dbb2SRoopa Prabhu 	}
5948a44dbb2SRoopa Prabhu 
5958a44dbb2SRoopa Prabhu 	return ret;
5968a44dbb2SRoopa Prabhu }
597ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(ndo_dflt_switchdev_port_bridge_dellink);
5985e8d9049SScott Feldman 
599ebb9a03aSJiri Pirko static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
600b5d6fbdeSScott Feldman {
6019d47c0a2SJiri Pirko 	const struct switchdev_ops *ops = dev->switchdev_ops;
602b5d6fbdeSScott Feldman 	struct net_device *lower_dev;
603b5d6fbdeSScott Feldman 	struct net_device *port_dev;
604b5d6fbdeSScott Feldman 	struct list_head *iter;
605b5d6fbdeSScott Feldman 
606b5d6fbdeSScott Feldman 	/* Recusively search down until we find a sw port dev.
607f8e20a9fSScott Feldman 	 * (A sw port dev supports switchdev_port_attr_get).
608b5d6fbdeSScott Feldman 	 */
609b5d6fbdeSScott Feldman 
610f8e20a9fSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
611b5d6fbdeSScott Feldman 		return dev;
612b5d6fbdeSScott Feldman 
613b5d6fbdeSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
614ebb9a03aSJiri Pirko 		port_dev = switchdev_get_lowest_dev(lower_dev);
615b5d6fbdeSScott Feldman 		if (port_dev)
616b5d6fbdeSScott Feldman 			return port_dev;
617b5d6fbdeSScott Feldman 	}
618b5d6fbdeSScott Feldman 
619b5d6fbdeSScott Feldman 	return NULL;
620b5d6fbdeSScott Feldman }
621b5d6fbdeSScott Feldman 
622ebb9a03aSJiri Pirko static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
623b5d6fbdeSScott Feldman {
624f8e20a9fSScott Feldman 	struct switchdev_attr attr = {
625f8e20a9fSScott Feldman 		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
626f8e20a9fSScott Feldman 	};
627f8e20a9fSScott Feldman 	struct switchdev_attr prev_attr;
628b5d6fbdeSScott Feldman 	struct net_device *dev = NULL;
629b5d6fbdeSScott Feldman 	int nhsel;
630b5d6fbdeSScott Feldman 
631b5d6fbdeSScott Feldman 	/* For this route, all nexthop devs must be on the same switch. */
632b5d6fbdeSScott Feldman 
633b5d6fbdeSScott Feldman 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
634b5d6fbdeSScott Feldman 		const struct fib_nh *nh = &fi->fib_nh[nhsel];
635b5d6fbdeSScott Feldman 
636b5d6fbdeSScott Feldman 		if (!nh->nh_dev)
637b5d6fbdeSScott Feldman 			return NULL;
638b5d6fbdeSScott Feldman 
639ebb9a03aSJiri Pirko 		dev = switchdev_get_lowest_dev(nh->nh_dev);
640b5d6fbdeSScott Feldman 		if (!dev)
641b5d6fbdeSScott Feldman 			return NULL;
642b5d6fbdeSScott Feldman 
643f8e20a9fSScott Feldman 		if (switchdev_port_attr_get(dev, &attr))
644b5d6fbdeSScott Feldman 			return NULL;
645b5d6fbdeSScott Feldman 
646b5d6fbdeSScott Feldman 		if (nhsel > 0) {
647f8e20a9fSScott Feldman 			if (prev_attr.ppid.id_len != attr.ppid.id_len)
648b5d6fbdeSScott Feldman 				return NULL;
649f8e20a9fSScott Feldman 			if (memcmp(prev_attr.ppid.id, attr.ppid.id,
650f8e20a9fSScott Feldman 				   attr.ppid.id_len))
651b5d6fbdeSScott Feldman 				return NULL;
652b5d6fbdeSScott Feldman 		}
653b5d6fbdeSScott Feldman 
654f8e20a9fSScott Feldman 		prev_attr = attr;
655b5d6fbdeSScott Feldman 	}
656b5d6fbdeSScott Feldman 
657b5d6fbdeSScott Feldman 	return dev;
658b5d6fbdeSScott Feldman }
659b5d6fbdeSScott Feldman 
6605e8d9049SScott Feldman /**
661ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_add - Add IPv4 route entry to switch
6625e8d9049SScott Feldman  *
6635e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
6645e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
6655e8d9049SScott Feldman  *	@fi: route FIB info structure
6665e8d9049SScott Feldman  *	@tos: route TOS
6675e8d9049SScott Feldman  *	@type: route type
668f8f21471SScott Feldman  *	@nlflags: netlink flags passed in (NLM_F_*)
6695e8d9049SScott Feldman  *	@tb_id: route table ID
6705e8d9049SScott Feldman  *
6715e8d9049SScott Feldman  *	Add IPv4 route entry to switch device.
6725e8d9049SScott Feldman  */
673ebb9a03aSJiri Pirko int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
674f8f21471SScott Feldman 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
6755e8d9049SScott Feldman {
676b5d6fbdeSScott Feldman 	struct net_device *dev;
6779d47c0a2SJiri Pirko 	const struct switchdev_ops *ops;
678b5d6fbdeSScott Feldman 	int err = 0;
679b5d6fbdeSScott Feldman 
6808e05fd71SScott Feldman 	/* Don't offload route if using custom ip rules or if
6818e05fd71SScott Feldman 	 * IPv4 FIB offloading has been disabled completely.
6828e05fd71SScott Feldman 	 */
6838e05fd71SScott Feldman 
684e1315db1SScott Feldman #ifdef CONFIG_IP_MULTIPLE_TABLES
685e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_has_custom_rules)
686e1315db1SScott Feldman 		return 0;
687e1315db1SScott Feldman #endif
688e1315db1SScott Feldman 
689e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_offload_disabled)
690104616e7SScott Feldman 		return 0;
691104616e7SScott Feldman 
692ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
693b5d6fbdeSScott Feldman 	if (!dev)
6945e8d9049SScott Feldman 		return 0;
6959d47c0a2SJiri Pirko 	ops = dev->switchdev_ops;
696b5d6fbdeSScott Feldman 
6979d47c0a2SJiri Pirko 	if (ops->switchdev_fib_ipv4_add) {
6989d47c0a2SJiri Pirko 		err = ops->switchdev_fib_ipv4_add(dev, htonl(dst), dst_len,
699f8f21471SScott Feldman 						  fi, tos, type, nlflags,
700f8f21471SScott Feldman 						  tb_id);
701b5d6fbdeSScott Feldman 		if (!err)
702b5d6fbdeSScott Feldman 			fi->fib_flags |= RTNH_F_EXTERNAL;
703b5d6fbdeSScott Feldman 	}
704b5d6fbdeSScott Feldman 
705b5d6fbdeSScott Feldman 	return err;
7065e8d9049SScott Feldman }
707ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
7085e8d9049SScott Feldman 
7095e8d9049SScott Feldman /**
710ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
7115e8d9049SScott Feldman  *
7125e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
7135e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
7145e8d9049SScott Feldman  *	@fi: route FIB info structure
7155e8d9049SScott Feldman  *	@tos: route TOS
7165e8d9049SScott Feldman  *	@type: route type
7175e8d9049SScott Feldman  *	@tb_id: route table ID
7185e8d9049SScott Feldman  *
7195e8d9049SScott Feldman  *	Delete IPv4 route entry from switch device.
7205e8d9049SScott Feldman  */
721ebb9a03aSJiri Pirko int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
7225e8d9049SScott Feldman 			   u8 tos, u8 type, u32 tb_id)
7235e8d9049SScott Feldman {
724b5d6fbdeSScott Feldman 	struct net_device *dev;
7259d47c0a2SJiri Pirko 	const struct switchdev_ops *ops;
726b5d6fbdeSScott Feldman 	int err = 0;
727b5d6fbdeSScott Feldman 
728b5d6fbdeSScott Feldman 	if (!(fi->fib_flags & RTNH_F_EXTERNAL))
7295e8d9049SScott Feldman 		return 0;
730b5d6fbdeSScott Feldman 
731ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
732b5d6fbdeSScott Feldman 	if (!dev)
733b5d6fbdeSScott Feldman 		return 0;
7349d47c0a2SJiri Pirko 	ops = dev->switchdev_ops;
735b5d6fbdeSScott Feldman 
7369d47c0a2SJiri Pirko 	if (ops->switchdev_fib_ipv4_del) {
7379d47c0a2SJiri Pirko 		err = ops->switchdev_fib_ipv4_del(dev, htonl(dst), dst_len,
738b5d6fbdeSScott Feldman 						  fi, tos, type, tb_id);
739b5d6fbdeSScott Feldman 		if (!err)
740b5d6fbdeSScott Feldman 			fi->fib_flags &= ~RTNH_F_EXTERNAL;
741b5d6fbdeSScott Feldman 	}
742b5d6fbdeSScott Feldman 
743b5d6fbdeSScott Feldman 	return err;
7445e8d9049SScott Feldman }
745ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
7468e05fd71SScott Feldman 
/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 *
 *	Called when installing a route to the offload device failed;
 *	falls back to software FIB for the whole netns.
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
766