xref: /linux-6.15/net/switchdev/switchdev.c (revision 793f4014)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
37ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
1847f8328bSScott Feldman #include <linux/if_bridge.h>
197ea6eb3fSJiri Pirko #include <linux/list.h>
20*793f4014SJiri Pirko #include <linux/workqueue.h>
215e8d9049SScott Feldman #include <net/ip_fib.h>
22007f790cSJiri Pirko #include <net/switchdev.h>
23007f790cSJiri Pirko 
24007f790cSJiri Pirko /**
257ea6eb3fSJiri Pirko  *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
267ea6eb3fSJiri Pirko  *
277ea6eb3fSJiri Pirko  *	@trans: transaction
287ea6eb3fSJiri Pirko  *	@data: pointer to data being queued
297ea6eb3fSJiri Pirko  *	@destructor: data destructor
307ea6eb3fSJiri Pirko  *	@tritem: transaction item being queued
317ea6eb3fSJiri Pirko  *
327ea6eb3fSJiri Pirko  *	Enqueue data item to transaction queue. tritem is typically placed in
337ea6eb3fSJiri Pirko  *	container pointed at by data pointer. The destructor is called on
347ea6eb3fSJiri Pirko  *	transaction abort and after a successful commit phase in case
357ea6eb3fSJiri Pirko  *	the caller did not dequeue the item beforehand.
367ea6eb3fSJiri Pirko  */
377ea6eb3fSJiri Pirko void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
387ea6eb3fSJiri Pirko 				  void *data, void (*destructor)(void const *),
397ea6eb3fSJiri Pirko 				  struct switchdev_trans_item *tritem)
407ea6eb3fSJiri Pirko {
417ea6eb3fSJiri Pirko 	tritem->data = data;
427ea6eb3fSJiri Pirko 	tritem->destructor = destructor;
437ea6eb3fSJiri Pirko 	list_add_tail(&tritem->list, &trans->item_list);
447ea6eb3fSJiri Pirko }
457ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
467ea6eb3fSJiri Pirko 
477ea6eb3fSJiri Pirko static struct switchdev_trans_item *
487ea6eb3fSJiri Pirko __switchdev_trans_item_dequeue(struct switchdev_trans *trans)
497ea6eb3fSJiri Pirko {
507ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
517ea6eb3fSJiri Pirko 
527ea6eb3fSJiri Pirko 	if (list_empty(&trans->item_list))
537ea6eb3fSJiri Pirko 		return NULL;
547ea6eb3fSJiri Pirko 	tritem = list_first_entry(&trans->item_list,
557ea6eb3fSJiri Pirko 				  struct switchdev_trans_item, list);
567ea6eb3fSJiri Pirko 	list_del(&tritem->list);
577ea6eb3fSJiri Pirko 	return tritem;
587ea6eb3fSJiri Pirko }
597ea6eb3fSJiri Pirko 
607ea6eb3fSJiri Pirko /**
617ea6eb3fSJiri Pirko  *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
627ea6eb3fSJiri Pirko  *
637ea6eb3fSJiri Pirko  *	@trans: transaction
647ea6eb3fSJiri Pirko  */
657ea6eb3fSJiri Pirko void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
667ea6eb3fSJiri Pirko {
677ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
687ea6eb3fSJiri Pirko 
697ea6eb3fSJiri Pirko 	tritem = __switchdev_trans_item_dequeue(trans);
707ea6eb3fSJiri Pirko 	BUG_ON(!tritem);
717ea6eb3fSJiri Pirko 	return tritem->data;
727ea6eb3fSJiri Pirko }
737ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
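/* Usage sketch (illustrative only; the "foo" names are hypothetical
 * driver-side identifiers): a driver's prepare/commit handler typically
 * allocates private state in the prepare phase, queues it on the
 * transaction, and picks it up again in the commit phase.
 *
 *	static void foo_entry_destroy(void const *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	if (trans->ph_prepare) {
 *		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *		if (!entry)
 *			return -ENOMEM;
 *		switchdev_trans_item_enqueue(trans, entry, foo_entry_destroy,
 *					     &entry->tritem);
 *	} else {
 *		entry = switchdev_trans_item_dequeue(trans);
 *		... program the device using entry ...
 *	}
 */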
747ea6eb3fSJiri Pirko 
757ea6eb3fSJiri Pirko static void switchdev_trans_init(struct switchdev_trans *trans)
767ea6eb3fSJiri Pirko {
777ea6eb3fSJiri Pirko 	INIT_LIST_HEAD(&trans->item_list);
787ea6eb3fSJiri Pirko }
797ea6eb3fSJiri Pirko 
807ea6eb3fSJiri Pirko static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
817ea6eb3fSJiri Pirko {
827ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
837ea6eb3fSJiri Pirko 
847ea6eb3fSJiri Pirko 	while ((tritem = __switchdev_trans_item_dequeue(trans)))
857ea6eb3fSJiri Pirko 		tritem->destructor(tritem->data);
867ea6eb3fSJiri Pirko }
877ea6eb3fSJiri Pirko 
887ea6eb3fSJiri Pirko static void switchdev_trans_items_warn_destroy(struct net_device *dev,
897ea6eb3fSJiri Pirko 					       struct switchdev_trans *trans)
907ea6eb3fSJiri Pirko {
917ea6eb3fSJiri Pirko 	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
927ea6eb3fSJiri Pirko 	     dev->name);
937ea6eb3fSJiri Pirko 	switchdev_trans_items_destroy(trans);
947ea6eb3fSJiri Pirko }
957ea6eb3fSJiri Pirko 
96*793f4014SJiri Pirko static LIST_HEAD(deferred);
97*793f4014SJiri Pirko static DEFINE_SPINLOCK(deferred_lock);
98*793f4014SJiri Pirko 
99*793f4014SJiri Pirko typedef void switchdev_deferred_func_t(struct net_device *dev,
100*793f4014SJiri Pirko 				       const void *data);
101*793f4014SJiri Pirko 
102*793f4014SJiri Pirko struct switchdev_deferred_item {
103*793f4014SJiri Pirko 	struct list_head list;
104*793f4014SJiri Pirko 	struct net_device *dev;
105*793f4014SJiri Pirko 	switchdev_deferred_func_t *func;
106*793f4014SJiri Pirko 	unsigned long data[0];
107*793f4014SJiri Pirko };
108*793f4014SJiri Pirko 
109*793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
110*793f4014SJiri Pirko {
111*793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
112*793f4014SJiri Pirko 
113*793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
114*793f4014SJiri Pirko 	if (list_empty(&deferred)) {
115*793f4014SJiri Pirko 		dfitem = NULL;
116*793f4014SJiri Pirko 		goto unlock;
117*793f4014SJiri Pirko 	}
118*793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
119*793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
120*793f4014SJiri Pirko 	list_del(&dfitem->list);
121*793f4014SJiri Pirko unlock:
122*793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
123*793f4014SJiri Pirko 	return dfitem;
124*793f4014SJiri Pirko }
125*793f4014SJiri Pirko 
126*793f4014SJiri Pirko /**
127*793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
128*793f4014SJiri Pirko  *
129*793f4014SJiri Pirko  *	Called to flush the ops currently queued in the deferred ops queue.
130*793f4014SJiri Pirko  *	rtnl_lock must be held.
131*793f4014SJiri Pirko  */
132*793f4014SJiri Pirko void switchdev_deferred_process(void)
133*793f4014SJiri Pirko {
134*793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
135*793f4014SJiri Pirko 
136*793f4014SJiri Pirko 	ASSERT_RTNL();
137*793f4014SJiri Pirko 
138*793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
139*793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
140*793f4014SJiri Pirko 		dev_put(dfitem->dev);
141*793f4014SJiri Pirko 		kfree(dfitem);
142*793f4014SJiri Pirko 	}
143*793f4014SJiri Pirko }
144*793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
145*793f4014SJiri Pirko 
146*793f4014SJiri Pirko static void switchdev_deferred_process_work(struct work_struct *work)
147*793f4014SJiri Pirko {
148*793f4014SJiri Pirko 	rtnl_lock();
149*793f4014SJiri Pirko 	switchdev_deferred_process();
150*793f4014SJiri Pirko 	rtnl_unlock();
151*793f4014SJiri Pirko }
152*793f4014SJiri Pirko 
153*793f4014SJiri Pirko static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
154*793f4014SJiri Pirko 
155*793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
156*793f4014SJiri Pirko 				      const void *data, size_t data_len,
157*793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
158*793f4014SJiri Pirko {
159*793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
160*793f4014SJiri Pirko 
161*793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
162*793f4014SJiri Pirko 	if (!dfitem)
163*793f4014SJiri Pirko 		return -ENOMEM;
164*793f4014SJiri Pirko 	dfitem->dev = dev;
165*793f4014SJiri Pirko 	dfitem->func = func;
166*793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
167*793f4014SJiri Pirko 	dev_hold(dev);
168*793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
169*793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
170*793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
171*793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
172*793f4014SJiri Pirko 	return 0;
173*793f4014SJiri Pirko }
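/* Usage sketch (illustrative only; the "foo" names are hypothetical): a
 * caller running without rtnl_lock can defer an operation through the
 * queue above.  The deferred function later runs from the workqueue with
 * rtnl_lock held.
 *
 *	static void switchdev_foo_deferred(struct net_device *dev,
 *					   const void *data)
 *	{
 *		const struct foo *foo = data;
 *
 *		do_foo_under_rtnl(dev, foo);
 *	}
 *
 *	err = switchdev_deferred_enqueue(dev, foo, sizeof(*foo),
 *					 switchdev_foo_deferred);
 */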
174*793f4014SJiri Pirko 
1757ea6eb3fSJiri Pirko /**
1763094333dSScott Feldman  *	switchdev_port_attr_get - Get port attribute
1773094333dSScott Feldman  *
1783094333dSScott Feldman  *	@dev: port device
1793094333dSScott Feldman  *	@attr: attribute to get
1803094333dSScott Feldman  */
1813094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
1823094333dSScott Feldman {
1833094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
1843094333dSScott Feldman 	struct net_device *lower_dev;
1853094333dSScott Feldman 	struct list_head *iter;
1863094333dSScott Feldman 	struct switchdev_attr first = {
1871f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_UNDEFINED
1883094333dSScott Feldman 	};
1893094333dSScott Feldman 	int err = -EOPNOTSUPP;
1903094333dSScott Feldman 
1913094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
1923094333dSScott Feldman 		return ops->switchdev_port_attr_get(dev, attr);
1933094333dSScott Feldman 
1943094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
1953094333dSScott Feldman 		return err;
1963094333dSScott Feldman 
1973094333dSScott Feldman 	/* Switch device port(s) may be stacked under
1983094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to get attr on
1993094333dSScott Feldman 	 * each port.  Return -ENODATA if attr values don't
2003094333dSScott Feldman 	 * match across ports.
2013094333dSScott Feldman 	 */
2023094333dSScott Feldman 
2033094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2043094333dSScott Feldman 		err = switchdev_port_attr_get(lower_dev, attr);
2053094333dSScott Feldman 		if (err)
2063094333dSScott Feldman 			break;
2071f868398SJiri Pirko 		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
2083094333dSScott Feldman 			first = *attr;
2093094333dSScott Feldman 		else if (memcmp(&first, attr, sizeof(*attr)))
2103094333dSScott Feldman 			return -ENODATA;
2113094333dSScott Feldman 	}
2123094333dSScott Feldman 
2133094333dSScott Feldman 	return err;
2143094333dSScott Feldman }
2153094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
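/* Usage sketch (illustrative; "port_dev" is an assumed switch port
 * netdevice): querying the physical switch ID of a port, as
 * switchdev_get_dev_by_nhs() does further down in this file.
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 *	};
 *	int err;
 *
 *	err = switchdev_port_attr_get(port_dev, &attr);
 *	if (!err)
 *		... attr.u.ppid now holds the switch ID ...
 */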
2163094333dSScott Feldman 
2173094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev,
2187ea6eb3fSJiri Pirko 				     struct switchdev_attr *attr,
2197ea6eb3fSJiri Pirko 				     struct switchdev_trans *trans)
2203094333dSScott Feldman {
2213094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
2223094333dSScott Feldman 	struct net_device *lower_dev;
2233094333dSScott Feldman 	struct list_head *iter;
2243094333dSScott Feldman 	int err = -EOPNOTSUPP;
2253094333dSScott Feldman 
2263094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_set)
2277ea6eb3fSJiri Pirko 		return ops->switchdev_port_attr_set(dev, attr, trans);
2283094333dSScott Feldman 
2293094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
230464314eaSScott Feldman 		goto done;
2313094333dSScott Feldman 
2323094333dSScott Feldman 	/* Switch device port(s) may be stacked under
2333094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to set attr on
2343094333dSScott Feldman 	 * each port.
2353094333dSScott Feldman 	 */
2363094333dSScott Feldman 
2373094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2387ea6eb3fSJiri Pirko 		err = __switchdev_port_attr_set(lower_dev, attr, trans);
239464314eaSScott Feldman 		if (err == -EOPNOTSUPP &&
240464314eaSScott Feldman 		    attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
241464314eaSScott Feldman 			continue;
2423094333dSScott Feldman 		if (err)
2433094333dSScott Feldman 			break;
2443094333dSScott Feldman 	}
2453094333dSScott Feldman 
246464314eaSScott Feldman done:
247464314eaSScott Feldman 	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
248464314eaSScott Feldman 		err = 0;
249464314eaSScott Feldman 
2503094333dSScott Feldman 	return err;
2513094333dSScott Feldman }
2523094333dSScott Feldman 
2533094333dSScott Feldman struct switchdev_attr_set_work {
2543094333dSScott Feldman 	struct work_struct work;
2553094333dSScott Feldman 	struct net_device *dev;
2563094333dSScott Feldman 	struct switchdev_attr attr;
2573094333dSScott Feldman };
2583094333dSScott Feldman 
2593094333dSScott Feldman static void switchdev_port_attr_set_work(struct work_struct *work)
2603094333dSScott Feldman {
2613094333dSScott Feldman 	struct switchdev_attr_set_work *asw =
2623094333dSScott Feldman 		container_of(work, struct switchdev_attr_set_work, work);
2633094333dSScott Feldman 	int err;
2643094333dSScott Feldman 
2653094333dSScott Feldman 	rtnl_lock();
2663094333dSScott Feldman 	err = switchdev_port_attr_set(asw->dev, &asw->attr);
26757225e77SScott Feldman 	if (err && err != -EOPNOTSUPP)
26857225e77SScott Feldman 		netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
26957225e77SScott Feldman 			   err, asw->attr.id);
2703094333dSScott Feldman 	rtnl_unlock();
2713094333dSScott Feldman 
2723094333dSScott Feldman 	dev_put(asw->dev);
2733094333dSScott Feldman 	kfree(work);
2743094333dSScott Feldman }
2753094333dSScott Feldman 
2763094333dSScott Feldman static int switchdev_port_attr_set_defer(struct net_device *dev,
2773094333dSScott Feldman 					 struct switchdev_attr *attr)
2783094333dSScott Feldman {
2793094333dSScott Feldman 	struct switchdev_attr_set_work *asw;
2803094333dSScott Feldman 
2813094333dSScott Feldman 	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
2823094333dSScott Feldman 	if (!asw)
2833094333dSScott Feldman 		return -ENOMEM;
2843094333dSScott Feldman 
2853094333dSScott Feldman 	INIT_WORK(&asw->work, switchdev_port_attr_set_work);
2863094333dSScott Feldman 
2873094333dSScott Feldman 	dev_hold(dev);
2883094333dSScott Feldman 	asw->dev = dev;
2893094333dSScott Feldman 	memcpy(&asw->attr, attr, sizeof(asw->attr));
2903094333dSScott Feldman 
2913094333dSScott Feldman 	schedule_work(&asw->work);
2923094333dSScott Feldman 
2933094333dSScott Feldman 	return 0;
2943094333dSScott Feldman }
2953094333dSScott Feldman 
2963094333dSScott Feldman /**
2973094333dSScott Feldman  *	switchdev_port_attr_set - Set port attribute
2983094333dSScott Feldman  *
2993094333dSScott Feldman  *	@dev: port device
3003094333dSScott Feldman  *	@attr: attribute to set
3013094333dSScott Feldman  *
3023094333dSScott Feldman  *	Use a 2-phase prepare-commit transaction model to ensure
3033094333dSScott Feldman  *	the system is not left in a partially updated state due to
3043094333dSScott Feldman  *	failure from driver/device.
3053094333dSScott Feldman  */
3063094333dSScott Feldman int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
3073094333dSScott Feldman {
3087ea6eb3fSJiri Pirko 	struct switchdev_trans trans;
3093094333dSScott Feldman 	int err;
3103094333dSScott Feldman 
3113094333dSScott Feldman 	if (!rtnl_is_locked()) {
3123094333dSScott Feldman 		/* Running prepare-commit transaction across stacked
3133094333dSScott Feldman 		 * devices requires nothing moves, so if rtnl_lock is
3143094333dSScott Feldman 		 * not held, schedule a worker thread to hold rtnl_lock
3153094333dSScott Feldman 		 * while setting attr.
3163094333dSScott Feldman 		 */
3173094333dSScott Feldman 
3183094333dSScott Feldman 		return switchdev_port_attr_set_defer(dev, attr);
3193094333dSScott Feldman 	}
3203094333dSScott Feldman 
3217ea6eb3fSJiri Pirko 	switchdev_trans_init(&trans);
3227ea6eb3fSJiri Pirko 
3233094333dSScott Feldman 	/* Phase I: prepare for attr set. Driver/device should fail
3243094333dSScott Feldman 	 * here if there are going to be issues in the commit phase,
3253094333dSScott Feldman 	 * such as lack of resources or support.  The driver/device
3263094333dSScott Feldman 	 * should reserve resources needed for the commit phase here,
3273094333dSScott Feldman 	 * but should not commit the attr.
3283094333dSScott Feldman 	 */
3293094333dSScott Feldman 
330f623ab7fSJiri Pirko 	trans.ph_prepare = true;
3317ea6eb3fSJiri Pirko 	err = __switchdev_port_attr_set(dev, attr, &trans);
3323094333dSScott Feldman 	if (err) {
3333094333dSScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
3343094333dSScott Feldman 		 * resources reserved in the prepare phase are
3353094333dSScott Feldman 		 * released.
3363094333dSScott Feldman 		 */
3373094333dSScott Feldman 
3389f6467cfSJiri Pirko 		if (err != -EOPNOTSUPP)
3397ea6eb3fSJiri Pirko 			switchdev_trans_items_destroy(&trans);
3403094333dSScott Feldman 
3413094333dSScott Feldman 		return err;
3423094333dSScott Feldman 	}
3433094333dSScott Feldman 
3443094333dSScott Feldman 	/* Phase II: commit attr set.  This cannot fail as a fault
3453094333dSScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
3463094333dSScott Feldman 	 * because the driver said everything was OK in phase I.
3473094333dSScott Feldman 	 */
3483094333dSScott Feldman 
349f623ab7fSJiri Pirko 	trans.ph_prepare = false;
3507ea6eb3fSJiri Pirko 	err = __switchdev_port_attr_set(dev, attr, &trans);
351e9fdaec0SScott Feldman 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
352e9fdaec0SScott Feldman 	     dev->name, attr->id);
3537ea6eb3fSJiri Pirko 	switchdev_trans_items_warn_destroy(dev, &trans);
3543094333dSScott Feldman 
3553094333dSScott Feldman 	return err;
3563094333dSScott Feldman }
3573094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
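/* Usage sketch (illustrative; "port_dev" is an assumed port netdevice):
 * setting the bridge port flags attribute, mirroring what
 * switchdev_port_br_setflag() below does for netlink requests.
 *
 *	struct switchdev_attr attr = {
 *		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 *		.u.brport_flags = BR_LEARNING | BR_LEARNING_SYNC,
 *	};
 *
 *	err = switchdev_port_attr_set(port_dev, &attr);
 */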
3583094333dSScott Feldman 
35922c1f67eSScott Feldman static int __switchdev_port_obj_add(struct net_device *dev,
360648b4a99SJiri Pirko 				    const struct switchdev_obj *obj,
3617ea6eb3fSJiri Pirko 				    struct switchdev_trans *trans)
362491d0f15SScott Feldman {
363491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
364491d0f15SScott Feldman 	struct net_device *lower_dev;
365491d0f15SScott Feldman 	struct list_head *iter;
366491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
367491d0f15SScott Feldman 
368491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_add)
3699e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_add(dev, obj, trans);
370491d0f15SScott Feldman 
371491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
372491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to add object on
373491d0f15SScott Feldman 	 * each port.
374491d0f15SScott Feldman 	 */
375491d0f15SScott Feldman 
376491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
3779e8f4a54SJiri Pirko 		err = __switchdev_port_obj_add(lower_dev, obj, trans);
378491d0f15SScott Feldman 		if (err)
379491d0f15SScott Feldman 			break;
380491d0f15SScott Feldman 	}
381491d0f15SScott Feldman 
382491d0f15SScott Feldman 	return err;
383491d0f15SScott Feldman }
384491d0f15SScott Feldman 
385491d0f15SScott Feldman /**
386491d0f15SScott Feldman  *	switchdev_port_obj_add - Add port object
387491d0f15SScott Feldman  *
388491d0f15SScott Feldman  *	@dev: port device
390491d0f15SScott Feldman  *	@obj: object to add
391491d0f15SScott Feldman  *
392491d0f15SScott Feldman  *	Use a 2-phase prepare-commit transaction model to ensure
393491d0f15SScott Feldman  *	the system is not left in a partially updated state due to
394491d0f15SScott Feldman  *	failure from driver/device.
395491d0f15SScott Feldman  *
396491d0f15SScott Feldman  *	rtnl_lock must be held.
397491d0f15SScott Feldman  */
3989e8f4a54SJiri Pirko int switchdev_port_obj_add(struct net_device *dev,
399648b4a99SJiri Pirko 			   const struct switchdev_obj *obj)
400491d0f15SScott Feldman {
4017ea6eb3fSJiri Pirko 	struct switchdev_trans trans;
402491d0f15SScott Feldman 	int err;
403491d0f15SScott Feldman 
404491d0f15SScott Feldman 	ASSERT_RTNL();
405491d0f15SScott Feldman 
4067ea6eb3fSJiri Pirko 	switchdev_trans_init(&trans);
4077ea6eb3fSJiri Pirko 
408491d0f15SScott Feldman 	/* Phase I: prepare for obj add. Driver/device should fail
409491d0f15SScott Feldman 	 * here if there are going to be issues in the commit phase,
410491d0f15SScott Feldman 	 * such as lack of resources or support.  The driver/device
411491d0f15SScott Feldman 	 * should reserve resources needed for the commit phase here,
412491d0f15SScott Feldman 	 * but should not commit the obj.
413491d0f15SScott Feldman 	 */
414491d0f15SScott Feldman 
415f623ab7fSJiri Pirko 	trans.ph_prepare = true;
4169e8f4a54SJiri Pirko 	err = __switchdev_port_obj_add(dev, obj, &trans);
417491d0f15SScott Feldman 	if (err) {
418491d0f15SScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
419491d0f15SScott Feldman 		 * resources reserved in the prepare phase are
420491d0f15SScott Feldman 		 * released.
421491d0f15SScott Feldman 		 */
422491d0f15SScott Feldman 
4239f6467cfSJiri Pirko 		if (err != -EOPNOTSUPP)
4247ea6eb3fSJiri Pirko 			switchdev_trans_items_destroy(&trans);
425491d0f15SScott Feldman 
426491d0f15SScott Feldman 		return err;
427491d0f15SScott Feldman 	}
428491d0f15SScott Feldman 
429491d0f15SScott Feldman 	/* Phase II: commit obj add.  This cannot fail as a fault
430491d0f15SScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
431491d0f15SScott Feldman 	 * because the driver said everything was OK in phase I.
432491d0f15SScott Feldman 	 */
433491d0f15SScott Feldman 
434f623ab7fSJiri Pirko 	trans.ph_prepare = false;
4359e8f4a54SJiri Pirko 	err = __switchdev_port_obj_add(dev, obj, &trans);
4369e8f4a54SJiri Pirko 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
4377ea6eb3fSJiri Pirko 	switchdev_trans_items_warn_destroy(dev, &trans);
438491d0f15SScott Feldman 
439491d0f15SScott Feldman 	return err;
440491d0f15SScott Feldman }
441491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
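/* Usage sketch (illustrative; "port_dev" and the VLAN range are
 * assumptions): adding a VLAN range object to a port, as
 * switchdev_port_br_afspec() below does when processing IFLA_AF_SPEC.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 20,
 *	};
 *
 *	err = switchdev_port_obj_add(port_dev, &vlan.obj);
 */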
442491d0f15SScott Feldman 
443491d0f15SScott Feldman /**
444491d0f15SScott Feldman  *	switchdev_port_obj_del - Delete port object
445491d0f15SScott Feldman  *
446491d0f15SScott Feldman  *	@dev: port device
448491d0f15SScott Feldman  *	@obj: object to delete
449491d0f15SScott Feldman  */
4509e8f4a54SJiri Pirko int switchdev_port_obj_del(struct net_device *dev,
451648b4a99SJiri Pirko 			   const struct switchdev_obj *obj)
452491d0f15SScott Feldman {
453491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
454491d0f15SScott Feldman 	struct net_device *lower_dev;
455491d0f15SScott Feldman 	struct list_head *iter;
456491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
457491d0f15SScott Feldman 
458491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_del)
4599e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_del(dev, obj);
460491d0f15SScott Feldman 
461491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
462491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to delete object on
463491d0f15SScott Feldman 	 * each port.
464491d0f15SScott Feldman 	 */
465491d0f15SScott Feldman 
466491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
4679e8f4a54SJiri Pirko 		err = switchdev_port_obj_del(lower_dev, obj);
468491d0f15SScott Feldman 		if (err)
469491d0f15SScott Feldman 			break;
470491d0f15SScott Feldman 	}
471491d0f15SScott Feldman 
472491d0f15SScott Feldman 	return err;
473491d0f15SScott Feldman }
474491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
475491d0f15SScott Feldman 
47645d4122cSSamudrala, Sridhar /**
47745d4122cSSamudrala, Sridhar  *	switchdev_port_obj_dump - Dump port objects
47845d4122cSSamudrala, Sridhar  *
47945d4122cSSamudrala, Sridhar  *	@dev: port device
48145d4122cSSamudrala, Sridhar  *	@obj: object to dump
48225f07adcSVivien Didelot  *	@cb: function to call with a filled object
48345d4122cSSamudrala, Sridhar  */
4849e8f4a54SJiri Pirko int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
485648b4a99SJiri Pirko 			    switchdev_obj_dump_cb_t *cb)
48645d4122cSSamudrala, Sridhar {
48745d4122cSSamudrala, Sridhar 	const struct switchdev_ops *ops = dev->switchdev_ops;
48845d4122cSSamudrala, Sridhar 	struct net_device *lower_dev;
48945d4122cSSamudrala, Sridhar 	struct list_head *iter;
49045d4122cSSamudrala, Sridhar 	int err = -EOPNOTSUPP;
49145d4122cSSamudrala, Sridhar 
49245d4122cSSamudrala, Sridhar 	if (ops && ops->switchdev_port_obj_dump)
4939e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_dump(dev, obj, cb);
49445d4122cSSamudrala, Sridhar 
49545d4122cSSamudrala, Sridhar 	/* Switch device port(s) may be stacked under
49645d4122cSSamudrala, Sridhar 	 * bond/team/vlan dev, so recurse down to dump objects on
49745d4122cSSamudrala, Sridhar 	 * first port at bottom of stack.
49845d4122cSSamudrala, Sridhar 	 */
49945d4122cSSamudrala, Sridhar 
50045d4122cSSamudrala, Sridhar 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
5019e8f4a54SJiri Pirko 		err = switchdev_port_obj_dump(lower_dev, obj, cb);
50245d4122cSSamudrala, Sridhar 		break;
50345d4122cSSamudrala, Sridhar 	}
50445d4122cSSamudrala, Sridhar 
50545d4122cSSamudrala, Sridhar 	return err;
50645d4122cSSamudrala, Sridhar }
50745d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
50845d4122cSSamudrala, Sridhar 
509ebb9a03aSJiri Pirko static DEFINE_MUTEX(switchdev_mutex);
510ebb9a03aSJiri Pirko static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
51103bf0c28SJiri Pirko 
51203bf0c28SJiri Pirko /**
513ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
51403bf0c28SJiri Pirko  *	@nb: notifier_block
51503bf0c28SJiri Pirko  *
51603bf0c28SJiri Pirko  *	Register switch device notifier. This should be used by code
51703bf0c28SJiri Pirko  *	which needs to monitor events happening in a particular device.
51803bf0c28SJiri Pirko  *	Return values are the same as for atomic_notifier_chain_register().
51903bf0c28SJiri Pirko  */
520ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
52103bf0c28SJiri Pirko {
52203bf0c28SJiri Pirko 	int err;
52303bf0c28SJiri Pirko 
524ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
525ebb9a03aSJiri Pirko 	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
526ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
52703bf0c28SJiri Pirko 	return err;
52803bf0c28SJiri Pirko }
529ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
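/* Usage sketch (illustrative; the "foo" names are hypothetical):
 * registering a notifier block whose callback receives the
 * switchdev_notifier_info passed to call_switchdev_notifiers() below.
 *
 *	static int foo_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_info *info = ptr;
 *		struct net_device *dev = info->dev;
 *
 *		... handle event for dev ...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_notifier = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_notifier);
 */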
53003bf0c28SJiri Pirko 
53103bf0c28SJiri Pirko /**
532ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
53303bf0c28SJiri Pirko  *	@nb: notifier_block
53403bf0c28SJiri Pirko  *
53503bf0c28SJiri Pirko  *	Unregister switch device notifier.
53603bf0c28SJiri Pirko  *	Return values are the same as for atomic_notifier_chain_unregister().
53703bf0c28SJiri Pirko  */
538ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
53903bf0c28SJiri Pirko {
54003bf0c28SJiri Pirko 	int err;
54103bf0c28SJiri Pirko 
542ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
543ebb9a03aSJiri Pirko 	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
544ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
54503bf0c28SJiri Pirko 	return err;
54603bf0c28SJiri Pirko }
547ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
54803bf0c28SJiri Pirko 
54903bf0c28SJiri Pirko /**
550ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
55103bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
55203bf0c28SJiri Pirko  *	@dev: port device
55303bf0c28SJiri Pirko  *	@info: notifier information data
55403bf0c28SJiri Pirko  *
55503bf0c28SJiri Pirko  *	Call all network notifier blocks. This should be called by a driver
55603bf0c28SJiri Pirko  *	when it needs to propagate a hardware event.
55703bf0c28SJiri Pirko  *	Return values are the same as for atomic_notifier_call_chain().
55803bf0c28SJiri Pirko  */
559ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
560ebb9a03aSJiri Pirko 			     struct switchdev_notifier_info *info)
56103bf0c28SJiri Pirko {
56203bf0c28SJiri Pirko 	int err;
56303bf0c28SJiri Pirko 
56403bf0c28SJiri Pirko 	info->dev = dev;
565ebb9a03aSJiri Pirko 	mutex_lock(&switchdev_mutex);
566ebb9a03aSJiri Pirko 	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
567ebb9a03aSJiri Pirko 	mutex_unlock(&switchdev_mutex);
56803bf0c28SJiri Pirko 	return err;
56903bf0c28SJiri Pirko }
570ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
5718a44dbb2SRoopa Prabhu 
5727d4f8d87SScott Feldman struct switchdev_vlan_dump {
5738f24f309SJiri Pirko 	struct switchdev_obj_port_vlan vlan;
5747d4f8d87SScott Feldman 	struct sk_buff *skb;
5757d4f8d87SScott Feldman 	u32 filter_mask;
5767d4f8d87SScott Feldman 	u16 flags;
5777d4f8d87SScott Feldman 	u16 begin;
5787d4f8d87SScott Feldman 	u16 end;
5797d4f8d87SScott Feldman };
5807d4f8d87SScott Feldman 
581e23b002bSVivien Didelot static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
5827d4f8d87SScott Feldman {
5837d4f8d87SScott Feldman 	struct bridge_vlan_info vinfo;
5847d4f8d87SScott Feldman 
5857d4f8d87SScott Feldman 	vinfo.flags = dump->flags;
5867d4f8d87SScott Feldman 
5877d4f8d87SScott Feldman 	if (dump->begin == 0 && dump->end == 0) {
5887d4f8d87SScott Feldman 		return 0;
5897d4f8d87SScott Feldman 	} else if (dump->begin == dump->end) {
5907d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
5917d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
5927d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
5937d4f8d87SScott Feldman 			return -EMSGSIZE;
5947d4f8d87SScott Feldman 	} else {
5957d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
5967d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
5977d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
5987d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
5997d4f8d87SScott Feldman 			return -EMSGSIZE;
6007d4f8d87SScott Feldman 		vinfo.vid = dump->end;
6017d4f8d87SScott Feldman 		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
6027d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
6037d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6047d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6057d4f8d87SScott Feldman 			return -EMSGSIZE;
6067d4f8d87SScott Feldman 	}
6077d4f8d87SScott Feldman 
6087d4f8d87SScott Feldman 	return 0;
6097d4f8d87SScott Feldman }
6107d4f8d87SScott Feldman 
611648b4a99SJiri Pirko static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
6127d4f8d87SScott Feldman {
613648b4a99SJiri Pirko 	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
6147d4f8d87SScott Feldman 	struct switchdev_vlan_dump *dump =
61525f07adcSVivien Didelot 		container_of(vlan, struct switchdev_vlan_dump, vlan);
6167d4f8d87SScott Feldman 	int err = 0;
6177d4f8d87SScott Feldman 
6187d4f8d87SScott Feldman 	if (vlan->vid_begin > vlan->vid_end)
6197d4f8d87SScott Feldman 		return -EINVAL;
6207d4f8d87SScott Feldman 
6217d4f8d87SScott Feldman 	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
6227d4f8d87SScott Feldman 		dump->flags = vlan->flags;
6237d4f8d87SScott Feldman 		for (dump->begin = dump->end = vlan->vid_begin;
6247d4f8d87SScott Feldman 		     dump->begin <= vlan->vid_end;
6257d4f8d87SScott Feldman 		     dump->begin++, dump->end++) {
626e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(dump);
6277d4f8d87SScott Feldman 			if (err)
6287d4f8d87SScott Feldman 				return err;
6297d4f8d87SScott Feldman 		}
6307d4f8d87SScott Feldman 	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
6317d4f8d87SScott Feldman 		if (dump->begin > vlan->vid_begin &&
6327d4f8d87SScott Feldman 		    dump->begin >= vlan->vid_end) {
6337d4f8d87SScott Feldman 			if ((dump->begin - 1) == vlan->vid_end &&
6347d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
6357d4f8d87SScott Feldman 				/* prepend */
6367d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6377d4f8d87SScott Feldman 			} else {
638e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
6397d4f8d87SScott Feldman 				dump->flags = vlan->flags;
6407d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6417d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6427d4f8d87SScott Feldman 			}
6437d4f8d87SScott Feldman 		} else if (dump->end <= vlan->vid_begin &&
6447d4f8d87SScott Feldman 		           dump->end < vlan->vid_end) {
6457d4f8d87SScott Feldman 			if ((dump->end  + 1) == vlan->vid_begin &&
6467d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
6477d4f8d87SScott Feldman 				/* append */
6487d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6497d4f8d87SScott Feldman 			} else {
650e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
6517d4f8d87SScott Feldman 				dump->flags = vlan->flags;
6527d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6537d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6547d4f8d87SScott Feldman 			}
6557d4f8d87SScott Feldman 		} else {
6567d4f8d87SScott Feldman 			err = -EINVAL;
6577d4f8d87SScott Feldman 		}
6587d4f8d87SScott Feldman 	}
6597d4f8d87SScott Feldman 
6607d4f8d87SScott Feldman 	return err;
6617d4f8d87SScott Feldman }
6627d4f8d87SScott Feldman 
6637d4f8d87SScott Feldman static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
6647d4f8d87SScott Feldman 				    u32 filter_mask)
6657d4f8d87SScott Feldman {
6667d4f8d87SScott Feldman 	struct switchdev_vlan_dump dump = {
6679e8f4a54SJiri Pirko 		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
6687d4f8d87SScott Feldman 		.skb = skb,
6697d4f8d87SScott Feldman 		.filter_mask = filter_mask,
6707d4f8d87SScott Feldman 	};
6717d4f8d87SScott Feldman 	int err = 0;
6727d4f8d87SScott Feldman 
6737d4f8d87SScott Feldman 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
6747d4f8d87SScott Feldman 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
6759e8f4a54SJiri Pirko 		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
67625f07adcSVivien Didelot 					      switchdev_port_vlan_dump_cb);
6777d4f8d87SScott Feldman 		if (err)
6787d4f8d87SScott Feldman 			goto err_out;
6797d4f8d87SScott Feldman 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
6807d4f8d87SScott Feldman 			/* last one */
681e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(&dump);
6827d4f8d87SScott Feldman 	}
6837d4f8d87SScott Feldman 
6847d4f8d87SScott Feldman err_out:
6857d4f8d87SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
6867d4f8d87SScott Feldman }
6877d4f8d87SScott Feldman 
6888793d0a6SScott Feldman /**
6898793d0a6SScott Feldman  *	switchdev_port_bridge_getlink - Get bridge port attributes
6908793d0a6SScott Feldman  *
6918793d0a6SScott Feldman  *	@dev: port device
6928793d0a6SScott Feldman  *
6938793d0a6SScott Feldman  *	Called for SELF on rtnl_bridge_getlink to get bridge port
6948793d0a6SScott Feldman  *	attributes.
6958793d0a6SScott Feldman  */
6968793d0a6SScott Feldman int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6978793d0a6SScott Feldman 				  struct net_device *dev, u32 filter_mask,
6988793d0a6SScott Feldman 				  int nlflags)
6998793d0a6SScott Feldman {
7008793d0a6SScott Feldman 	struct switchdev_attr attr = {
7011f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
7028793d0a6SScott Feldman 	};
7038793d0a6SScott Feldman 	u16 mode = BRIDGE_MODE_UNDEF;
7048793d0a6SScott Feldman 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
7058793d0a6SScott Feldman 	int err;
7068793d0a6SScott Feldman 
7078793d0a6SScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
7085c8079d0SVivien Didelot 	if (err && err != -EOPNOTSUPP)
7098793d0a6SScott Feldman 		return err;
7108793d0a6SScott Feldman 
7118793d0a6SScott Feldman 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
7127d4f8d87SScott Feldman 				       attr.u.brport_flags, mask, nlflags,
7137d4f8d87SScott Feldman 				       filter_mask, switchdev_port_vlan_fill);
7148793d0a6SScott Feldman }
7158793d0a6SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
7168793d0a6SScott Feldman 
71747f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev,
71847f8328bSScott Feldman 				     struct nlattr *nlattr,
71947f8328bSScott Feldman 				     unsigned long brport_flag)
72047f8328bSScott Feldman {
72147f8328bSScott Feldman 	struct switchdev_attr attr = {
7221f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
72347f8328bSScott Feldman 	};
72447f8328bSScott Feldman 	u8 flag = nla_get_u8(nlattr);
72547f8328bSScott Feldman 	int err;
72647f8328bSScott Feldman 
72747f8328bSScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
72847f8328bSScott Feldman 	if (err)
72947f8328bSScott Feldman 		return err;
73047f8328bSScott Feldman 
73147f8328bSScott Feldman 	if (flag)
73242275bd8SScott Feldman 		attr.u.brport_flags |= brport_flag;
73347f8328bSScott Feldman 	else
73442275bd8SScott Feldman 		attr.u.brport_flags &= ~brport_flag;
73547f8328bSScott Feldman 
73647f8328bSScott Feldman 	return switchdev_port_attr_set(dev, &attr);
73747f8328bSScott Feldman }
73847f8328bSScott Feldman 
73947f8328bSScott Feldman static const struct nla_policy
74047f8328bSScott Feldman switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
74147f8328bSScott Feldman 	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
74247f8328bSScott Feldman 	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
74347f8328bSScott Feldman 	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
74447f8328bSScott Feldman 	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
74547f8328bSScott Feldman 	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
74647f8328bSScott Feldman 	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
74747f8328bSScott Feldman 	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
74847f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
74947f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
75047f8328bSScott Feldman 	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
75147f8328bSScott Feldman };
75247f8328bSScott Feldman 
75347f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
75447f8328bSScott Feldman 					      struct nlattr *protinfo)
75547f8328bSScott Feldman {
75647f8328bSScott Feldman 	struct nlattr *attr;
75747f8328bSScott Feldman 	int rem;
75847f8328bSScott Feldman 	int err;
75947f8328bSScott Feldman 
76047f8328bSScott Feldman 	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
76147f8328bSScott Feldman 				  switchdev_port_bridge_policy);
76247f8328bSScott Feldman 	if (err)
76347f8328bSScott Feldman 		return err;
76447f8328bSScott Feldman 
76547f8328bSScott Feldman 	nla_for_each_nested(attr, protinfo, rem) {
76647f8328bSScott Feldman 		switch (nla_type(attr)) {
76747f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING:
76847f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
76947f8328bSScott Feldman 							BR_LEARNING);
77047f8328bSScott Feldman 			break;
77147f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING_SYNC:
77247f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
77347f8328bSScott Feldman 							BR_LEARNING_SYNC);
77447f8328bSScott Feldman 			break;
77547f8328bSScott Feldman 		default:
77647f8328bSScott Feldman 			err = -EOPNOTSUPP;
77747f8328bSScott Feldman 			break;
77847f8328bSScott Feldman 		}
77947f8328bSScott Feldman 		if (err)
78047f8328bSScott Feldman 			return err;
78147f8328bSScott Feldman 	}
78247f8328bSScott Feldman 
78347f8328bSScott Feldman 	return 0;
78447f8328bSScott Feldman }
78547f8328bSScott Feldman 
78647f8328bSScott Feldman static int switchdev_port_br_afspec(struct net_device *dev,
78747f8328bSScott Feldman 				    struct nlattr *afspec,
78847f8328bSScott Feldman 				    int (*f)(struct net_device *dev,
789648b4a99SJiri Pirko 					     const struct switchdev_obj *obj))
79047f8328bSScott Feldman {
79147f8328bSScott Feldman 	struct nlattr *attr;
79247f8328bSScott Feldman 	struct bridge_vlan_info *vinfo;
7939e8f4a54SJiri Pirko 	struct switchdev_obj_port_vlan vlan = {
7949e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
7959e8f4a54SJiri Pirko 	};
79647f8328bSScott Feldman 	int rem;
79747f8328bSScott Feldman 	int err;
79847f8328bSScott Feldman 
79947f8328bSScott Feldman 	nla_for_each_nested(attr, afspec, rem) {
80047f8328bSScott Feldman 		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
80147f8328bSScott Feldman 			continue;
80247f8328bSScott Feldman 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
80347f8328bSScott Feldman 			return -EINVAL;
80447f8328bSScott Feldman 		vinfo = nla_data(attr);
805ab069002SVivien Didelot 		vlan.flags = vinfo->flags;
80647f8328bSScott Feldman 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
807ab069002SVivien Didelot 			if (vlan.vid_begin)
80847f8328bSScott Feldman 				return -EINVAL;
809ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
810cc02aa8eSNikolay Aleksandrov 			/* don't allow range of pvids */
811cc02aa8eSNikolay Aleksandrov 			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
812cc02aa8eSNikolay Aleksandrov 				return -EINVAL;
81347f8328bSScott Feldman 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
814ab069002SVivien Didelot 			if (!vlan.vid_begin)
81547f8328bSScott Feldman 				return -EINVAL;
816ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
817ab069002SVivien Didelot 			if (vlan.vid_end <= vlan.vid_begin)
81847f8328bSScott Feldman 				return -EINVAL;
8199e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
82047f8328bSScott Feldman 			if (err)
82147f8328bSScott Feldman 				return err;
822ab069002SVivien Didelot 			memset(&vlan, 0, sizeof(vlan));
82347f8328bSScott Feldman 		} else {
824ab069002SVivien Didelot 			if (vlan.vid_begin)
82547f8328bSScott Feldman 				return -EINVAL;
826ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
827ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
8289e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
82947f8328bSScott Feldman 			if (err)
83047f8328bSScott Feldman 				return err;
831ab069002SVivien Didelot 			memset(&vlan, 0, sizeof(vlan));
83247f8328bSScott Feldman 		}
83347f8328bSScott Feldman 	}
83447f8328bSScott Feldman 
83547f8328bSScott Feldman 	return 0;
83647f8328bSScott Feldman }
83747f8328bSScott Feldman 
8388a44dbb2SRoopa Prabhu /**
83947f8328bSScott Feldman  *	switchdev_port_bridge_setlink - Set bridge port attributes
8408a44dbb2SRoopa Prabhu  *
8418a44dbb2SRoopa Prabhu  *	@dev: port device
84247f8328bSScott Feldman  *	@nlh: netlink header
84347f8328bSScott Feldman  *	@flags: netlink flags
8448a44dbb2SRoopa Prabhu  *
84547f8328bSScott Feldman  *	Called for SELF on rtnl_bridge_setlink to set bridge port
84647f8328bSScott Feldman  *	attributes.
8478a44dbb2SRoopa Prabhu  */
848ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev,
8498a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
8508a44dbb2SRoopa Prabhu {
85147f8328bSScott Feldman 	struct nlattr *protinfo;
85247f8328bSScott Feldman 	struct nlattr *afspec;
85347f8328bSScott Feldman 	int err = 0;
8548a44dbb2SRoopa Prabhu 
85547f8328bSScott Feldman 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
85647f8328bSScott Feldman 				   IFLA_PROTINFO);
85747f8328bSScott Feldman 	if (protinfo) {
85847f8328bSScott Feldman 		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
85947f8328bSScott Feldman 		if (err)
86047f8328bSScott Feldman 			return err;
86147f8328bSScott Feldman 	}
8628a44dbb2SRoopa Prabhu 
86347f8328bSScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
86447f8328bSScott Feldman 				 IFLA_AF_SPEC);
86547f8328bSScott Feldman 	if (afspec)
86647f8328bSScott Feldman 		err = switchdev_port_br_afspec(dev, afspec,
86747f8328bSScott Feldman 					       switchdev_port_obj_add);
8688a44dbb2SRoopa Prabhu 
86947f8328bSScott Feldman 	return err;
8708a44dbb2SRoopa Prabhu }
871ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
8728a44dbb2SRoopa Prabhu 
8738a44dbb2SRoopa Prabhu /**
8745c34e022SScott Feldman  *	switchdev_port_bridge_dellink - Delete bridge port attributes
8758a44dbb2SRoopa Prabhu  *
8768a44dbb2SRoopa Prabhu  *	@dev: port device
8775c34e022SScott Feldman  *	@nlh: netlink header
8785c34e022SScott Feldman  *	@flags: netlink flags
8798a44dbb2SRoopa Prabhu  *
8805c34e022SScott Feldman  *	Called for SELF on rtnl_bridge_dellink to delete bridge port
8815c34e022SScott Feldman  *	attributes.
8828a44dbb2SRoopa Prabhu  */
883ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev,
8848a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
8858a44dbb2SRoopa Prabhu {
8865c34e022SScott Feldman 	struct nlattr *afspec;
8878a44dbb2SRoopa Prabhu 
8885c34e022SScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
8895c34e022SScott Feldman 				 IFLA_AF_SPEC);
8905c34e022SScott Feldman 	if (afspec)
8915c34e022SScott Feldman 		return switchdev_port_br_afspec(dev, afspec,
8925c34e022SScott Feldman 						switchdev_port_obj_del);
8935c34e022SScott Feldman 
8948a44dbb2SRoopa Prabhu 	return 0;
8958a44dbb2SRoopa Prabhu }
896ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
8978a44dbb2SRoopa Prabhu 
89845d4122cSSamudrala, Sridhar /**
89945d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
90045d4122cSSamudrala, Sridhar  *
90145d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
90245d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
90345d4122cSSamudrala, Sridhar  *	@dev: port device
90445d4122cSSamudrala, Sridhar  *	@addr: MAC address to add
90545d4122cSSamudrala, Sridhar  *	@vid: VLAN to add
 *	@nlm_flags: netlink flags passed in (NLM_F_*)
90645d4122cSSamudrala, Sridhar  *
90745d4122cSSamudrala, Sridhar  *	Add FDB entry to switch device.
90845d4122cSSamudrala, Sridhar  */
90945d4122cSSamudrala, Sridhar int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
91045d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
91145d4122cSSamudrala, Sridhar 			   u16 vid, u16 nlm_flags)
91245d4122cSSamudrala, Sridhar {
91352ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
9149e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
915cdf09697SDavid S. Miller 		.addr = addr,
91645d4122cSSamudrala, Sridhar 		.vid = vid,
91745d4122cSSamudrala, Sridhar 	};
91845d4122cSSamudrala, Sridhar 
9199e8f4a54SJiri Pirko 	return switchdev_port_obj_add(dev, &fdb.obj);
92045d4122cSSamudrala, Sridhar }
92145d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
92245d4122cSSamudrala, Sridhar 
92345d4122cSSamudrala, Sridhar /**
92445d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
92545d4122cSSamudrala, Sridhar  *
92645d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
92745d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
92845d4122cSSamudrala, Sridhar  *	@dev: port device
92945d4122cSSamudrala, Sridhar  *	@addr: MAC address to delete
93045d4122cSSamudrala, Sridhar  *	@vid: VLAN to delete
93145d4122cSSamudrala, Sridhar  *
93245d4122cSSamudrala, Sridhar  *	Delete FDB entry from switch device.
93345d4122cSSamudrala, Sridhar  */
93445d4122cSSamudrala, Sridhar int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
93545d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
93645d4122cSSamudrala, Sridhar 			   u16 vid)
93745d4122cSSamudrala, Sridhar {
93852ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
9399e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
940cdf09697SDavid S. Miller 		.addr = addr,
94145d4122cSSamudrala, Sridhar 		.vid = vid,
94245d4122cSSamudrala, Sridhar 	};
94345d4122cSSamudrala, Sridhar 
9449e8f4a54SJiri Pirko 	return switchdev_port_obj_del(dev, &fdb.obj);
94545d4122cSSamudrala, Sridhar }
94645d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
94745d4122cSSamudrala, Sridhar 
94845d4122cSSamudrala, Sridhar struct switchdev_fdb_dump {
94952ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb;
950e02a06b2SVivien Didelot 	struct net_device *dev;
95145d4122cSSamudrala, Sridhar 	struct sk_buff *skb;
95245d4122cSSamudrala, Sridhar 	struct netlink_callback *cb;
95345d4122cSSamudrala, Sridhar 	int idx;
95445d4122cSSamudrala, Sridhar };
95545d4122cSSamudrala, Sridhar 
956648b4a99SJiri Pirko static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
95745d4122cSSamudrala, Sridhar {
958648b4a99SJiri Pirko 	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
95945d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump *dump =
96025f07adcSVivien Didelot 		container_of(fdb, struct switchdev_fdb_dump, fdb);
96145d4122cSSamudrala, Sridhar 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
96245d4122cSSamudrala, Sridhar 	u32 seq = dump->cb->nlh->nlmsg_seq;
96345d4122cSSamudrala, Sridhar 	struct nlmsghdr *nlh;
96445d4122cSSamudrala, Sridhar 	struct ndmsg *ndm;
96545d4122cSSamudrala, Sridhar 
96645d4122cSSamudrala, Sridhar 	if (dump->idx < dump->cb->args[0])
96745d4122cSSamudrala, Sridhar 		goto skip;
96845d4122cSSamudrala, Sridhar 
96945d4122cSSamudrala, Sridhar 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
97045d4122cSSamudrala, Sridhar 			sizeof(*ndm), NLM_F_MULTI);
97145d4122cSSamudrala, Sridhar 	if (!nlh)
97245d4122cSSamudrala, Sridhar 		return -EMSGSIZE;
97345d4122cSSamudrala, Sridhar 
97445d4122cSSamudrala, Sridhar 	ndm = nlmsg_data(nlh);
97545d4122cSSamudrala, Sridhar 	ndm->ndm_family  = AF_BRIDGE;
97645d4122cSSamudrala, Sridhar 	ndm->ndm_pad1    = 0;
97745d4122cSSamudrala, Sridhar 	ndm->ndm_pad2    = 0;
97845d4122cSSamudrala, Sridhar 	ndm->ndm_flags   = NTF_SELF;
97945d4122cSSamudrala, Sridhar 	ndm->ndm_type    = 0;
980e02a06b2SVivien Didelot 	ndm->ndm_ifindex = dump->dev->ifindex;
98125f07adcSVivien Didelot 	ndm->ndm_state   = fdb->ndm_state;
98245d4122cSSamudrala, Sridhar 
98325f07adcSVivien Didelot 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
98445d4122cSSamudrala, Sridhar 		goto nla_put_failure;
98545d4122cSSamudrala, Sridhar 
98625f07adcSVivien Didelot 	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
98745d4122cSSamudrala, Sridhar 		goto nla_put_failure;
98845d4122cSSamudrala, Sridhar 
98945d4122cSSamudrala, Sridhar 	nlmsg_end(dump->skb, nlh);
99045d4122cSSamudrala, Sridhar 
99145d4122cSSamudrala, Sridhar skip:
99245d4122cSSamudrala, Sridhar 	dump->idx++;
99345d4122cSSamudrala, Sridhar 	return 0;
99445d4122cSSamudrala, Sridhar 
99545d4122cSSamudrala, Sridhar nla_put_failure:
99645d4122cSSamudrala, Sridhar 	nlmsg_cancel(dump->skb, nlh);
99745d4122cSSamudrala, Sridhar 	return -EMSGSIZE;
99845d4122cSSamudrala, Sridhar }
99945d4122cSSamudrala, Sridhar 
100045d4122cSSamudrala, Sridhar /**
100145d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
100245d4122cSSamudrala, Sridhar  *
100345d4122cSSamudrala, Sridhar  *	@skb: netlink skb
100445d4122cSSamudrala, Sridhar  *	@cb: netlink callback
100545d4122cSSamudrala, Sridhar  *	@dev: port device
100645d4122cSSamudrala, Sridhar  *	@filter_dev: filter device
100745d4122cSSamudrala, Sridhar  *	@idx: index at which to start dumping
100845d4122cSSamudrala, Sridhar  *
100945d4122cSSamudrala, Sridhar  *	Dump FDB entries from switch device.
101045d4122cSSamudrala, Sridhar  */
101145d4122cSSamudrala, Sridhar int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
101245d4122cSSamudrala, Sridhar 			    struct net_device *dev,
101345d4122cSSamudrala, Sridhar 			    struct net_device *filter_dev, int idx)
101445d4122cSSamudrala, Sridhar {
101545d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump dump = {
10169e8f4a54SJiri Pirko 		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
1017e02a06b2SVivien Didelot 		.dev = dev,
101845d4122cSSamudrala, Sridhar 		.skb = skb,
101945d4122cSSamudrala, Sridhar 		.cb = cb,
102045d4122cSSamudrala, Sridhar 		.idx = idx,
102145d4122cSSamudrala, Sridhar 	};
102245d4122cSSamudrala, Sridhar 
10239e8f4a54SJiri Pirko 	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
102445d4122cSSamudrala, Sridhar 	return dump.idx;
102545d4122cSSamudrala, Sridhar }
102645d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
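/* Usage sketch (illustrative; "foo_port_netdev_ops" is a hypothetical
 * driver structure): a switch port driver may point its FDB and bridge
 * ndo hooks directly at these helpers, whose signatures are designed to
 * match the corresponding net_device_ops callbacks.
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		...
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};
 */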
102745d4122cSSamudrala, Sridhar 
1028ebb9a03aSJiri Pirko static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
1029b5d6fbdeSScott Feldman {
10309d47c0a2SJiri Pirko 	const struct switchdev_ops *ops = dev->switchdev_ops;
1031b5d6fbdeSScott Feldman 	struct net_device *lower_dev;
1032b5d6fbdeSScott Feldman 	struct net_device *port_dev;
1033b5d6fbdeSScott Feldman 	struct list_head *iter;
1034b5d6fbdeSScott Feldman 
1035b5d6fbdeSScott Feldman 	/* Recursively search down until we find a sw port dev.
1036f8e20a9fSScott Feldman 	 * (A sw port dev supports switchdev_port_attr_get).
1037b5d6fbdeSScott Feldman 	 */
1038b5d6fbdeSScott Feldman 
1039f8e20a9fSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
1040b5d6fbdeSScott Feldman 		return dev;
1041b5d6fbdeSScott Feldman 
1042b5d6fbdeSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
1043ebb9a03aSJiri Pirko 		port_dev = switchdev_get_lowest_dev(lower_dev);
1044b5d6fbdeSScott Feldman 		if (port_dev)
1045b5d6fbdeSScott Feldman 			return port_dev;
1046b5d6fbdeSScott Feldman 	}
1047b5d6fbdeSScott Feldman 
1048b5d6fbdeSScott Feldman 	return NULL;
1049b5d6fbdeSScott Feldman }
1050b5d6fbdeSScott Feldman 
1051ebb9a03aSJiri Pirko static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
1052b5d6fbdeSScott Feldman {
1053f8e20a9fSScott Feldman 	struct switchdev_attr attr = {
10541f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1055f8e20a9fSScott Feldman 	};
1056f8e20a9fSScott Feldman 	struct switchdev_attr prev_attr;
1057b5d6fbdeSScott Feldman 	struct net_device *dev = NULL;
1058b5d6fbdeSScott Feldman 	int nhsel;
1059b5d6fbdeSScott Feldman 
1060b5d6fbdeSScott Feldman 	/* For this route, all nexthop devs must be on the same switch. */
1061b5d6fbdeSScott Feldman 
1062b5d6fbdeSScott Feldman 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1063b5d6fbdeSScott Feldman 		const struct fib_nh *nh = &fi->fib_nh[nhsel];
1064b5d6fbdeSScott Feldman 
1065b5d6fbdeSScott Feldman 		if (!nh->nh_dev)
1066b5d6fbdeSScott Feldman 			return NULL;
1067b5d6fbdeSScott Feldman 
1068ebb9a03aSJiri Pirko 		dev = switchdev_get_lowest_dev(nh->nh_dev);
1069b5d6fbdeSScott Feldman 		if (!dev)
1070b5d6fbdeSScott Feldman 			return NULL;
1071b5d6fbdeSScott Feldman 
1072f8e20a9fSScott Feldman 		if (switchdev_port_attr_get(dev, &attr))
1073b5d6fbdeSScott Feldman 			return NULL;
1074b5d6fbdeSScott Feldman 
1075d754f98bSScott Feldman 		if (nhsel > 0 &&
1076d754f98bSScott Feldman 		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
1077b5d6fbdeSScott Feldman 				return NULL;
1078b5d6fbdeSScott Feldman 
1079f8e20a9fSScott Feldman 		prev_attr = attr;
1080b5d6fbdeSScott Feldman 	}
1081b5d6fbdeSScott Feldman 
1082b5d6fbdeSScott Feldman 	return dev;
1083b5d6fbdeSScott Feldman }
1084b5d6fbdeSScott Feldman 
10855e8d9049SScott Feldman /**
10867616dcbbSScott Feldman  *	switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
10875e8d9049SScott Feldman  *
10885e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
10895e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
10905e8d9049SScott Feldman  *	@fi: route FIB info structure
10915e8d9049SScott Feldman  *	@tos: route TOS
10925e8d9049SScott Feldman  *	@type: route type
1093f8f21471SScott Feldman  *	@nlflags: netlink flags passed in (NLM_F_*)
10945e8d9049SScott Feldman  *	@tb_id: route table ID
10955e8d9049SScott Feldman  *
10967616dcbbSScott Feldman  *	Add/modify switch IPv4 route entry.
10975e8d9049SScott Feldman  */
1098ebb9a03aSJiri Pirko int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
1099f8f21471SScott Feldman 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
11005e8d9049SScott Feldman {
1101ab069002SVivien Didelot 	struct switchdev_obj_ipv4_fib ipv4_fib = {
11029e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
11037a7ee531SScott Feldman 		.dst = dst,
110458c2cb16SScott Feldman 		.dst_len = dst_len,
110558c2cb16SScott Feldman 		.fi = fi,
110658c2cb16SScott Feldman 		.tos = tos,
110758c2cb16SScott Feldman 		.type = type,
110858c2cb16SScott Feldman 		.nlflags = nlflags,
110958c2cb16SScott Feldman 		.tb_id = tb_id,
111058c2cb16SScott Feldman 	};
1111b5d6fbdeSScott Feldman 	struct net_device *dev;
1112b5d6fbdeSScott Feldman 	int err = 0;
1113b5d6fbdeSScott Feldman 
11148e05fd71SScott Feldman 	/* Don't offload route if using custom IP rules or if
11158e05fd71SScott Feldman 	 * IPv4 FIB offloading has been disabled completely.
11168e05fd71SScott Feldman 	 */
11178e05fd71SScott Feldman 
1118e1315db1SScott Feldman #ifdef CONFIG_IP_MULTIPLE_TABLES
1119e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_has_custom_rules)
1120e1315db1SScott Feldman 		return 0;
1121e1315db1SScott Feldman #endif
1122e1315db1SScott Feldman 
1123e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_offload_disabled)
1124104616e7SScott Feldman 		return 0;
1125104616e7SScott Feldman 
1126ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
1127b5d6fbdeSScott Feldman 	if (!dev)
11285e8d9049SScott Feldman 		return 0;
1129b5d6fbdeSScott Feldman 
11309e8f4a54SJiri Pirko 	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
1131b5d6fbdeSScott Feldman 	if (!err)
1132eea39946SRoopa Prabhu 		fi->fib_flags |= RTNH_F_OFFLOAD;
1133b5d6fbdeSScott Feldman 
1134af201f72SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
11355e8d9049SScott Feldman }
1136ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
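
/*
 * Illustrative sketch, not part of this file: the IPv4 route insertion path
 * would typically try to offload a newly added route and fall back to
 * software-only forwarding on a hard error.  The function name and the
 * parameters below (tb, cfg, key, plen, fi) are assumptions standing in for
 * the caller's own local state.
 */
static int example_fib_insert_offload(struct fib_table *tb,
				      struct fib_config *cfg,
				      u32 key, int plen, struct fib_info *fi)
{
	int err;

	err = switchdev_fib_ipv4_add(key, plen, fi, cfg->fc_tos,
				     cfg->fc_type, cfg->fc_nlflags,
				     tb->tb_id);
	if (err)
		/* A hard error (anything but -EOPNOTSUPP, which is masked
		 * by switchdev_fib_ipv4_add()) means the hardware refused
		 * the route; give up on IPv4 FIB offload for this net.
		 */
		switchdev_fib_ipv4_abort(fi);

	return err;
}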
11375e8d9049SScott Feldman 
11385e8d9049SScott Feldman /**
1139ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
11405e8d9049SScott Feldman  *
11415e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
11425e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
11435e8d9049SScott Feldman  *	@fi: route FIB info structure
11445e8d9049SScott Feldman  *	@tos: route TOS
11455e8d9049SScott Feldman  *	@type: route type
11465e8d9049SScott Feldman  *	@tb_id: route table ID
11475e8d9049SScott Feldman  *
11485e8d9049SScott Feldman  *	Delete IPv4 route entry from switch device.
11495e8d9049SScott Feldman  */
1150ebb9a03aSJiri Pirko int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
11515e8d9049SScott Feldman 			   u8 tos, u8 type, u32 tb_id)
11525e8d9049SScott Feldman {
1153ab069002SVivien Didelot 	struct switchdev_obj_ipv4_fib ipv4_fib = {
11549e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
11557a7ee531SScott Feldman 		.dst = dst,
115658c2cb16SScott Feldman 		.dst_len = dst_len,
115758c2cb16SScott Feldman 		.fi = fi,
115858c2cb16SScott Feldman 		.tos = tos,
115958c2cb16SScott Feldman 		.type = type,
116058c2cb16SScott Feldman 		.nlflags = 0,
116158c2cb16SScott Feldman 		.tb_id = tb_id,
116258c2cb16SScott Feldman 	};
1163b5d6fbdeSScott Feldman 	struct net_device *dev;
1164b5d6fbdeSScott Feldman 	int err = 0;
1165b5d6fbdeSScott Feldman 
1166eea39946SRoopa Prabhu 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
11675e8d9049SScott Feldman 		return 0;
1168b5d6fbdeSScott Feldman 
1169ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
1170b5d6fbdeSScott Feldman 	if (!dev)
1171b5d6fbdeSScott Feldman 		return 0;
1172b5d6fbdeSScott Feldman 
11739e8f4a54SJiri Pirko 	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
1174b5d6fbdeSScott Feldman 	if (!err)
1175eea39946SRoopa Prabhu 		fi->fib_flags &= ~RTNH_F_OFFLOAD;
1176b5d6fbdeSScott Feldman 
1177af201f72SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
11785e8d9049SScott Feldman }
1179ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
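
/*
 * Illustrative sketch, not part of this file: the IPv4 route deletion path
 * can call switchdev_fib_ipv4_del() unconditionally, since routes that were
 * never marked RTNH_F_OFFLOAD are skipped early.  The function name and
 * parameters below are assumptions standing in for the caller's state.
 */
static int example_fib_remove_offload(struct fib_table *tb, u32 key, int plen,
				      struct fib_info *fi, u8 tos, u8 type)
{
	/* No nlflags are needed on deletion; the route key, fib info and
	 * table ID are handed to the port driver as a
	 * SWITCHDEV_OBJ_ID_IPV4_FIB object.
	 */
	return switchdev_fib_ipv4_del(key, plen, fi, tos, type, tb->tb_id);
}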
11808e05fd71SScott Feldman 
11818e05fd71SScott Feldman /**
1182ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
11838e05fd71SScott Feldman  *
11848e05fd71SScott Feldman  *	@fi: route FIB info structure
11858e05fd71SScott Feldman  */
1186ebb9a03aSJiri Pirko void switchdev_fib_ipv4_abort(struct fib_info *fi)
11878e05fd71SScott Feldman {
11888e05fd71SScott Feldman 	/* There was a problem installing this route to the offload
11898e05fd71SScott Feldman 	 * device.  For now, until we come up with more refined
11908e05fd71SScott Feldman 	 * policy handling, abruptly end IPv4 fib offloading for
11928e05fd71SScott Feldman 	 * the entire net by flushing offload device(s) of all
11928e05fd71SScott Feldman 	 * IPv4 routes, and mark IPv4 fib offloading broken from
11938e05fd71SScott Feldman 	 * this point forward.
11948e05fd71SScott Feldman 	 */
11958e05fd71SScott Feldman 
11968e05fd71SScott Feldman 	fib_flush_external(fi->fib_net);
11978e05fd71SScott Feldman 	fi->fib_net->ipv4.fib_offload_disabled = true;
11988e05fd71SScott Feldman }
1199ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
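
/*
 * Illustrative sketch, not part of this file: what triggers the abort is a
 * hard error from a driver's switchdev_port_obj_add handler for a
 * SWITCHDEV_OBJ_ID_IPV4_FIB object (-EOPNOTSUPP is masked by
 * switchdev_fib_ipv4_add() and merely leaves the route un-offloaded).
 * example_hw_fib4_insert() is a hypothetical driver helper.
 */
static int example_port_obj_add(struct net_device *dev,
				const struct switchdev_obj *obj,
				struct switchdev_trans *trans)
{
	const struct switchdev_obj_ipv4_fib *fib4;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		/* Returning e.g. -ENOMEM here (hardware route table full)
		 * propagates back to the FIB caller, which is then expected
		 * to call switchdev_fib_ipv4_abort().
		 */
		return example_hw_fib4_insert(netdev_priv(dev), fib4, trans);
	default:
		return -EOPNOTSUPP;
	}
}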
12001a3b2ec9SScott Feldman 
12011a3b2ec9SScott Feldman static bool switchdev_port_same_parent_id(struct net_device *a,
12021a3b2ec9SScott Feldman 					  struct net_device *b)
12031a3b2ec9SScott Feldman {
12041a3b2ec9SScott Feldman 	struct switchdev_attr a_attr = {
12051f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
12061a3b2ec9SScott Feldman 		.flags = SWITCHDEV_F_NO_RECURSE,
12071a3b2ec9SScott Feldman 	};
12081a3b2ec9SScott Feldman 	struct switchdev_attr b_attr = {
12091f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
12101a3b2ec9SScott Feldman 		.flags = SWITCHDEV_F_NO_RECURSE,
12111a3b2ec9SScott Feldman 	};
12121a3b2ec9SScott Feldman 
12131a3b2ec9SScott Feldman 	if (switchdev_port_attr_get(a, &a_attr) ||
12141a3b2ec9SScott Feldman 	    switchdev_port_attr_get(b, &b_attr))
12151a3b2ec9SScott Feldman 		return false;
12161a3b2ec9SScott Feldman 
12171a3b2ec9SScott Feldman 	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
12181a3b2ec9SScott Feldman }
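
/*
 * Illustrative sketch, not part of this file: code outside the switchdev
 * core (for instance a driver deciding whether two ports may be bonded or
 * bridged in hardware) can apply the same recipe using only the exported
 * attr API.  example_ports_on_same_switch() is a hypothetical name.
 */
static bool example_ports_on_same_switch(struct net_device *a,
					 struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr b_attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};

	/* Without SWITCHDEV_F_NO_RECURSE, switchdev_port_attr_get()
	 * recurses through lower devices, so a stacked device (bond, VLAN)
	 * reports a parent ID only when all of its lowers agree.
	 */
	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}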
12191a3b2ec9SScott Feldman 
12201a3b2ec9SScott Feldman static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
12211a3b2ec9SScott Feldman 				       struct net_device *group_dev)
12221a3b2ec9SScott Feldman {
12231a3b2ec9SScott Feldman 	struct net_device *lower_dev;
12241a3b2ec9SScott Feldman 	struct list_head *iter;
12251a3b2ec9SScott Feldman 
12261a3b2ec9SScott Feldman 	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
12271a3b2ec9SScott Feldman 		if (lower_dev == dev)
12281a3b2ec9SScott Feldman 			continue;
12291a3b2ec9SScott Feldman 		if (switchdev_port_same_parent_id(dev, lower_dev))
12301a3b2ec9SScott Feldman 			return lower_dev->offload_fwd_mark;
12311a3b2ec9SScott Feldman 		return switchdev_port_fwd_mark_get(dev, lower_dev);
12321a3b2ec9SScott Feldman 	}
12331a3b2ec9SScott Feldman 
12341a3b2ec9SScott Feldman 	return dev->ifindex;
12351a3b2ec9SScott Feldman }
12361a3b2ec9SScott Feldman 
12371a3b2ec9SScott Feldman static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
12381a3b2ec9SScott Feldman 					  u32 old_mark, u32 *reset_mark)
12391a3b2ec9SScott Feldman {
12401a3b2ec9SScott Feldman 	struct net_device *lower_dev;
12411a3b2ec9SScott Feldman 	struct list_head *iter;
12421a3b2ec9SScott Feldman 
12431a3b2ec9SScott Feldman 	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
12441a3b2ec9SScott Feldman 		if (lower_dev->offload_fwd_mark == old_mark) {
12451a3b2ec9SScott Feldman 			if (!*reset_mark)
12461a3b2ec9SScott Feldman 				*reset_mark = lower_dev->ifindex;
12471a3b2ec9SScott Feldman 			lower_dev->offload_fwd_mark = *reset_mark;
12481a3b2ec9SScott Feldman 		}
12491a3b2ec9SScott Feldman 		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
12501a3b2ec9SScott Feldman 	}
12511a3b2ec9SScott Feldman }
12521a3b2ec9SScott Feldman 
12531a3b2ec9SScott Feldman /**
12541a3b2ec9SScott Feldman  *	switchdev_port_fwd_mark_set - Set port offload forwarding mark
12551a3b2ec9SScott Feldman  *
12561a3b2ec9SScott Feldman  *	@dev: port device
12571a3b2ec9SScott Feldman  *	@group_dev: containing device
12581a3b2ec9SScott Feldman  *	@joining: true if dev is joining group; false if leaving group
12591a3b2ec9SScott Feldman  *
12601a3b2ec9SScott Feldman  *	An ungrouped port's offload mark is just its ifindex.  A grouped
12611a3b2ec9SScott Feldman  *	port's (member of a bridge, for example) offload mark is the ifindex
12621a3b2ec9SScott Feldman  *	of one of the ports in the group with the same parent (switch) ID.
12631a3b2ec9SScott Feldman  *	Ports on the same switch (same parent ID) in the same group will have the same mark.
12641a3b2ec9SScott Feldman  *
12651a3b2ec9SScott Feldman  *	Example:
12661a3b2ec9SScott Feldman  *
12671a3b2ec9SScott Feldman  *		br0		ifindex=9
12681a3b2ec9SScott Feldman  *		  sw1p1		ifindex=2	mark=2
12691a3b2ec9SScott Feldman  *		  sw1p2		ifindex=3	mark=2
12701a3b2ec9SScott Feldman  *		  sw2p1		ifindex=4	mark=5
12711a3b2ec9SScott Feldman  *		  sw2p2		ifindex=5	mark=5
12721a3b2ec9SScott Feldman  *
12731a3b2ec9SScott Feldman  *	If sw2p2 leaves the bridge, we'll have:
12741a3b2ec9SScott Feldman  *
12751a3b2ec9SScott Feldman  *		br0		ifindex=9
12761a3b2ec9SScott Feldman  *		  sw1p1		ifindex=2	mark=2
12771a3b2ec9SScott Feldman  *		  sw1p2		ifindex=3	mark=2
12781a3b2ec9SScott Feldman  *		  sw2p1		ifindex=4	mark=4
12791a3b2ec9SScott Feldman  *		sw2p2		ifindex=5	mark=5
12801a3b2ec9SScott Feldman  */
12811a3b2ec9SScott Feldman void switchdev_port_fwd_mark_set(struct net_device *dev,
12821a3b2ec9SScott Feldman 				 struct net_device *group_dev,
12831a3b2ec9SScott Feldman 				 bool joining)
12841a3b2ec9SScott Feldman {
12851a3b2ec9SScott Feldman 	u32 mark = dev->ifindex;
12861a3b2ec9SScott Feldman 	u32 reset_mark = 0;
12871a3b2ec9SScott Feldman 
12881a3b2ec9SScott Feldman 	if (group_dev && joining) {
12891a3b2ec9SScott Feldman 		mark = switchdev_port_fwd_mark_get(dev, group_dev);
12901a3b2ec9SScott Feldman 	} else if (group_dev && !joining) {
12911a3b2ec9SScott Feldman 		if (dev->offload_fwd_mark == mark)
12921a3b2ec9SScott Feldman 			/* Uh oh, this port was the mark reference port,
12931a3b2ec9SScott Feldman 			 * but it's leaving the group, so reset the
12941a3b2ec9SScott Feldman 			 * mark for the remaining ports in the group.
12951a3b2ec9SScott Feldman 			 */
12961a3b2ec9SScott Feldman 			switchdev_port_fwd_mark_reset(group_dev, mark,
12971a3b2ec9SScott Feldman 						      &reset_mark);
12981a3b2ec9SScott Feldman 	}
12991a3b2ec9SScott Feldman 
13001a3b2ec9SScott Feldman 	dev->offload_fwd_mark = mark;
13011a3b2ec9SScott Feldman }
13021a3b2ec9SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
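
/*
 * Illustrative sketch, not part of this file: a port driver would normally
 * update the mark from its bridge join/leave paths (driven by
 * NETDEV_CHANGEUPPER events).  struct example_port and its fields are
 * hypothetical driver-private state.
 */
struct example_port {
	struct net_device *dev;		/* the switch port netdev */
	struct net_device *bridge_dev;	/* bridge the port is enslaved to */
};

static void example_port_bridge_join(struct example_port *port,
				     struct net_device *bridge)
{
	port->bridge_dev = bridge;
	/* Adopt the group's mark so software forwarding between ports of
	 * the same switch can be suppressed when the hardware has already
	 * forwarded the packet.
	 */
	switchdev_port_fwd_mark_set(port->dev, bridge, true);
}

static void example_port_bridge_leave(struct example_port *port)
{
	switchdev_port_fwd_mark_set(port->dev, port->bridge_dev, false);
	port->bridge_dev = NULL;
}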
1303