xref: /linux-6.15/net/switchdev/switchdev.c (revision ff5cf100)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
37ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
18850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1947f8328bSScott Feldman #include <linux/if_bridge.h>
207ea6eb3fSJiri Pirko #include <linux/list.h>
21793f4014SJiri Pirko #include <linux/workqueue.h>
2287aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
234f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
24007f790cSJiri Pirko #include <net/switchdev.h>
25007f790cSJiri Pirko 
26007f790cSJiri Pirko /**
277ea6eb3fSJiri Pirko  *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
287ea6eb3fSJiri Pirko  *
297ea6eb3fSJiri Pirko  *	@trans: transaction
307ea6eb3fSJiri Pirko  *	@data: pointer to data being queued
317ea6eb3fSJiri Pirko  *	@destructor: data destructor
327ea6eb3fSJiri Pirko  *	@tritem: transaction item being queued
337ea6eb3fSJiri Pirko  *
347ea6eb3fSJiri Pirko  *	Enqueue data item to transaction queue. tritem is typically placed in
357ea6eb3fSJiri Pirko  *	the container pointed at by the data pointer. The destructor is called
367ea6eb3fSJiri Pirko  *	on transaction abort and after a successful commit phase in case
377ea6eb3fSJiri Pirko  *	the caller did not dequeue the item beforehand.
387ea6eb3fSJiri Pirko  */
397ea6eb3fSJiri Pirko void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
407ea6eb3fSJiri Pirko 				  void *data, void (*destructor)(void const *),
417ea6eb3fSJiri Pirko 				  struct switchdev_trans_item *tritem)
427ea6eb3fSJiri Pirko {
437ea6eb3fSJiri Pirko 	tritem->data = data;
447ea6eb3fSJiri Pirko 	tritem->destructor = destructor;
457ea6eb3fSJiri Pirko 	list_add_tail(&tritem->list, &trans->item_list);
467ea6eb3fSJiri Pirko }
477ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
487ea6eb3fSJiri Pirko 
497ea6eb3fSJiri Pirko static struct switchdev_trans_item *
507ea6eb3fSJiri Pirko __switchdev_trans_item_dequeue(struct switchdev_trans *trans)
517ea6eb3fSJiri Pirko {
527ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
537ea6eb3fSJiri Pirko 
547ea6eb3fSJiri Pirko 	if (list_empty(&trans->item_list))
557ea6eb3fSJiri Pirko 		return NULL;
567ea6eb3fSJiri Pirko 	tritem = list_first_entry(&trans->item_list,
577ea6eb3fSJiri Pirko 				  struct switchdev_trans_item, list);
587ea6eb3fSJiri Pirko 	list_del(&tritem->list);
597ea6eb3fSJiri Pirko 	return tritem;
607ea6eb3fSJiri Pirko }
617ea6eb3fSJiri Pirko 
627ea6eb3fSJiri Pirko /**
637ea6eb3fSJiri Pirko  *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
647ea6eb3fSJiri Pirko  *
657ea6eb3fSJiri Pirko  *	@trans: transaction
667ea6eb3fSJiri Pirko  */
677ea6eb3fSJiri Pirko void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
687ea6eb3fSJiri Pirko {
697ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
707ea6eb3fSJiri Pirko 
717ea6eb3fSJiri Pirko 	tritem = __switchdev_trans_item_dequeue(trans);
727ea6eb3fSJiri Pirko 	BUG_ON(!tritem);
737ea6eb3fSJiri Pirko 	return tritem->data;
747ea6eb3fSJiri Pirko }
757ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
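
/*
 * Usage sketch (hypothetical driver code, not from any in-tree driver): a
 * prepare phase can carry an allocation over to the commit phase via the
 * transaction item queue.  All "mydrv_*" names below are made up.
 */
struct mydrv_vlan_work {
	struct switchdev_trans_item tritem;
	u16 vid;
};

static void mydrv_vlan_work_destructor(const void *data)
{
	kfree(data);	/* runs on abort, or if the item was never dequeued */
}

static int mydrv_vlan_prepare(struct net_device *dev, u16 vid,
			      struct switchdev_trans *trans)
{
	struct mydrv_vlan_work *work;

	/* Reserve everything that could fail; do not touch the hardware. */
	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;
	work->vid = vid;
	switchdev_trans_item_enqueue(trans, work, mydrv_vlan_work_destructor,
				     &work->tritem);
	return 0;
}

static int mydrv_vlan_commit(struct net_device *dev,
			     struct switchdev_trans *trans)
{
	struct mydrv_vlan_work *work = switchdev_trans_item_dequeue(trans);

	/* Program work->vid into the hardware; this phase must not fail. */
	kfree(work);
	return 0;
}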
767ea6eb3fSJiri Pirko 
777ea6eb3fSJiri Pirko static void switchdev_trans_init(struct switchdev_trans *trans)
787ea6eb3fSJiri Pirko {
797ea6eb3fSJiri Pirko 	INIT_LIST_HEAD(&trans->item_list);
807ea6eb3fSJiri Pirko }
817ea6eb3fSJiri Pirko 
827ea6eb3fSJiri Pirko static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
837ea6eb3fSJiri Pirko {
847ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
857ea6eb3fSJiri Pirko 
867ea6eb3fSJiri Pirko 	while ((tritem = __switchdev_trans_item_dequeue(trans)))
877ea6eb3fSJiri Pirko 		tritem->destructor(tritem->data);
887ea6eb3fSJiri Pirko }
897ea6eb3fSJiri Pirko 
907ea6eb3fSJiri Pirko static void switchdev_trans_items_warn_destroy(struct net_device *dev,
917ea6eb3fSJiri Pirko 					       struct switchdev_trans *trans)
927ea6eb3fSJiri Pirko {
937ea6eb3fSJiri Pirko 	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
947ea6eb3fSJiri Pirko 	     dev->name);
957ea6eb3fSJiri Pirko 	switchdev_trans_items_destroy(trans);
967ea6eb3fSJiri Pirko }
977ea6eb3fSJiri Pirko 
98793f4014SJiri Pirko static LIST_HEAD(deferred);
99793f4014SJiri Pirko static DEFINE_SPINLOCK(deferred_lock);
100793f4014SJiri Pirko 
101793f4014SJiri Pirko typedef void switchdev_deferred_func_t(struct net_device *dev,
102793f4014SJiri Pirko 				       const void *data);
103793f4014SJiri Pirko 
104793f4014SJiri Pirko struct switchdev_deferred_item {
105793f4014SJiri Pirko 	struct list_head list;
106793f4014SJiri Pirko 	struct net_device *dev;
107793f4014SJiri Pirko 	switchdev_deferred_func_t *func;
108793f4014SJiri Pirko 	unsigned long data[0];
109793f4014SJiri Pirko };
110793f4014SJiri Pirko 
111793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
112793f4014SJiri Pirko {
113793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
114793f4014SJiri Pirko 
115793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
116793f4014SJiri Pirko 	if (list_empty(&deferred)) {
117793f4014SJiri Pirko 		dfitem = NULL;
118793f4014SJiri Pirko 		goto unlock;
119793f4014SJiri Pirko 	}
120793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
121793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
122793f4014SJiri Pirko 	list_del(&dfitem->list);
123793f4014SJiri Pirko unlock:
124793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
125793f4014SJiri Pirko 	return dfitem;
126793f4014SJiri Pirko }
127793f4014SJiri Pirko 
128793f4014SJiri Pirko /**
129793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
130793f4014SJiri Pirko  *
131793f4014SJiri Pirko  *	Called to flush the ops currently queued in the deferred ops queue.
132793f4014SJiri Pirko  *	rtnl_lock must be held.
133793f4014SJiri Pirko  */
134793f4014SJiri Pirko void switchdev_deferred_process(void)
135793f4014SJiri Pirko {
136793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
137793f4014SJiri Pirko 
138793f4014SJiri Pirko 	ASSERT_RTNL();
139793f4014SJiri Pirko 
140793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
141793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
142793f4014SJiri Pirko 		dev_put(dfitem->dev);
143793f4014SJiri Pirko 		kfree(dfitem);
144793f4014SJiri Pirko 	}
145793f4014SJiri Pirko }
146793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
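
/*
 * Usage sketch (hypothetical, not from any in-tree caller): a teardown path
 * that already holds rtnl_lock can flush pending deferred operations
 * explicitly instead of waiting for the workqueue above.
 */
static void mydrv_port_pre_unregister(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Make sure no deferred attr/obj op still references this port. */
	switchdev_deferred_process();

	/* ... now it is safe to tear down the port's offload state ... */
}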
147793f4014SJiri Pirko 
148793f4014SJiri Pirko static void switchdev_deferred_process_work(struct work_struct *work)
149793f4014SJiri Pirko {
150793f4014SJiri Pirko 	rtnl_lock();
151793f4014SJiri Pirko 	switchdev_deferred_process();
152793f4014SJiri Pirko 	rtnl_unlock();
153793f4014SJiri Pirko }
154793f4014SJiri Pirko 
155793f4014SJiri Pirko static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
156793f4014SJiri Pirko 
157793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
158793f4014SJiri Pirko 				      const void *data, size_t data_len,
159793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
160793f4014SJiri Pirko {
161793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
162793f4014SJiri Pirko 
163793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
164793f4014SJiri Pirko 	if (!dfitem)
165793f4014SJiri Pirko 		return -ENOMEM;
166793f4014SJiri Pirko 	dfitem->dev = dev;
167793f4014SJiri Pirko 	dfitem->func = func;
168793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
169793f4014SJiri Pirko 	dev_hold(dev);
170793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
171793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
172793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
173793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
174793f4014SJiri Pirko 	return 0;
175793f4014SJiri Pirko }
176793f4014SJiri Pirko 
1777ea6eb3fSJiri Pirko /**
1783094333dSScott Feldman  *	switchdev_port_attr_get - Get port attribute
1793094333dSScott Feldman  *
1803094333dSScott Feldman  *	@dev: port device
1813094333dSScott Feldman  *	@attr: attribute to get
1823094333dSScott Feldman  */
1833094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
1843094333dSScott Feldman {
1853094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
1863094333dSScott Feldman 	struct net_device *lower_dev;
1873094333dSScott Feldman 	struct list_head *iter;
1883094333dSScott Feldman 	struct switchdev_attr first = {
1891f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_UNDEFINED
1903094333dSScott Feldman 	};
1913094333dSScott Feldman 	int err = -EOPNOTSUPP;
1923094333dSScott Feldman 
1933094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
1943094333dSScott Feldman 		return ops->switchdev_port_attr_get(dev, attr);
1953094333dSScott Feldman 
1963094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
1973094333dSScott Feldman 		return err;
1983094333dSScott Feldman 
1993094333dSScott Feldman 	/* Switch device port(s) may be stacked under
2003094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to get attr on
2013094333dSScott Feldman 	 * each port.  Return -ENODATA if attr values differ
2023094333dSScott Feldman 	 * across ports.
2033094333dSScott Feldman 	 */
2043094333dSScott Feldman 
2053094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2063094333dSScott Feldman 		err = switchdev_port_attr_get(lower_dev, attr);
2073094333dSScott Feldman 		if (err)
2083094333dSScott Feldman 			break;
2091f868398SJiri Pirko 		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
2103094333dSScott Feldman 			first = *attr;
2113094333dSScott Feldman 		else if (memcmp(&first, attr, sizeof(*attr)))
2123094333dSScott Feldman 			return -ENODATA;
2133094333dSScott Feldman 	}
2143094333dSScott Feldman 
2153094333dSScott Feldman 	return err;
2163094333dSScott Feldman }
2173094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
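
/*
 * Usage sketch (hypothetical): reading the physical switch ID a port
 * belongs to.  This is the same pattern switchdev_port_same_parent_id()
 * uses at the bottom of this file.
 */
static int mydrv_get_parent_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	*ppid = attr.u.ppid;
	return 0;
}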
2183094333dSScott Feldman 
2193094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev,
220f7fadf30SJiri Pirko 				     const struct switchdev_attr *attr,
2217ea6eb3fSJiri Pirko 				     struct switchdev_trans *trans)
2223094333dSScott Feldman {
2233094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
2243094333dSScott Feldman 	struct net_device *lower_dev;
2253094333dSScott Feldman 	struct list_head *iter;
2263094333dSScott Feldman 	int err = -EOPNOTSUPP;
2273094333dSScott Feldman 
2280c63d80cSJiri Pirko 	if (ops && ops->switchdev_port_attr_set) {
2290c63d80cSJiri Pirko 		err = ops->switchdev_port_attr_set(dev, attr, trans);
2300c63d80cSJiri Pirko 		goto done;
2310c63d80cSJiri Pirko 	}
2323094333dSScott Feldman 
2333094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
234464314eaSScott Feldman 		goto done;
2353094333dSScott Feldman 
2363094333dSScott Feldman 	/* Switch device port(s) may be stacked under
2373094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to set attr on
2383094333dSScott Feldman 	 * each port.
2393094333dSScott Feldman 	 */
2403094333dSScott Feldman 
2413094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2427ea6eb3fSJiri Pirko 		err = __switchdev_port_attr_set(lower_dev, attr, trans);
2433094333dSScott Feldman 		if (err)
2443094333dSScott Feldman 			break;
2453094333dSScott Feldman 	}
2463094333dSScott Feldman 
247464314eaSScott Feldman done:
248464314eaSScott Feldman 	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
249464314eaSScott Feldman 		err = 0;
250464314eaSScott Feldman 
2513094333dSScott Feldman 	return err;
2523094333dSScott Feldman }
2533094333dSScott Feldman 
2540bc05d58SJiri Pirko static int switchdev_port_attr_set_now(struct net_device *dev,
255f7fadf30SJiri Pirko 				       const struct switchdev_attr *attr)
2563094333dSScott Feldman {
2577ea6eb3fSJiri Pirko 	struct switchdev_trans trans;
2583094333dSScott Feldman 	int err;
2593094333dSScott Feldman 
2607ea6eb3fSJiri Pirko 	switchdev_trans_init(&trans);
2617ea6eb3fSJiri Pirko 
2623094333dSScott Feldman 	/* Phase I: prepare for attr set. Driver/device should fail
2633094333dSScott Feldman 	 * here if there are going to be issues in the commit phase,
2643094333dSScott Feldman 	 * such as lack of resources or support.  The driver/device
2653094333dSScott Feldman 	 * should reserve resources needed for the commit phase here,
2663094333dSScott Feldman 	 * but should not commit the attr.
2673094333dSScott Feldman 	 */
2683094333dSScott Feldman 
269f623ab7fSJiri Pirko 	trans.ph_prepare = true;
2707ea6eb3fSJiri Pirko 	err = __switchdev_port_attr_set(dev, attr, &trans);
2713094333dSScott Feldman 	if (err) {
2723094333dSScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
2733094333dSScott Feldman 		 * resources reserved in the prepare phase are
2743094333dSScott Feldman 		 * released.
2753094333dSScott Feldman 		 */
2763094333dSScott Feldman 
2779f6467cfSJiri Pirko 		if (err != -EOPNOTSUPP)
2787ea6eb3fSJiri Pirko 			switchdev_trans_items_destroy(&trans);
2793094333dSScott Feldman 
2803094333dSScott Feldman 		return err;
2813094333dSScott Feldman 	}
2823094333dSScott Feldman 
2833094333dSScott Feldman 	/* Phase II: commit attr set.  This cannot fail as a fault
2843094333dSScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
2853094333dSScott Feldman 	 * because the driver said everything was OK in phase I.
2863094333dSScott Feldman 	 */
2873094333dSScott Feldman 
288f623ab7fSJiri Pirko 	trans.ph_prepare = false;
2897ea6eb3fSJiri Pirko 	err = __switchdev_port_attr_set(dev, attr, &trans);
290e9fdaec0SScott Feldman 	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
291e9fdaec0SScott Feldman 	     dev->name, attr->id);
2927ea6eb3fSJiri Pirko 	switchdev_trans_items_warn_destroy(dev, &trans);
2933094333dSScott Feldman 
2943094333dSScott Feldman 	return err;
2953094333dSScott Feldman }
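
/*
 * Driver-side sketch (hypothetical): a switchdev_port_attr_set op matching
 * the prepare/commit sequence driven above.  It validates and reserves in
 * the prepare phase and only touches the hardware in the commit phase.
 */
static int mydrv_port_attr_set(struct net_device *dev,
			       const struct switchdev_attr *attr,
			       struct switchdev_trans *trans)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (switchdev_trans_ph_prepare(trans))
			return 0;	/* nothing to reserve for this attr */
		/* commit: program attr->u.stp_state into the hardware */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct switchdev_ops mydrv_switchdev_ops = {
	.switchdev_port_attr_set = mydrv_port_attr_set,
};
/* A port driver would point dev->switchdev_ops at such a struct. */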
2960bc05d58SJiri Pirko 
2970bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
2980bc05d58SJiri Pirko 					     const void *data)
2990bc05d58SJiri Pirko {
3000bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
3010bc05d58SJiri Pirko 	int err;
3020bc05d58SJiri Pirko 
3030bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
3040bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
3050bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
3060bc05d58SJiri Pirko 			   err, attr->id);
3077ceb2afbSElad Raz 	if (attr->complete)
3087ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
3090bc05d58SJiri Pirko }
3100bc05d58SJiri Pirko 
3110bc05d58SJiri Pirko static int switchdev_port_attr_set_defer(struct net_device *dev,
3120bc05d58SJiri Pirko 					 const struct switchdev_attr *attr)
3130bc05d58SJiri Pirko {
3140bc05d58SJiri Pirko 	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
3150bc05d58SJiri Pirko 					  switchdev_port_attr_set_deferred);
3160bc05d58SJiri Pirko }
3170bc05d58SJiri Pirko 
3180bc05d58SJiri Pirko /**
3190bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
3200bc05d58SJiri Pirko  *
3210bc05d58SJiri Pirko  *	@dev: port device
3220bc05d58SJiri Pirko  *	@attr: attribute to set
3230bc05d58SJiri Pirko  *
3240bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
3250bc05d58SJiri Pirko  *	the system is not left in a partially updated state due to
3260bc05d58SJiri Pirko  *	failure from driver/device.
3270bc05d58SJiri Pirko  *
3280bc05d58SJiri Pirko  *	rtnl_lock must be held and the caller must not be in an atomic
3290bc05d58SJiri Pirko  *	section if the SWITCHDEV_F_DEFER flag is not set.
3300bc05d58SJiri Pirko  */
3310bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
3320bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
3330bc05d58SJiri Pirko {
3340bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
3350bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
3360bc05d58SJiri Pirko 	ASSERT_RTNL();
3370bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
3380bc05d58SJiri Pirko }
3393094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
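
/*
 * Caller-side sketch (hypothetical): setting a port attribute.  Callers
 * that may run in atomic context (the bridge's STP code, for instance)
 * request the deferred variant with SWITCHDEV_F_DEFER.
 */
static int mydrv_set_stp_state(struct net_device *dev, u8 state)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
		.flags = SWITCHDEV_F_DEFER,
		.u.stp_state = state,	/* e.g. BR_STATE_FORWARDING */
	};

	return switchdev_port_attr_set(dev, &attr);
}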
3403094333dSScott Feldman 
341e258d919SScott Feldman static size_t switchdev_obj_size(const struct switchdev_obj *obj)
342e258d919SScott Feldman {
343e258d919SScott Feldman 	switch (obj->id) {
344e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
345e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_vlan);
346e258d919SScott Feldman 	case SWITCHDEV_OBJ_ID_PORT_FDB:
347e258d919SScott Feldman 		return sizeof(struct switchdev_obj_port_fdb);
3484d41e125SElad Raz 	case SWITCHDEV_OBJ_ID_PORT_MDB:
3494d41e125SElad Raz 		return sizeof(struct switchdev_obj_port_mdb);
350e258d919SScott Feldman 	default:
351e258d919SScott Feldman 		BUG();
352e258d919SScott Feldman 	}
353e258d919SScott Feldman 	return 0;
354e258d919SScott Feldman }
355e258d919SScott Feldman 
35622c1f67eSScott Feldman static int __switchdev_port_obj_add(struct net_device *dev,
357648b4a99SJiri Pirko 				    const struct switchdev_obj *obj,
3587ea6eb3fSJiri Pirko 				    struct switchdev_trans *trans)
359491d0f15SScott Feldman {
360491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
361491d0f15SScott Feldman 	struct net_device *lower_dev;
362491d0f15SScott Feldman 	struct list_head *iter;
363491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
364491d0f15SScott Feldman 
365491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_add)
3669e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_add(dev, obj, trans);
367491d0f15SScott Feldman 
368491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
369491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to add object on
370491d0f15SScott Feldman 	 * each port.
371491d0f15SScott Feldman 	 */
372491d0f15SScott Feldman 
373491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
3749e8f4a54SJiri Pirko 		err = __switchdev_port_obj_add(lower_dev, obj, trans);
375491d0f15SScott Feldman 		if (err)
376491d0f15SScott Feldman 			break;
377491d0f15SScott Feldman 	}
378491d0f15SScott Feldman 
379491d0f15SScott Feldman 	return err;
380491d0f15SScott Feldman }
381491d0f15SScott Feldman 
3824d429c5dSJiri Pirko static int switchdev_port_obj_add_now(struct net_device *dev,
383648b4a99SJiri Pirko 				      const struct switchdev_obj *obj)
384491d0f15SScott Feldman {
3857ea6eb3fSJiri Pirko 	struct switchdev_trans trans;
386491d0f15SScott Feldman 	int err;
387491d0f15SScott Feldman 
388491d0f15SScott Feldman 	ASSERT_RTNL();
389491d0f15SScott Feldman 
3907ea6eb3fSJiri Pirko 	switchdev_trans_init(&trans);
3917ea6eb3fSJiri Pirko 
392491d0f15SScott Feldman 	/* Phase I: prepare for obj add. Driver/device should fail
393491d0f15SScott Feldman 	 * here if there are going to be issues in the commit phase,
394491d0f15SScott Feldman 	 * such as lack of resources or support.  The driver/device
395491d0f15SScott Feldman 	 * should reserve resources needed for the commit phase here,
396491d0f15SScott Feldman 	 * but should not commit the obj.
397491d0f15SScott Feldman 	 */
398491d0f15SScott Feldman 
399f623ab7fSJiri Pirko 	trans.ph_prepare = true;
4009e8f4a54SJiri Pirko 	err = __switchdev_port_obj_add(dev, obj, &trans);
401491d0f15SScott Feldman 	if (err) {
402491d0f15SScott Feldman 		/* Prepare phase failed: abort the transaction.  Any
403491d0f15SScott Feldman 		 * resources reserved in the prepare phase are
404491d0f15SScott Feldman 		 * released.
405491d0f15SScott Feldman 		 */
406491d0f15SScott Feldman 
4079f6467cfSJiri Pirko 		if (err != -EOPNOTSUPP)
4087ea6eb3fSJiri Pirko 			switchdev_trans_items_destroy(&trans);
409491d0f15SScott Feldman 
410491d0f15SScott Feldman 		return err;
411491d0f15SScott Feldman 	}
412491d0f15SScott Feldman 
413491d0f15SScott Feldman 	/* Phase II: commit obj add.  This cannot fail as a fault
414491d0f15SScott Feldman 	 * of driver/device.  If it does, it's a bug in the driver/device
415491d0f15SScott Feldman 	 * because the driver said everything was OK in phase I.
416491d0f15SScott Feldman 	 */
417491d0f15SScott Feldman 
418f623ab7fSJiri Pirko 	trans.ph_prepare = false;
4199e8f4a54SJiri Pirko 	err = __switchdev_port_obj_add(dev, obj, &trans);
4209e8f4a54SJiri Pirko 	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
4217ea6eb3fSJiri Pirko 	switchdev_trans_items_warn_destroy(dev, &trans);
422491d0f15SScott Feldman 
423491d0f15SScott Feldman 	return err;
424491d0f15SScott Feldman }
4254d429c5dSJiri Pirko 
4264d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
4274d429c5dSJiri Pirko 					    const void *data)
4284d429c5dSJiri Pirko {
4294d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
4304d429c5dSJiri Pirko 	int err;
4314d429c5dSJiri Pirko 
4324d429c5dSJiri Pirko 	err = switchdev_port_obj_add_now(dev, obj);
4334d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
4344d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
4354d429c5dSJiri Pirko 			   err, obj->id);
4367ceb2afbSElad Raz 	if (obj->complete)
4377ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
4384d429c5dSJiri Pirko }
4394d429c5dSJiri Pirko 
4404d429c5dSJiri Pirko static int switchdev_port_obj_add_defer(struct net_device *dev,
4414d429c5dSJiri Pirko 					const struct switchdev_obj *obj)
4424d429c5dSJiri Pirko {
443e258d919SScott Feldman 	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
4444d429c5dSJiri Pirko 					  switchdev_port_obj_add_deferred);
4454d429c5dSJiri Pirko }
446491d0f15SScott Feldman 
447491d0f15SScott Feldman /**
4484d429c5dSJiri Pirko  *	switchdev_port_obj_add - Add port object
449491d0f15SScott Feldman  *
450491d0f15SScott Feldman  *	@dev: port device
4524d429c5dSJiri Pirko  *	@obj: object to add
4534d429c5dSJiri Pirko  *
4544d429c5dSJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
4554d429c5dSJiri Pirko  *	the system is not left in a partially updated state due to
4564d429c5dSJiri Pirko  *	failure from driver/device.
4574d429c5dSJiri Pirko  *
4584d429c5dSJiri Pirko  *	rtnl_lock must be held and the caller must not be in an atomic
4594d429c5dSJiri Pirko  *	section if the SWITCHDEV_F_DEFER flag is not set.
460491d0f15SScott Feldman  */
4614d429c5dSJiri Pirko int switchdev_port_obj_add(struct net_device *dev,
4624d429c5dSJiri Pirko 			   const struct switchdev_obj *obj)
4634d429c5dSJiri Pirko {
4644d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
4654d429c5dSJiri Pirko 		return switchdev_port_obj_add_defer(dev, obj);
4664d429c5dSJiri Pirko 	ASSERT_RTNL();
4674d429c5dSJiri Pirko 	return switchdev_port_obj_add_now(dev, obj);
4684d429c5dSJiri Pirko }
4694d429c5dSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
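
/*
 * Caller-side sketch (hypothetical): adding a single-VID VLAN object to a
 * port; the same object passed to switchdev_port_obj_del() removes it.
 */
static int mydrv_port_add_vid(struct net_device *dev, u16 vid, bool pvid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = pvid ? BRIDGE_VLAN_INFO_PVID : 0,
		.vid_begin = vid,
		.vid_end = vid,
	};

	return switchdev_port_obj_add(dev, &vlan.obj);
}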
4704d429c5dSJiri Pirko 
4714d429c5dSJiri Pirko static int switchdev_port_obj_del_now(struct net_device *dev,
472648b4a99SJiri Pirko 				      const struct switchdev_obj *obj)
473491d0f15SScott Feldman {
474491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
475491d0f15SScott Feldman 	struct net_device *lower_dev;
476491d0f15SScott Feldman 	struct list_head *iter;
477491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
478491d0f15SScott Feldman 
479491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_del)
4809e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_del(dev, obj);
481491d0f15SScott Feldman 
482491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
483491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to delete object on
484491d0f15SScott Feldman 	 * each port.
485491d0f15SScott Feldman 	 */
486491d0f15SScott Feldman 
487491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
4884d429c5dSJiri Pirko 		err = switchdev_port_obj_del_now(lower_dev, obj);
489491d0f15SScott Feldman 		if (err)
490491d0f15SScott Feldman 			break;
491491d0f15SScott Feldman 	}
492491d0f15SScott Feldman 
493491d0f15SScott Feldman 	return err;
494491d0f15SScott Feldman }
4954d429c5dSJiri Pirko 
4964d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
4974d429c5dSJiri Pirko 					    const void *data)
4984d429c5dSJiri Pirko {
4994d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
5004d429c5dSJiri Pirko 	int err;
5014d429c5dSJiri Pirko 
5024d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
5034d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
5044d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
5054d429c5dSJiri Pirko 			   err, obj->id);
5067ceb2afbSElad Raz 	if (obj->complete)
5077ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
5084d429c5dSJiri Pirko }
5094d429c5dSJiri Pirko 
5104d429c5dSJiri Pirko static int switchdev_port_obj_del_defer(struct net_device *dev,
5114d429c5dSJiri Pirko 					const struct switchdev_obj *obj)
5124d429c5dSJiri Pirko {
513e258d919SScott Feldman 	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
5144d429c5dSJiri Pirko 					  switchdev_port_obj_del_deferred);
5154d429c5dSJiri Pirko }
5164d429c5dSJiri Pirko 
5174d429c5dSJiri Pirko /**
5184d429c5dSJiri Pirko  *	switchdev_port_obj_del - Delete port object
5194d429c5dSJiri Pirko  *
5204d429c5dSJiri Pirko  *	@dev: port device
5224d429c5dSJiri Pirko  *	@obj: object to delete
5234d429c5dSJiri Pirko  *
5244d429c5dSJiri Pirko  *	rtnl_lock must be held and the caller must not be in an atomic
5254d429c5dSJiri Pirko  *	section if the SWITCHDEV_F_DEFER flag is not set.
5264d429c5dSJiri Pirko  */
5274d429c5dSJiri Pirko int switchdev_port_obj_del(struct net_device *dev,
5284d429c5dSJiri Pirko 			   const struct switchdev_obj *obj)
5294d429c5dSJiri Pirko {
5304d429c5dSJiri Pirko 	if (obj->flags & SWITCHDEV_F_DEFER)
5314d429c5dSJiri Pirko 		return switchdev_port_obj_del_defer(dev, obj);
5324d429c5dSJiri Pirko 	ASSERT_RTNL();
5334d429c5dSJiri Pirko 	return switchdev_port_obj_del_now(dev, obj);
5344d429c5dSJiri Pirko }
535491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
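
/*
 * Caller-side sketch (hypothetical): deleting an object from a context
 * that cannot sleep.  SWITCHDEV_F_DEFER queues the op and the optional
 * complete() callback reports the final status once the deferred work has
 * run under rtnl_lock.
 */
static void mydrv_vlan_del_complete(struct net_device *dev, int err,
				    void *priv)
{
	if (err)
		netdev_warn(dev, "deferred VLAN delete failed: %d\n", err);
	kfree(priv);
}

static int mydrv_port_del_vid_deferred(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan *vlan;
	int err;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		return -ENOMEM;

	vlan->obj.orig_dev = dev;
	vlan->obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN;
	vlan->obj.flags = SWITCHDEV_F_DEFER;
	vlan->obj.complete = mydrv_vlan_del_complete;
	vlan->obj.complete_priv = vlan;
	vlan->vid_begin = vid;
	vlan->vid_end = vid;

	err = switchdev_port_obj_del(dev, &vlan->obj);
	if (err)
		kfree(vlan);	/* complete() will not be called */
	return err;
}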
536491d0f15SScott Feldman 
53745d4122cSSamudrala, Sridhar /**
53845d4122cSSamudrala, Sridhar  *	switchdev_port_obj_dump - Dump port objects
53945d4122cSSamudrala, Sridhar  *
54045d4122cSSamudrala, Sridhar  *	@dev: port device
54245d4122cSSamudrala, Sridhar  *	@obj: object to dump
54325f07adcSVivien Didelot  *	@cb: function to call with a filled object
544771acac2SJiri Pirko  *
545771acac2SJiri Pirko  *	rtnl_lock must be held.
54645d4122cSSamudrala, Sridhar  */
5479e8f4a54SJiri Pirko int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
548648b4a99SJiri Pirko 			    switchdev_obj_dump_cb_t *cb)
54945d4122cSSamudrala, Sridhar {
55045d4122cSSamudrala, Sridhar 	const struct switchdev_ops *ops = dev->switchdev_ops;
55145d4122cSSamudrala, Sridhar 	struct net_device *lower_dev;
55245d4122cSSamudrala, Sridhar 	struct list_head *iter;
55345d4122cSSamudrala, Sridhar 	int err = -EOPNOTSUPP;
55445d4122cSSamudrala, Sridhar 
555771acac2SJiri Pirko 	ASSERT_RTNL();
556771acac2SJiri Pirko 
55745d4122cSSamudrala, Sridhar 	if (ops && ops->switchdev_port_obj_dump)
5589e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_dump(dev, obj, cb);
55945d4122cSSamudrala, Sridhar 
56045d4122cSSamudrala, Sridhar 	/* Switch device port(s) may be stacked under
56145d4122cSSamudrala, Sridhar 	 * bond/team/vlan dev, so recurse down to dump objects on
56245d4122cSSamudrala, Sridhar 	 * first port at bottom of stack.
56345d4122cSSamudrala, Sridhar 	 */
56445d4122cSSamudrala, Sridhar 
56545d4122cSSamudrala, Sridhar 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
5669e8f4a54SJiri Pirko 		err = switchdev_port_obj_dump(lower_dev, obj, cb);
56745d4122cSSamudrala, Sridhar 		break;
56845d4122cSSamudrala, Sridhar 	}
56945d4122cSSamudrala, Sridhar 
57045d4122cSSamudrala, Sridhar 	return err;
57145d4122cSSamudrala, Sridhar }
57245d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
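
/*
 * Caller-side sketch (hypothetical): walking a port's VLANs.  The callback
 * runs once per object the driver reports; the two dumpers further down in
 * this file follow the same shape.
 */
static int mydrv_vlan_dump_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;
	/* ... record vlan->vid_begin..vlan->vid_end somewhere ... */
	return 0;
}

static int mydrv_dump_vlans(struct net_device *dev)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};

	return switchdev_port_obj_dump(dev, &vlan.obj, mydrv_vlan_dump_cb);
}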
57345d4122cSSamudrala, Sridhar 
574*ff5cf100SArkadi Sharshevsky static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
57503bf0c28SJiri Pirko 
57603bf0c28SJiri Pirko /**
577ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
57803bf0c28SJiri Pirko  *	@nb: notifier_block
57903bf0c28SJiri Pirko  *
580*ff5cf100SArkadi Sharshevsky  *	Register switch device notifier.
58103bf0c28SJiri Pirko  */
582ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
58303bf0c28SJiri Pirko {
584*ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
58503bf0c28SJiri Pirko }
586ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
58703bf0c28SJiri Pirko 
58803bf0c28SJiri Pirko /**
589ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
59003bf0c28SJiri Pirko  *	@nb: notifier_block
59103bf0c28SJiri Pirko  *
59203bf0c28SJiri Pirko  *	Unregister switch device notifier.
59303bf0c28SJiri Pirko  */
594ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
59503bf0c28SJiri Pirko {
596*ff5cf100SArkadi Sharshevsky 	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
59703bf0c28SJiri Pirko }
598ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
59903bf0c28SJiri Pirko 
60003bf0c28SJiri Pirko /**
601ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
60203bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
60303bf0c28SJiri Pirko  *	@dev: port device
60403bf0c28SJiri Pirko  *	@info: notifier information data
60503bf0c28SJiri Pirko  *
606*ff5cf100SArkadi Sharshevsky  *	Call all switchdev notifier blocks.
60703bf0c28SJiri Pirko  */
608ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
609ebb9a03aSJiri Pirko 			     struct switchdev_notifier_info *info)
61003bf0c28SJiri Pirko {
61103bf0c28SJiri Pirko 	info->dev = dev;
612*ff5cf100SArkadi Sharshevsky 	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
61303bf0c28SJiri Pirko }
614ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
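
/*
 * Consumer sketch (hypothetical): receiving switchdev events.  The chain
 * is atomic, so the handler must not sleep; real consumers typically copy
 * what they need from the notifier info and defer the rest to a workqueue.
 */
static int mydrv_switchdev_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct switchdev_notifier_info *info = ptr;
	struct net_device *dev = info->dev;

	netdev_dbg(dev, "switchdev event %lu\n", event);
	/* switch (event) { ... } and schedule deferred work as needed */
	return NOTIFY_DONE;
}

static struct notifier_block mydrv_switchdev_nb = {
	.notifier_call = mydrv_switchdev_event,
};
/*
 * Registered once, e.g. at module init, and removed at module exit:
 *	register_switchdev_notifier(&mydrv_switchdev_nb);
 *	unregister_switchdev_notifier(&mydrv_switchdev_nb);
 */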
6158a44dbb2SRoopa Prabhu 
6167d4f8d87SScott Feldman struct switchdev_vlan_dump {
6178f24f309SJiri Pirko 	struct switchdev_obj_port_vlan vlan;
6187d4f8d87SScott Feldman 	struct sk_buff *skb;
6197d4f8d87SScott Feldman 	u32 filter_mask;
6207d4f8d87SScott Feldman 	u16 flags;
6217d4f8d87SScott Feldman 	u16 begin;
6227d4f8d87SScott Feldman 	u16 end;
6237d4f8d87SScott Feldman };
6247d4f8d87SScott Feldman 
625e23b002bSVivien Didelot static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
6267d4f8d87SScott Feldman {
6277d4f8d87SScott Feldman 	struct bridge_vlan_info vinfo;
6287d4f8d87SScott Feldman 
6297d4f8d87SScott Feldman 	vinfo.flags = dump->flags;
6307d4f8d87SScott Feldman 
6317d4f8d87SScott Feldman 	if (dump->begin == 0 && dump->end == 0) {
6327d4f8d87SScott Feldman 		return 0;
6337d4f8d87SScott Feldman 	} else if (dump->begin == dump->end) {
6347d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
6357d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6367d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6377d4f8d87SScott Feldman 			return -EMSGSIZE;
6387d4f8d87SScott Feldman 	} else {
6397d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
6407d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
6417d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6427d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6437d4f8d87SScott Feldman 			return -EMSGSIZE;
6447d4f8d87SScott Feldman 		vinfo.vid = dump->end;
6457d4f8d87SScott Feldman 		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
6467d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
6477d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6487d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6497d4f8d87SScott Feldman 			return -EMSGSIZE;
6507d4f8d87SScott Feldman 	}
6517d4f8d87SScott Feldman 
6527d4f8d87SScott Feldman 	return 0;
6537d4f8d87SScott Feldman }
6547d4f8d87SScott Feldman 
655648b4a99SJiri Pirko static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
6567d4f8d87SScott Feldman {
657648b4a99SJiri Pirko 	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
6587d4f8d87SScott Feldman 	struct switchdev_vlan_dump *dump =
65925f07adcSVivien Didelot 		container_of(vlan, struct switchdev_vlan_dump, vlan);
6607d4f8d87SScott Feldman 	int err = 0;
6617d4f8d87SScott Feldman 
6627d4f8d87SScott Feldman 	if (vlan->vid_begin > vlan->vid_end)
6637d4f8d87SScott Feldman 		return -EINVAL;
6647d4f8d87SScott Feldman 
6657d4f8d87SScott Feldman 	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
6667d4f8d87SScott Feldman 		dump->flags = vlan->flags;
6677d4f8d87SScott Feldman 		for (dump->begin = dump->end = vlan->vid_begin;
6687d4f8d87SScott Feldman 		     dump->begin <= vlan->vid_end;
6697d4f8d87SScott Feldman 		     dump->begin++, dump->end++) {
670e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(dump);
6717d4f8d87SScott Feldman 			if (err)
6727d4f8d87SScott Feldman 				return err;
6737d4f8d87SScott Feldman 		}
6747d4f8d87SScott Feldman 	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
6757d4f8d87SScott Feldman 		if (dump->begin > vlan->vid_begin &&
6767d4f8d87SScott Feldman 		    dump->begin >= vlan->vid_end) {
6777d4f8d87SScott Feldman 			if ((dump->begin - 1) == vlan->vid_end &&
6787d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
6797d4f8d87SScott Feldman 				/* prepend */
6807d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6817d4f8d87SScott Feldman 			} else {
682e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
6837d4f8d87SScott Feldman 				dump->flags = vlan->flags;
6847d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6857d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6867d4f8d87SScott Feldman 			}
6877d4f8d87SScott Feldman 		} else if (dump->end <= vlan->vid_begin &&
6887d4f8d87SScott Feldman 		           dump->end < vlan->vid_end) {
6897d4f8d87SScott Feldman 			if ((dump->end  + 1) == vlan->vid_begin &&
6907d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
6917d4f8d87SScott Feldman 				/* append */
6927d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6937d4f8d87SScott Feldman 			} else {
694e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
6957d4f8d87SScott Feldman 				dump->flags = vlan->flags;
6967d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
6977d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
6987d4f8d87SScott Feldman 			}
6997d4f8d87SScott Feldman 		} else {
7007d4f8d87SScott Feldman 			err = -EINVAL;
7017d4f8d87SScott Feldman 		}
7027d4f8d87SScott Feldman 	}
7037d4f8d87SScott Feldman 
7047d4f8d87SScott Feldman 	return err;
7057d4f8d87SScott Feldman }
7067d4f8d87SScott Feldman 
7077d4f8d87SScott Feldman static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
7087d4f8d87SScott Feldman 				    u32 filter_mask)
7097d4f8d87SScott Feldman {
7107d4f8d87SScott Feldman 	struct switchdev_vlan_dump dump = {
7116ff64f6fSIdo Schimmel 		.vlan.obj.orig_dev = dev,
7129e8f4a54SJiri Pirko 		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
7137d4f8d87SScott Feldman 		.skb = skb,
7147d4f8d87SScott Feldman 		.filter_mask = filter_mask,
7157d4f8d87SScott Feldman 	};
7167d4f8d87SScott Feldman 	int err = 0;
7177d4f8d87SScott Feldman 
7187d4f8d87SScott Feldman 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
7197d4f8d87SScott Feldman 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
7209e8f4a54SJiri Pirko 		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
72125f07adcSVivien Didelot 					      switchdev_port_vlan_dump_cb);
7227d4f8d87SScott Feldman 		if (err)
7237d4f8d87SScott Feldman 			goto err_out;
7247d4f8d87SScott Feldman 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
7257d4f8d87SScott Feldman 			/* last one */
726e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(&dump);
7277d4f8d87SScott Feldman 	}
7287d4f8d87SScott Feldman 
7297d4f8d87SScott Feldman err_out:
7307d4f8d87SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
7317d4f8d87SScott Feldman }
7327d4f8d87SScott Feldman 
7338793d0a6SScott Feldman /**
7348793d0a6SScott Feldman  *	switchdev_port_bridge_getlink - Get bridge port attributes
7358793d0a6SScott Feldman  *
7368793d0a6SScott Feldman  *	@dev: port device
7378793d0a6SScott Feldman  *
7388793d0a6SScott Feldman  *	Called for SELF on rtnl_bridge_getlink to get bridge port
7398793d0a6SScott Feldman  *	attributes.
7408793d0a6SScott Feldman  */
7418793d0a6SScott Feldman int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7428793d0a6SScott Feldman 				  struct net_device *dev, u32 filter_mask,
7438793d0a6SScott Feldman 				  int nlflags)
7448793d0a6SScott Feldman {
7458793d0a6SScott Feldman 	struct switchdev_attr attr = {
7466ff64f6fSIdo Schimmel 		.orig_dev = dev,
7471f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
7488793d0a6SScott Feldman 	};
7498793d0a6SScott Feldman 	u16 mode = BRIDGE_MODE_UNDEF;
750741af005SIdo Schimmel 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
7518793d0a6SScott Feldman 	int err;
7528793d0a6SScott Feldman 
75397c24290SIdo Schimmel 	if (!netif_is_bridge_port(dev))
75497c24290SIdo Schimmel 		return -EOPNOTSUPP;
75597c24290SIdo Schimmel 
7568793d0a6SScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
7575c8079d0SVivien Didelot 	if (err && err != -EOPNOTSUPP)
7588793d0a6SScott Feldman 		return err;
7598793d0a6SScott Feldman 
7608793d0a6SScott Feldman 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
7617d4f8d87SScott Feldman 				       attr.u.brport_flags, mask, nlflags,
7627d4f8d87SScott Feldman 				       filter_mask, switchdev_port_vlan_fill);
7638793d0a6SScott Feldman }
7648793d0a6SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
7658793d0a6SScott Feldman 
76647f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev,
76747f8328bSScott Feldman 				     struct nlattr *nlattr,
76847f8328bSScott Feldman 				     unsigned long brport_flag)
76947f8328bSScott Feldman {
77047f8328bSScott Feldman 	struct switchdev_attr attr = {
7716ff64f6fSIdo Schimmel 		.orig_dev = dev,
7721f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
77347f8328bSScott Feldman 	};
77447f8328bSScott Feldman 	u8 flag = nla_get_u8(nlattr);
77547f8328bSScott Feldman 	int err;
77647f8328bSScott Feldman 
77747f8328bSScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
77847f8328bSScott Feldman 	if (err)
77947f8328bSScott Feldman 		return err;
78047f8328bSScott Feldman 
78147f8328bSScott Feldman 	if (flag)
78242275bd8SScott Feldman 		attr.u.brport_flags |= brport_flag;
78347f8328bSScott Feldman 	else
78442275bd8SScott Feldman 		attr.u.brport_flags &= ~brport_flag;
78547f8328bSScott Feldman 
78647f8328bSScott Feldman 	return switchdev_port_attr_set(dev, &attr);
78747f8328bSScott Feldman }
78847f8328bSScott Feldman 
78947f8328bSScott Feldman static const struct nla_policy
79047f8328bSScott Feldman switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
79147f8328bSScott Feldman 	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
79247f8328bSScott Feldman 	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
79347f8328bSScott Feldman 	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
79447f8328bSScott Feldman 	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
79547f8328bSScott Feldman 	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
79647f8328bSScott Feldman 	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
79747f8328bSScott Feldman 	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
79847f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
79947f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
80047f8328bSScott Feldman 	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
80147f8328bSScott Feldman };
80247f8328bSScott Feldman 
80347f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
80447f8328bSScott Feldman 					      struct nlattr *protinfo)
80547f8328bSScott Feldman {
80647f8328bSScott Feldman 	struct nlattr *attr;
80747f8328bSScott Feldman 	int rem;
80847f8328bSScott Feldman 	int err;
80947f8328bSScott Feldman 
81047f8328bSScott Feldman 	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
811fceb6435SJohannes Berg 				  switchdev_port_bridge_policy, NULL);
81247f8328bSScott Feldman 	if (err)
81347f8328bSScott Feldman 		return err;
81447f8328bSScott Feldman 
81547f8328bSScott Feldman 	nla_for_each_nested(attr, protinfo, rem) {
81647f8328bSScott Feldman 		switch (nla_type(attr)) {
81747f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING:
81847f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
81947f8328bSScott Feldman 							BR_LEARNING);
82047f8328bSScott Feldman 			break;
82147f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING_SYNC:
82247f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
82347f8328bSScott Feldman 							BR_LEARNING_SYNC);
82447f8328bSScott Feldman 			break;
825741af005SIdo Schimmel 		case IFLA_BRPORT_UNICAST_FLOOD:
826741af005SIdo Schimmel 			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
827741af005SIdo Schimmel 			break;
82847f8328bSScott Feldman 		default:
82947f8328bSScott Feldman 			err = -EOPNOTSUPP;
83047f8328bSScott Feldman 			break;
83147f8328bSScott Feldman 		}
83247f8328bSScott Feldman 		if (err)
83347f8328bSScott Feldman 			return err;
83447f8328bSScott Feldman 	}
83547f8328bSScott Feldman 
83647f8328bSScott Feldman 	return 0;
83747f8328bSScott Feldman }
83847f8328bSScott Feldman 
83947f8328bSScott Feldman static int switchdev_port_br_afspec(struct net_device *dev,
84047f8328bSScott Feldman 				    struct nlattr *afspec,
84147f8328bSScott Feldman 				    int (*f)(struct net_device *dev,
842648b4a99SJiri Pirko 					     const struct switchdev_obj *obj))
84347f8328bSScott Feldman {
84447f8328bSScott Feldman 	struct nlattr *attr;
84547f8328bSScott Feldman 	struct bridge_vlan_info *vinfo;
8469e8f4a54SJiri Pirko 	struct switchdev_obj_port_vlan vlan = {
8476ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
8489e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
8499e8f4a54SJiri Pirko 	};
85047f8328bSScott Feldman 	int rem;
85147f8328bSScott Feldman 	int err;
85247f8328bSScott Feldman 
85347f8328bSScott Feldman 	nla_for_each_nested(attr, afspec, rem) {
85447f8328bSScott Feldman 		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
85547f8328bSScott Feldman 			continue;
85647f8328bSScott Feldman 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
85747f8328bSScott Feldman 			return -EINVAL;
85847f8328bSScott Feldman 		vinfo = nla_data(attr);
85987aaf2caSNikolay Aleksandrov 		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
86087aaf2caSNikolay Aleksandrov 			return -EINVAL;
861ab069002SVivien Didelot 		vlan.flags = vinfo->flags;
86247f8328bSScott Feldman 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
863ab069002SVivien Didelot 			if (vlan.vid_begin)
86447f8328bSScott Feldman 				return -EINVAL;
865ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
866cc02aa8eSNikolay Aleksandrov 			/* don't allow range of pvids */
867cc02aa8eSNikolay Aleksandrov 			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
868cc02aa8eSNikolay Aleksandrov 				return -EINVAL;
86947f8328bSScott Feldman 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
870ab069002SVivien Didelot 			if (!vlan.vid_begin)
87147f8328bSScott Feldman 				return -EINVAL;
872ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
873ab069002SVivien Didelot 			if (vlan.vid_end <= vlan.vid_begin)
87447f8328bSScott Feldman 				return -EINVAL;
8759e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
87647f8328bSScott Feldman 			if (err)
87747f8328bSScott Feldman 				return err;
8783a7bde55SScott Feldman 			vlan.vid_begin = 0;
87947f8328bSScott Feldman 		} else {
880ab069002SVivien Didelot 			if (vlan.vid_begin)
88147f8328bSScott Feldman 				return -EINVAL;
882ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
883ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
8849e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
88547f8328bSScott Feldman 			if (err)
88647f8328bSScott Feldman 				return err;
8873a7bde55SScott Feldman 			vlan.vid_begin = 0;
88847f8328bSScott Feldman 		}
88947f8328bSScott Feldman 	}
89047f8328bSScott Feldman 
89147f8328bSScott Feldman 	return 0;
89247f8328bSScott Feldman }
89347f8328bSScott Feldman 
8948a44dbb2SRoopa Prabhu /**
89547f8328bSScott Feldman  *	switchdev_port_bridge_setlink - Set bridge port attributes
8968a44dbb2SRoopa Prabhu  *
8978a44dbb2SRoopa Prabhu  *	@dev: port device
89847f8328bSScott Feldman  *	@nlh: netlink header
89947f8328bSScott Feldman  *	@flags: netlink flags
9008a44dbb2SRoopa Prabhu  *
90147f8328bSScott Feldman  *	Called for SELF on rtnl_bridge_setlink to set bridge port
90247f8328bSScott Feldman  *	attributes.
9038a44dbb2SRoopa Prabhu  */
904ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev,
9058a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
9068a44dbb2SRoopa Prabhu {
90747f8328bSScott Feldman 	struct nlattr *protinfo;
90847f8328bSScott Feldman 	struct nlattr *afspec;
90947f8328bSScott Feldman 	int err = 0;
9108a44dbb2SRoopa Prabhu 
91197c24290SIdo Schimmel 	if (!netif_is_bridge_port(dev))
91297c24290SIdo Schimmel 		return -EOPNOTSUPP;
91397c24290SIdo Schimmel 
91447f8328bSScott Feldman 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
91547f8328bSScott Feldman 				   IFLA_PROTINFO);
91647f8328bSScott Feldman 	if (protinfo) {
91747f8328bSScott Feldman 		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
91847f8328bSScott Feldman 		if (err)
91947f8328bSScott Feldman 			return err;
92047f8328bSScott Feldman 	}
9218a44dbb2SRoopa Prabhu 
92247f8328bSScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
92347f8328bSScott Feldman 				 IFLA_AF_SPEC);
92447f8328bSScott Feldman 	if (afspec)
92547f8328bSScott Feldman 		err = switchdev_port_br_afspec(dev, afspec,
92647f8328bSScott Feldman 					       switchdev_port_obj_add);
9278a44dbb2SRoopa Prabhu 
92847f8328bSScott Feldman 	return err;
9298a44dbb2SRoopa Prabhu }
930ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
9318a44dbb2SRoopa Prabhu 
9328a44dbb2SRoopa Prabhu /**
9335c34e022SScott Feldman  *	switchdev_port_bridge_dellink - Delete bridge port attributes
9348a44dbb2SRoopa Prabhu  *
9358a44dbb2SRoopa Prabhu  *	@dev: port device
9365c34e022SScott Feldman  *	@nlh: netlink header
9375c34e022SScott Feldman  *	@flags: netlink flags
9388a44dbb2SRoopa Prabhu  *
9395c34e022SScott Feldman  *	Called for SELF on rtnl_bridge_dellink to delete bridge port
9405c34e022SScott Feldman  *	attributes.
9418a44dbb2SRoopa Prabhu  */
942ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev,
9438a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
9448a44dbb2SRoopa Prabhu {
9455c34e022SScott Feldman 	struct nlattr *afspec;
9468a44dbb2SRoopa Prabhu 
94797c24290SIdo Schimmel 	if (!netif_is_bridge_port(dev))
94897c24290SIdo Schimmel 		return -EOPNOTSUPP;
94997c24290SIdo Schimmel 
9505c34e022SScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
9515c34e022SScott Feldman 				 IFLA_AF_SPEC);
9525c34e022SScott Feldman 	if (afspec)
9535c34e022SScott Feldman 		return switchdev_port_br_afspec(dev, afspec,
9545c34e022SScott Feldman 						switchdev_port_obj_del);
9555c34e022SScott Feldman 
9568a44dbb2SRoopa Prabhu 	return 0;
9578a44dbb2SRoopa Prabhu }
958ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
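
/*
 * Wiring sketch (hypothetical): the three bridge helpers above are
 * signature-compatible with the corresponding ndo hooks, so a switchdev
 * port driver can plug them in directly (only the relevant fields of the
 * made-up ops structure are shown).
 */
static const struct net_device_ops mydrv_port_netdev_ops = {
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};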
9598a44dbb2SRoopa Prabhu 
96045d4122cSSamudrala, Sridhar /**
96145d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
96245d4122cSSamudrala, Sridhar  *
96345d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
96445d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
96545d4122cSSamudrala, Sridhar  *	@dev: port device
96645d4122cSSamudrala, Sridhar  *	@addr: MAC address to add
96745d4122cSSamudrala, Sridhar  *	@vid: VLAN to add
96845d4122cSSamudrala, Sridhar  *
96945d4122cSSamudrala, Sridhar  *	Add FDB entry to switch device.
97045d4122cSSamudrala, Sridhar  */
97145d4122cSSamudrala, Sridhar int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
97245d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
97345d4122cSSamudrala, Sridhar 			   u16 vid, u16 nlm_flags)
97445d4122cSSamudrala, Sridhar {
97552ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
9766ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
9779e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
97845d4122cSSamudrala, Sridhar 		.vid = vid,
97945d4122cSSamudrala, Sridhar 	};
98045d4122cSSamudrala, Sridhar 
981850d0cbcSJiri Pirko 	ether_addr_copy(fdb.addr, addr);
9829e8f4a54SJiri Pirko 	return switchdev_port_obj_add(dev, &fdb.obj);
98345d4122cSSamudrala, Sridhar }
98445d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
98545d4122cSSamudrala, Sridhar 
98645d4122cSSamudrala, Sridhar /**
98745d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
98845d4122cSSamudrala, Sridhar  *
98945d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
99045d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
99145d4122cSSamudrala, Sridhar  *	@dev: port device
99245d4122cSSamudrala, Sridhar  *	@addr: MAC address to delete
99345d4122cSSamudrala, Sridhar  *	@vid: VLAN to delete
99445d4122cSSamudrala, Sridhar  *
99545d4122cSSamudrala, Sridhar  *	Delete FDB entry from switch device.
99645d4122cSSamudrala, Sridhar  */
99745d4122cSSamudrala, Sridhar int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
99845d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
99945d4122cSSamudrala, Sridhar 			   u16 vid)
100045d4122cSSamudrala, Sridhar {
100152ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
10026ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
10039e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
100445d4122cSSamudrala, Sridhar 		.vid = vid,
100545d4122cSSamudrala, Sridhar 	};
100645d4122cSSamudrala, Sridhar 
1007850d0cbcSJiri Pirko 	ether_addr_copy(fdb.addr, addr);
10089e8f4a54SJiri Pirko 	return switchdev_port_obj_del(dev, &fdb.obj);
100945d4122cSSamudrala, Sridhar }
101045d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
101145d4122cSSamudrala, Sridhar 
101245d4122cSSamudrala, Sridhar struct switchdev_fdb_dump {
101352ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb;
1014e02a06b2SVivien Didelot 	struct net_device *dev;
101545d4122cSSamudrala, Sridhar 	struct sk_buff *skb;
101645d4122cSSamudrala, Sridhar 	struct netlink_callback *cb;
101745d4122cSSamudrala, Sridhar 	int idx;
101845d4122cSSamudrala, Sridhar };
101945d4122cSSamudrala, Sridhar 
1020648b4a99SJiri Pirko static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
102145d4122cSSamudrala, Sridhar {
1022648b4a99SJiri Pirko 	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
102345d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump *dump =
102425f07adcSVivien Didelot 		container_of(fdb, struct switchdev_fdb_dump, fdb);
102545d4122cSSamudrala, Sridhar 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
102645d4122cSSamudrala, Sridhar 	u32 seq = dump->cb->nlh->nlmsg_seq;
102745d4122cSSamudrala, Sridhar 	struct nlmsghdr *nlh;
102845d4122cSSamudrala, Sridhar 	struct ndmsg *ndm;
102945d4122cSSamudrala, Sridhar 
1030d297653dSRoopa Prabhu 	if (dump->idx < dump->cb->args[2])
103145d4122cSSamudrala, Sridhar 		goto skip;
103245d4122cSSamudrala, Sridhar 
103345d4122cSSamudrala, Sridhar 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
103445d4122cSSamudrala, Sridhar 			sizeof(*ndm), NLM_F_MULTI);
103545d4122cSSamudrala, Sridhar 	if (!nlh)
103645d4122cSSamudrala, Sridhar 		return -EMSGSIZE;
103745d4122cSSamudrala, Sridhar 
103845d4122cSSamudrala, Sridhar 	ndm = nlmsg_data(nlh);
103945d4122cSSamudrala, Sridhar 	ndm->ndm_family  = AF_BRIDGE;
104045d4122cSSamudrala, Sridhar 	ndm->ndm_pad1    = 0;
104145d4122cSSamudrala, Sridhar 	ndm->ndm_pad2    = 0;
104245d4122cSSamudrala, Sridhar 	ndm->ndm_flags   = NTF_SELF;
104345d4122cSSamudrala, Sridhar 	ndm->ndm_type    = 0;
1044e02a06b2SVivien Didelot 	ndm->ndm_ifindex = dump->dev->ifindex;
104525f07adcSVivien Didelot 	ndm->ndm_state   = fdb->ndm_state;
104645d4122cSSamudrala, Sridhar 
104725f07adcSVivien Didelot 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
104845d4122cSSamudrala, Sridhar 		goto nla_put_failure;
104945d4122cSSamudrala, Sridhar 
105025f07adcSVivien Didelot 	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
105145d4122cSSamudrala, Sridhar 		goto nla_put_failure;
105245d4122cSSamudrala, Sridhar 
105345d4122cSSamudrala, Sridhar 	nlmsg_end(dump->skb, nlh);
105445d4122cSSamudrala, Sridhar 
105545d4122cSSamudrala, Sridhar skip:
105645d4122cSSamudrala, Sridhar 	dump->idx++;
105745d4122cSSamudrala, Sridhar 	return 0;
105845d4122cSSamudrala, Sridhar 
105945d4122cSSamudrala, Sridhar nla_put_failure:
106045d4122cSSamudrala, Sridhar 	nlmsg_cancel(dump->skb, nlh);
106145d4122cSSamudrala, Sridhar 	return -EMSGSIZE;
106245d4122cSSamudrala, Sridhar }
106345d4122cSSamudrala, Sridhar 
106445d4122cSSamudrala, Sridhar /**
106545d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
106645d4122cSSamudrala, Sridhar  *
106745d4122cSSamudrala, Sridhar  *	@skb: netlink skb
106845d4122cSSamudrala, Sridhar  *	@cb: netlink callback
106945d4122cSSamudrala, Sridhar  *	@dev: port device
107045d4122cSSamudrala, Sridhar  *	@filter_dev: filter device
107145d4122cSSamudrala, Sridhar  *	@idx: dump index to start from; updated to the next index on return
107245d4122cSSamudrala, Sridhar  *
10733e347660SNicolas Dichtel  *	Dump FDB entries from switch device.
107445d4122cSSamudrala, Sridhar  */
107545d4122cSSamudrala, Sridhar int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
107645d4122cSSamudrala, Sridhar 			    struct net_device *dev,
1077d297653dSRoopa Prabhu 			    struct net_device *filter_dev, int *idx)
107845d4122cSSamudrala, Sridhar {
107945d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump dump = {
10806ff64f6fSIdo Schimmel 		.fdb.obj.orig_dev = dev,
10819e8f4a54SJiri Pirko 		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
1082e02a06b2SVivien Didelot 		.dev = dev,
108345d4122cSSamudrala, Sridhar 		.skb = skb,
108445d4122cSSamudrala, Sridhar 		.cb = cb,
1085d297653dSRoopa Prabhu 		.idx = *idx,
108645d4122cSSamudrala, Sridhar 	};
1087472681d5SMINOURA Makoto / 箕浦 真 	int err;
108845d4122cSSamudrala, Sridhar 
1089472681d5SMINOURA Makoto / 箕浦 真 	err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
1090472681d5SMINOURA Makoto / 箕浦 真 				      switchdev_port_fdb_dump_cb);
1091d297653dSRoopa Prabhu 	*idx = dump.idx;
1092d297653dSRoopa Prabhu 	return err;
109345d4122cSSamudrala, Sridhar }
109445d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
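
/*
 * Wiring sketch (hypothetical): like the bridge link helpers, the FDB
 * helpers above match the ndo_fdb_* signatures and can be used as a port's
 * netdev ops directly (only the relevant fields are shown).
 */
static const struct net_device_ops mydrv_port_fdb_netdev_ops = {
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
};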
109545d4122cSSamudrala, Sridhar 
10968438884dSOr Gerlitz bool switchdev_port_same_parent_id(struct net_device *a,
10971a3b2ec9SScott Feldman 				   struct net_device *b)
10981a3b2ec9SScott Feldman {
10991a3b2ec9SScott Feldman 	struct switchdev_attr a_attr = {
11006ff64f6fSIdo Schimmel 		.orig_dev = a,
11011f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
11021a3b2ec9SScott Feldman 	};
11031a3b2ec9SScott Feldman 	struct switchdev_attr b_attr = {
11046ff64f6fSIdo Schimmel 		.orig_dev = b,
11051f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
11061a3b2ec9SScott Feldman 	};
11071a3b2ec9SScott Feldman 
11081a3b2ec9SScott Feldman 	if (switchdev_port_attr_get(a, &a_attr) ||
11091a3b2ec9SScott Feldman 	    switchdev_port_attr_get(b, &b_attr))
11101a3b2ec9SScott Feldman 		return false;
11111a3b2ec9SScott Feldman 
11121a3b2ec9SScott Feldman 	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
11131a3b2ec9SScott Feldman }
11142eb03e6cSOr Gerlitz EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
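
/*
 * Usage sketch (hypothetical): a LAG can only be offloaded as a single
 * logical port if all of its members sit behind the same switch ASIC,
 * which is exactly what the helper above answers.
 */
static bool mydrv_lag_on_one_asic(struct net_device *lag_dev)
{
	struct net_device *lower, *first = NULL;
	struct list_head *iter;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!first)
			first = lower;
		else if (!switchdev_port_same_parent_id(first, lower))
			return false;
	}
	return first != NULL;
}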
1115