xref: /linux-6.15/net/switchdev/switchdev.c (revision 2eb03e6c)
1007f790cSJiri Pirko /*
2007f790cSJiri Pirko  * net/switchdev/switchdev.c - Switch device API
37ea6eb3fSJiri Pirko  * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
4f8f21471SScott Feldman  * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
5007f790cSJiri Pirko  *
6007f790cSJiri Pirko  * This program is free software; you can redistribute it and/or modify
7007f790cSJiri Pirko  * it under the terms of the GNU General Public License as published by
8007f790cSJiri Pirko  * the Free Software Foundation; either version 2 of the License, or
9007f790cSJiri Pirko  * (at your option) any later version.
10007f790cSJiri Pirko  */
11007f790cSJiri Pirko 
12007f790cSJiri Pirko #include <linux/kernel.h>
13007f790cSJiri Pirko #include <linux/types.h>
14007f790cSJiri Pirko #include <linux/init.h>
1503bf0c28SJiri Pirko #include <linux/mutex.h>
1603bf0c28SJiri Pirko #include <linux/notifier.h>
17007f790cSJiri Pirko #include <linux/netdevice.h>
18850d0cbcSJiri Pirko #include <linux/etherdevice.h>
1947f8328bSScott Feldman #include <linux/if_bridge.h>
207ea6eb3fSJiri Pirko #include <linux/list.h>
21793f4014SJiri Pirko #include <linux/workqueue.h>
2287aaf2caSNikolay Aleksandrov #include <linux/if_vlan.h>
234f2c6ae5SIdo Schimmel #include <linux/rtnetlink.h>
245e8d9049SScott Feldman #include <net/ip_fib.h>
25007f790cSJiri Pirko #include <net/switchdev.h>
26007f790cSJiri Pirko 
/**
 *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 *	@trans: transaction
 *	@data: pointer to data being queued
 *	@destructor: data destructor
 *	@tritem: transaction item being queued
 *
 *	Enqueue data item to transaction queue. tritem is typically placed in
 *	container pointed at by data pointer. Destructor is called on
 *	transaction abort and after successful commit phase in case
 *	the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
497ea6eb3fSJiri Pirko 
507ea6eb3fSJiri Pirko static struct switchdev_trans_item *
517ea6eb3fSJiri Pirko __switchdev_trans_item_dequeue(struct switchdev_trans *trans)
527ea6eb3fSJiri Pirko {
537ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
547ea6eb3fSJiri Pirko 
557ea6eb3fSJiri Pirko 	if (list_empty(&trans->item_list))
567ea6eb3fSJiri Pirko 		return NULL;
577ea6eb3fSJiri Pirko 	tritem = list_first_entry(&trans->item_list,
587ea6eb3fSJiri Pirko 				  struct switchdev_trans_item, list);
597ea6eb3fSJiri Pirko 	list_del(&tritem->list);
607ea6eb3fSJiri Pirko 	return tritem;
617ea6eb3fSJiri Pirko }
627ea6eb3fSJiri Pirko 
637ea6eb3fSJiri Pirko /**
647ea6eb3fSJiri Pirko  *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
657ea6eb3fSJiri Pirko  *
667ea6eb3fSJiri Pirko  *	@trans: transaction
677ea6eb3fSJiri Pirko  */
687ea6eb3fSJiri Pirko void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
697ea6eb3fSJiri Pirko {
707ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
717ea6eb3fSJiri Pirko 
727ea6eb3fSJiri Pirko 	tritem = __switchdev_trans_item_dequeue(trans);
737ea6eb3fSJiri Pirko 	BUG_ON(!tritem);
747ea6eb3fSJiri Pirko 	return tritem->data;
757ea6eb3fSJiri Pirko }
767ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
777ea6eb3fSJiri Pirko 
/* Initialize a per-call transaction: start with an empty item queue. */
static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}
827ea6eb3fSJiri Pirko 
837ea6eb3fSJiri Pirko static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
847ea6eb3fSJiri Pirko {
857ea6eb3fSJiri Pirko 	struct switchdev_trans_item *tritem;
867ea6eb3fSJiri Pirko 
877ea6eb3fSJiri Pirko 	while ((tritem = __switchdev_trans_item_dequeue(trans)))
887ea6eb3fSJiri Pirko 		tritem->destructor(tritem->data);
897ea6eb3fSJiri Pirko }
907ea6eb3fSJiri Pirko 
917ea6eb3fSJiri Pirko static void switchdev_trans_items_warn_destroy(struct net_device *dev,
927ea6eb3fSJiri Pirko 					       struct switchdev_trans *trans)
937ea6eb3fSJiri Pirko {
947ea6eb3fSJiri Pirko 	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
957ea6eb3fSJiri Pirko 	     dev->name);
967ea6eb3fSJiri Pirko 	switchdev_trans_items_destroy(trans);
977ea6eb3fSJiri Pirko }
987ea6eb3fSJiri Pirko 
99793f4014SJiri Pirko static LIST_HEAD(deferred);
100793f4014SJiri Pirko static DEFINE_SPINLOCK(deferred_lock);
101793f4014SJiri Pirko 
102793f4014SJiri Pirko typedef void switchdev_deferred_func_t(struct net_device *dev,
103793f4014SJiri Pirko 				       const void *data);
104793f4014SJiri Pirko 
105793f4014SJiri Pirko struct switchdev_deferred_item {
106793f4014SJiri Pirko 	struct list_head list;
107793f4014SJiri Pirko 	struct net_device *dev;
108793f4014SJiri Pirko 	switchdev_deferred_func_t *func;
109793f4014SJiri Pirko 	unsigned long data[0];
110793f4014SJiri Pirko };
111793f4014SJiri Pirko 
112793f4014SJiri Pirko static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
113793f4014SJiri Pirko {
114793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
115793f4014SJiri Pirko 
116793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
117793f4014SJiri Pirko 	if (list_empty(&deferred)) {
118793f4014SJiri Pirko 		dfitem = NULL;
119793f4014SJiri Pirko 		goto unlock;
120793f4014SJiri Pirko 	}
121793f4014SJiri Pirko 	dfitem = list_first_entry(&deferred,
122793f4014SJiri Pirko 				  struct switchdev_deferred_item, list);
123793f4014SJiri Pirko 	list_del(&dfitem->list);
124793f4014SJiri Pirko unlock:
125793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
126793f4014SJiri Pirko 	return dfitem;
127793f4014SJiri Pirko }
128793f4014SJiri Pirko 
129793f4014SJiri Pirko /**
130793f4014SJiri Pirko  *	switchdev_deferred_process - Process ops in deferred queue
131793f4014SJiri Pirko  *
132793f4014SJiri Pirko  *	Called to flush the ops currently queued in deferred ops queue.
133793f4014SJiri Pirko  *	rtnl_lock must be held.
134793f4014SJiri Pirko  */
135793f4014SJiri Pirko void switchdev_deferred_process(void)
136793f4014SJiri Pirko {
137793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
138793f4014SJiri Pirko 
139793f4014SJiri Pirko 	ASSERT_RTNL();
140793f4014SJiri Pirko 
141793f4014SJiri Pirko 	while ((dfitem = switchdev_deferred_dequeue())) {
142793f4014SJiri Pirko 		dfitem->func(dfitem->dev, dfitem->data);
143793f4014SJiri Pirko 		dev_put(dfitem->dev);
144793f4014SJiri Pirko 		kfree(dfitem);
145793f4014SJiri Pirko 	}
146793f4014SJiri Pirko }
147793f4014SJiri Pirko EXPORT_SYMBOL_GPL(switchdev_deferred_process);
148793f4014SJiri Pirko 
/* Workqueue side of the deferred queue: take rtnl_lock, as
 * switchdev_deferred_process() requires, and flush the queue.
 */
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
157793f4014SJiri Pirko 
158793f4014SJiri Pirko static int switchdev_deferred_enqueue(struct net_device *dev,
159793f4014SJiri Pirko 				      const void *data, size_t data_len,
160793f4014SJiri Pirko 				      switchdev_deferred_func_t *func)
161793f4014SJiri Pirko {
162793f4014SJiri Pirko 	struct switchdev_deferred_item *dfitem;
163793f4014SJiri Pirko 
164793f4014SJiri Pirko 	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
165793f4014SJiri Pirko 	if (!dfitem)
166793f4014SJiri Pirko 		return -ENOMEM;
167793f4014SJiri Pirko 	dfitem->dev = dev;
168793f4014SJiri Pirko 	dfitem->func = func;
169793f4014SJiri Pirko 	memcpy(dfitem->data, data, data_len);
170793f4014SJiri Pirko 	dev_hold(dev);
171793f4014SJiri Pirko 	spin_lock_bh(&deferred_lock);
172793f4014SJiri Pirko 	list_add_tail(&dfitem->list, &deferred);
173793f4014SJiri Pirko 	spin_unlock_bh(&deferred_lock);
174793f4014SJiri Pirko 	schedule_work(&deferred_process_work);
175793f4014SJiri Pirko 	return 0;
176793f4014SJiri Pirko }
177793f4014SJiri Pirko 
1787ea6eb3fSJiri Pirko /**
1793094333dSScott Feldman  *	switchdev_port_attr_get - Get port attribute
1803094333dSScott Feldman  *
1813094333dSScott Feldman  *	@dev: port device
1823094333dSScott Feldman  *	@attr: attribute to get
1833094333dSScott Feldman  */
1843094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
1853094333dSScott Feldman {
1863094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
1873094333dSScott Feldman 	struct net_device *lower_dev;
1883094333dSScott Feldman 	struct list_head *iter;
1893094333dSScott Feldman 	struct switchdev_attr first = {
1901f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_UNDEFINED
1913094333dSScott Feldman 	};
1923094333dSScott Feldman 	int err = -EOPNOTSUPP;
1933094333dSScott Feldman 
1943094333dSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
1953094333dSScott Feldman 		return ops->switchdev_port_attr_get(dev, attr);
1963094333dSScott Feldman 
1973094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
1983094333dSScott Feldman 		return err;
1993094333dSScott Feldman 
2003094333dSScott Feldman 	/* Switch device port(s) may be stacked under
2013094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to get attr on
2023094333dSScott Feldman 	 * each port.  Return -ENODATA if attr values don't
2033094333dSScott Feldman 	 * compare across ports.
2043094333dSScott Feldman 	 */
2053094333dSScott Feldman 
2063094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2073094333dSScott Feldman 		err = switchdev_port_attr_get(lower_dev, attr);
2083094333dSScott Feldman 		if (err)
2093094333dSScott Feldman 			break;
2101f868398SJiri Pirko 		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
2113094333dSScott Feldman 			first = *attr;
2123094333dSScott Feldman 		else if (memcmp(&first, attr, sizeof(*attr)))
2133094333dSScott Feldman 			return -ENODATA;
2143094333dSScott Feldman 	}
2153094333dSScott Feldman 
2163094333dSScott Feldman 	return err;
2173094333dSScott Feldman }
2183094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
2193094333dSScott Feldman 
2203094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev,
221f7fadf30SJiri Pirko 				     const struct switchdev_attr *attr,
2227ea6eb3fSJiri Pirko 				     struct switchdev_trans *trans)
2233094333dSScott Feldman {
2243094333dSScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
2253094333dSScott Feldman 	struct net_device *lower_dev;
2263094333dSScott Feldman 	struct list_head *iter;
2273094333dSScott Feldman 	int err = -EOPNOTSUPP;
2283094333dSScott Feldman 
2290c63d80cSJiri Pirko 	if (ops && ops->switchdev_port_attr_set) {
2300c63d80cSJiri Pirko 		err = ops->switchdev_port_attr_set(dev, attr, trans);
2310c63d80cSJiri Pirko 		goto done;
2320c63d80cSJiri Pirko 	}
2333094333dSScott Feldman 
2343094333dSScott Feldman 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
235464314eaSScott Feldman 		goto done;
2363094333dSScott Feldman 
2373094333dSScott Feldman 	/* Switch device port(s) may be stacked under
2383094333dSScott Feldman 	 * bond/team/vlan dev, so recurse down to set attr on
2393094333dSScott Feldman 	 * each port.
2403094333dSScott Feldman 	 */
2413094333dSScott Feldman 
2423094333dSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
2437ea6eb3fSJiri Pirko 		err = __switchdev_port_attr_set(lower_dev, attr, trans);
2443094333dSScott Feldman 		if (err)
2453094333dSScott Feldman 			break;
2463094333dSScott Feldman 	}
2473094333dSScott Feldman 
248464314eaSScott Feldman done:
249464314eaSScott Feldman 	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
250464314eaSScott Feldman 		err = 0;
251464314eaSScott Feldman 
2523094333dSScott Feldman 	return err;
2533094333dSScott Feldman }
2543094333dSScott Feldman 
/* Set an attr on @dev immediately, using the 2-phase prepare/commit model. */
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.  On -EOPNOTSUPP no driver handled the
		 * transaction, so there is nothing queued to destroy.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}
2970bc05d58SJiri Pirko 
2980bc05d58SJiri Pirko static void switchdev_port_attr_set_deferred(struct net_device *dev,
2990bc05d58SJiri Pirko 					     const void *data)
3000bc05d58SJiri Pirko {
3010bc05d58SJiri Pirko 	const struct switchdev_attr *attr = data;
3020bc05d58SJiri Pirko 	int err;
3030bc05d58SJiri Pirko 
3040bc05d58SJiri Pirko 	err = switchdev_port_attr_set_now(dev, attr);
3050bc05d58SJiri Pirko 	if (err && err != -EOPNOTSUPP)
3060bc05d58SJiri Pirko 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
3070bc05d58SJiri Pirko 			   err, attr->id);
3087ceb2afbSElad Raz 	if (attr->complete)
3097ceb2afbSElad Raz 		attr->complete(dev, err, attr->complete_priv);
3100bc05d58SJiri Pirko }
3110bc05d58SJiri Pirko 
/* Queue the attr for deferred setting.  The attr is copied by value
 * (sizeof(*attr) bytes), so the caller's copy need not outlive this call.
 */
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
3180bc05d58SJiri Pirko 
3190bc05d58SJiri Pirko /**
3200bc05d58SJiri Pirko  *	switchdev_port_attr_set - Set port attribute
3210bc05d58SJiri Pirko  *
3220bc05d58SJiri Pirko  *	@dev: port device
3230bc05d58SJiri Pirko  *	@attr: attribute to set
3240bc05d58SJiri Pirko  *
3250bc05d58SJiri Pirko  *	Use a 2-phase prepare-commit transaction model to ensure
3260bc05d58SJiri Pirko  *	system is not left in a partially updated state due to
3270bc05d58SJiri Pirko  *	failure from driver/device.
3280bc05d58SJiri Pirko  *
3290bc05d58SJiri Pirko  *	rtnl_lock must be held and must not be in atomic section,
3300bc05d58SJiri Pirko  *	in case SWITCHDEV_F_DEFER flag is not set.
3310bc05d58SJiri Pirko  */
3320bc05d58SJiri Pirko int switchdev_port_attr_set(struct net_device *dev,
3330bc05d58SJiri Pirko 			    const struct switchdev_attr *attr)
3340bc05d58SJiri Pirko {
3350bc05d58SJiri Pirko 	if (attr->flags & SWITCHDEV_F_DEFER)
3360bc05d58SJiri Pirko 		return switchdev_port_attr_set_defer(dev, attr);
3370bc05d58SJiri Pirko 	ASSERT_RTNL();
3380bc05d58SJiri Pirko 	return switchdev_port_attr_set_now(dev, attr);
3390bc05d58SJiri Pirko }
3403094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
3413094333dSScott Feldman 
/* Byte size of a switchdev object, used to size the copy made when an
 * add/del is deferred.  An unknown object id is a caller bug: BUG().
 */
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		return sizeof(struct switchdev_obj_ipv4_fib);
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		return sizeof(struct switchdev_obj_port_fdb);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	/* not reached; keeps the compiler happy */
	return 0;
}
358e258d919SScott Feldman 
35922c1f67eSScott Feldman static int __switchdev_port_obj_add(struct net_device *dev,
360648b4a99SJiri Pirko 				    const struct switchdev_obj *obj,
3617ea6eb3fSJiri Pirko 				    struct switchdev_trans *trans)
362491d0f15SScott Feldman {
363491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
364491d0f15SScott Feldman 	struct net_device *lower_dev;
365491d0f15SScott Feldman 	struct list_head *iter;
366491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
367491d0f15SScott Feldman 
368491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_add)
3699e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_add(dev, obj, trans);
370491d0f15SScott Feldman 
371491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
372491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to add object on
373491d0f15SScott Feldman 	 * each port.
374491d0f15SScott Feldman 	 */
375491d0f15SScott Feldman 
376491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
3779e8f4a54SJiri Pirko 		err = __switchdev_port_obj_add(lower_dev, obj, trans);
378491d0f15SScott Feldman 		if (err)
379491d0f15SScott Feldman 			break;
380491d0f15SScott Feldman 	}
381491d0f15SScott Feldman 
382491d0f15SScott Feldman 	return err;
383491d0f15SScott Feldman }
384491d0f15SScott Feldman 
/* Add an object on @dev immediately, using the 2-phase prepare/commit model. */
static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.  On -EOPNOTSUPP no driver handled the
		 * transaction, so there is nothing queued to destroy.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_obj_add(dev, obj, &trans);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}
4284d429c5dSJiri Pirko 
4294d429c5dSJiri Pirko static void switchdev_port_obj_add_deferred(struct net_device *dev,
4304d429c5dSJiri Pirko 					    const void *data)
4314d429c5dSJiri Pirko {
4324d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
4334d429c5dSJiri Pirko 	int err;
4344d429c5dSJiri Pirko 
4354d429c5dSJiri Pirko 	err = switchdev_port_obj_add_now(dev, obj);
4364d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
4374d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
4384d429c5dSJiri Pirko 			   err, obj->id);
4397ceb2afbSElad Raz 	if (obj->complete)
4407ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
4414d429c5dSJiri Pirko }
4424d429c5dSJiri Pirko 
/* Queue the object for deferred addition.  switchdev_obj_size() gives the
 * number of bytes to copy, so the caller's object may be stack-allocated.
 */
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
449491d0f15SScott Feldman 
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
4734d429c5dSJiri Pirko 
4744d429c5dSJiri Pirko static int switchdev_port_obj_del_now(struct net_device *dev,
475648b4a99SJiri Pirko 				      const struct switchdev_obj *obj)
476491d0f15SScott Feldman {
477491d0f15SScott Feldman 	const struct switchdev_ops *ops = dev->switchdev_ops;
478491d0f15SScott Feldman 	struct net_device *lower_dev;
479491d0f15SScott Feldman 	struct list_head *iter;
480491d0f15SScott Feldman 	int err = -EOPNOTSUPP;
481491d0f15SScott Feldman 
482491d0f15SScott Feldman 	if (ops && ops->switchdev_port_obj_del)
4839e8f4a54SJiri Pirko 		return ops->switchdev_port_obj_del(dev, obj);
484491d0f15SScott Feldman 
485491d0f15SScott Feldman 	/* Switch device port(s) may be stacked under
486491d0f15SScott Feldman 	 * bond/team/vlan dev, so recurse down to delete object on
487491d0f15SScott Feldman 	 * each port.
488491d0f15SScott Feldman 	 */
489491d0f15SScott Feldman 
490491d0f15SScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
4914d429c5dSJiri Pirko 		err = switchdev_port_obj_del_now(lower_dev, obj);
492491d0f15SScott Feldman 		if (err)
493491d0f15SScott Feldman 			break;
494491d0f15SScott Feldman 	}
495491d0f15SScott Feldman 
496491d0f15SScott Feldman 	return err;
497491d0f15SScott Feldman }
4984d429c5dSJiri Pirko 
4994d429c5dSJiri Pirko static void switchdev_port_obj_del_deferred(struct net_device *dev,
5004d429c5dSJiri Pirko 					    const void *data)
5014d429c5dSJiri Pirko {
5024d429c5dSJiri Pirko 	const struct switchdev_obj *obj = data;
5034d429c5dSJiri Pirko 	int err;
5044d429c5dSJiri Pirko 
5054d429c5dSJiri Pirko 	err = switchdev_port_obj_del_now(dev, obj);
5064d429c5dSJiri Pirko 	if (err && err != -EOPNOTSUPP)
5074d429c5dSJiri Pirko 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
5084d429c5dSJiri Pirko 			   err, obj->id);
5097ceb2afbSElad Raz 	if (obj->complete)
5107ceb2afbSElad Raz 		obj->complete(dev, err, obj->complete_priv);
5114d429c5dSJiri Pirko }
5124d429c5dSJiri Pirko 
/* Queue the object for deferred deletion.  switchdev_obj_size() gives the
 * number of bytes to copy, so the caller's object may be stack-allocated.
 */
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
5194d429c5dSJiri Pirko 
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
539491d0f15SScott Feldman 
/**
 *	switchdev_port_obj_dump - Dump port objects
 *
 *	@dev: port device
 *	@obj: object to dump
 *	@cb: function to call with a filled object
 *
 *	rtnl_lock must be held.
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	ASSERT_RTNL();

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj, cb);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj, cb);
		break;	/* deliberately only the first lower dev */
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
57645d4122cSSamudrala, Sridhar 
577ebb9a03aSJiri Pirko static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
57803bf0c28SJiri Pirko 
57903bf0c28SJiri Pirko /**
580ebb9a03aSJiri Pirko  *	register_switchdev_notifier - Register notifier
58103bf0c28SJiri Pirko  *	@nb: notifier_block
58203bf0c28SJiri Pirko  *
58303bf0c28SJiri Pirko  *	Register switch device notifier. This should be used by code
58403bf0c28SJiri Pirko  *	which needs to monitor events happening in particular device.
58503bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_register().
58603bf0c28SJiri Pirko  */
587ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb)
58803bf0c28SJiri Pirko {
58903bf0c28SJiri Pirko 	int err;
59003bf0c28SJiri Pirko 
5914f2c6ae5SIdo Schimmel 	rtnl_lock();
592ebb9a03aSJiri Pirko 	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
5934f2c6ae5SIdo Schimmel 	rtnl_unlock();
59403bf0c28SJiri Pirko 	return err;
59503bf0c28SJiri Pirko }
596ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier);
59703bf0c28SJiri Pirko 
59803bf0c28SJiri Pirko /**
599ebb9a03aSJiri Pirko  *	unregister_switchdev_notifier - Unregister notifier
60003bf0c28SJiri Pirko  *	@nb: notifier_block
60103bf0c28SJiri Pirko  *
60203bf0c28SJiri Pirko  *	Unregister switch device notifier.
60303bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_chain_unregister().
60403bf0c28SJiri Pirko  */
605ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb)
60603bf0c28SJiri Pirko {
60703bf0c28SJiri Pirko 	int err;
60803bf0c28SJiri Pirko 
6094f2c6ae5SIdo Schimmel 	rtnl_lock();
610ebb9a03aSJiri Pirko 	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
6114f2c6ae5SIdo Schimmel 	rtnl_unlock();
61203bf0c28SJiri Pirko 	return err;
61303bf0c28SJiri Pirko }
614ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
61503bf0c28SJiri Pirko 
61603bf0c28SJiri Pirko /**
617ebb9a03aSJiri Pirko  *	call_switchdev_notifiers - Call notifiers
61803bf0c28SJiri Pirko  *	@val: value passed unmodified to notifier function
61903bf0c28SJiri Pirko  *	@dev: port device
62003bf0c28SJiri Pirko  *	@info: notifier information data
62103bf0c28SJiri Pirko  *
62203bf0c28SJiri Pirko  *	Call all network notifier blocks. This should be called by driver
62303bf0c28SJiri Pirko  *	when it needs to propagate hardware event.
62403bf0c28SJiri Pirko  *	Return values are same as for atomic_notifier_call_chain().
6254f2c6ae5SIdo Schimmel  *	rtnl_lock must be held.
62603bf0c28SJiri Pirko  */
627ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
628ebb9a03aSJiri Pirko 			     struct switchdev_notifier_info *info)
62903bf0c28SJiri Pirko {
63003bf0c28SJiri Pirko 	int err;
63103bf0c28SJiri Pirko 
6324f2c6ae5SIdo Schimmel 	ASSERT_RTNL();
6334f2c6ae5SIdo Schimmel 
63403bf0c28SJiri Pirko 	info->dev = dev;
635ebb9a03aSJiri Pirko 	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
63603bf0c28SJiri Pirko 	return err;
63703bf0c28SJiri Pirko }
638ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
6398a44dbb2SRoopa Prabhu 
/* State threaded through a port VLAN dump.  vids reported by the driver
 * are accumulated into the pending range [begin, end] (flags in @flags)
 * before being emitted as netlink attributes into @skb.
 */
6407d4f8d87SScott Feldman struct switchdev_vlan_dump {
6418f24f309SJiri Pirko 	struct switchdev_obj_port_vlan vlan;	/* object handed to the driver dump op */
6427d4f8d87SScott Feldman 	struct sk_buff *skb;		/* netlink reply being filled */
6437d4f8d87SScott Feldman 	u32 filter_mask;		/* RTEXT_FILTER_BRVLAN[_COMPRESSED] */
6447d4f8d87SScott Feldman 	u16 flags;			/* bridge_vlan_info flags of pending range */
6457d4f8d87SScott Feldman 	u16 begin;			/* pending range start vid (0 = none pending) */
6467d4f8d87SScott Feldman 	u16 end;			/* pending range end vid */
6477d4f8d87SScott Feldman };
6487d4f8d87SScott Feldman 
649e23b002bSVivien Didelot static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
6507d4f8d87SScott Feldman {
6517d4f8d87SScott Feldman 	struct bridge_vlan_info vinfo;
6527d4f8d87SScott Feldman 
6537d4f8d87SScott Feldman 	vinfo.flags = dump->flags;
6547d4f8d87SScott Feldman 
6557d4f8d87SScott Feldman 	if (dump->begin == 0 && dump->end == 0) {
6567d4f8d87SScott Feldman 		return 0;
6577d4f8d87SScott Feldman 	} else if (dump->begin == dump->end) {
6587d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
6597d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6607d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6617d4f8d87SScott Feldman 			return -EMSGSIZE;
6627d4f8d87SScott Feldman 	} else {
6637d4f8d87SScott Feldman 		vinfo.vid = dump->begin;
6647d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
6657d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6667d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6677d4f8d87SScott Feldman 			return -EMSGSIZE;
6687d4f8d87SScott Feldman 		vinfo.vid = dump->end;
6697d4f8d87SScott Feldman 		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
6707d4f8d87SScott Feldman 		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
6717d4f8d87SScott Feldman 		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
6727d4f8d87SScott Feldman 			    sizeof(vinfo), &vinfo))
6737d4f8d87SScott Feldman 			return -EMSGSIZE;
6747d4f8d87SScott Feldman 	}
6757d4f8d87SScott Feldman 
6767d4f8d87SScott Feldman 	return 0;
6777d4f8d87SScott Feldman }
6787d4f8d87SScott Feldman 
/* Driver dump callback: invoked once per VLAN object (a vid or vid range)
 * the port reports.  For RTEXT_FILTER_BRVLAN every vid is emitted as its
 * own attribute; for RTEXT_FILTER_BRVLAN_COMPRESSED adjacent vids with
 * equal flags are coalesced into the dump's pending range, which is
 * flushed whenever it cannot be extended.
 */
679648b4a99SJiri Pirko static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
6807d4f8d87SScott Feldman {
681648b4a99SJiri Pirko 	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
6827d4f8d87SScott Feldman 	struct switchdev_vlan_dump *dump =
68325f07adcSVivien Didelot 		container_of(vlan, struct switchdev_vlan_dump, vlan);
6847d4f8d87SScott Feldman 	int err = 0;
6857d4f8d87SScott Feldman 
6867d4f8d87SScott Feldman 	if (vlan->vid_begin > vlan->vid_end)
6877d4f8d87SScott Feldman 		return -EINVAL;
6887d4f8d87SScott Feldman 
	/* Uncompressed: one attribute per vid in the reported range. */
6897d4f8d87SScott Feldman 	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
6907d4f8d87SScott Feldman 		dump->flags = vlan->flags;
6917d4f8d87SScott Feldman 		for (dump->begin = dump->end = vlan->vid_begin;
6927d4f8d87SScott Feldman 		     dump->begin <= vlan->vid_end;
6937d4f8d87SScott Feldman 		     dump->begin++, dump->end++) {
694e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(dump);
6957d4f8d87SScott Feldman 			if (err)
6967d4f8d87SScott Feldman 				return err;
6977d4f8d87SScott Feldman 		}
6987d4f8d87SScott Feldman 	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		/* Reported range lies entirely below the pending one.
		 * NOTE(review): asymmetric with the append test below
		 * (>= here vs < there) - confirm intended.
		 */
6997d4f8d87SScott Feldman 		if (dump->begin > vlan->vid_begin &&
7007d4f8d87SScott Feldman 		    dump->begin >= vlan->vid_end) {
7017d4f8d87SScott Feldman 			if ((dump->begin - 1) == vlan->vid_end &&
7027d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
7037d4f8d87SScott Feldman 				/* prepend */
7047d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
7057d4f8d87SScott Feldman 			} else {
				/* not adjacent or flags differ: flush
				 * pending range, start a new one
				 */
706e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
7077d4f8d87SScott Feldman 				dump->flags = vlan->flags;
7087d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
7097d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
7107d4f8d87SScott Feldman 			}
		/* Reported range lies entirely above the pending one. */
7117d4f8d87SScott Feldman 		} else if (dump->end <= vlan->vid_begin &&
7127d4f8d87SScott Feldman 		           dump->end < vlan->vid_end) {
7137d4f8d87SScott Feldman 			if ((dump->end  + 1) == vlan->vid_begin &&
7147d4f8d87SScott Feldman 			    dump->flags == vlan->flags) {
7157d4f8d87SScott Feldman 				/* append */
7167d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
7177d4f8d87SScott Feldman 			} else {
718e23b002bSVivien Didelot 				err = switchdev_port_vlan_dump_put(dump);
7197d4f8d87SScott Feldman 				dump->flags = vlan->flags;
7207d4f8d87SScott Feldman 				dump->begin = vlan->vid_begin;
7217d4f8d87SScott Feldman 				dump->end = vlan->vid_end;
7227d4f8d87SScott Feldman 			}
7237d4f8d87SScott Feldman 		} else {
			/* overlapping/unordered report from the driver */
7247d4f8d87SScott Feldman 			err = -EINVAL;
7257d4f8d87SScott Feldman 		}
7267d4f8d87SScott Feldman 	}
7277d4f8d87SScott Feldman 
7287d4f8d87SScott Feldman 	return err;
7297d4f8d87SScott Feldman }
7307d4f8d87SScott Feldman 
/* Fill the VLAN portion of a bridge getlink reply by asking the driver to
 * dump its VLAN objects through switchdev_port_vlan_dump_cb.  A driver
 * returning -EOPNOTSUPP is treated as "no VLANs", not as an error.
 */
7317d4f8d87SScott Feldman static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
7327d4f8d87SScott Feldman 				    u32 filter_mask)
7337d4f8d87SScott Feldman {
7347d4f8d87SScott Feldman 	struct switchdev_vlan_dump dump = {
7356ff64f6fSIdo Schimmel 		.vlan.obj.orig_dev = dev,
7369e8f4a54SJiri Pirko 		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
7377d4f8d87SScott Feldman 		.skb = skb,
7387d4f8d87SScott Feldman 		.filter_mask = filter_mask,
7397d4f8d87SScott Feldman 	};
7407d4f8d87SScott Feldman 	int err = 0;
7417d4f8d87SScott Feldman 
7427d4f8d87SScott Feldman 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
7437d4f8d87SScott Feldman 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
7449e8f4a54SJiri Pirko 		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
74525f07adcSVivien Didelot 					      switchdev_port_vlan_dump_cb);
7467d4f8d87SScott Feldman 		if (err)
7477d4f8d87SScott Feldman 			goto err_out;
		/* compressed mode leaves the final coalesced range
		 * pending in dump - flush it now
		 */
7487d4f8d87SScott Feldman 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
7497d4f8d87SScott Feldman 			/* last one */
750e23b002bSVivien Didelot 			err = switchdev_port_vlan_dump_put(&dump);
7517d4f8d87SScott Feldman 	}
7527d4f8d87SScott Feldman 
7537d4f8d87SScott Feldman err_out:
7547d4f8d87SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
7557d4f8d87SScott Feldman }
7567d4f8d87SScott Feldman 
7578793d0a6SScott Feldman /**
7588793d0a6SScott Feldman  *	switchdev_port_bridge_getlink - Get bridge port attributes
7598793d0a6SScott Feldman  *
 *	@skb: netlink skb to fill
 *	@pid: netlink pid of the requester
 *	@seq: netlink sequence number
7608793d0a6SScott Feldman  *	@dev: port device
 *	@filter_mask: filter mask (RTEXT_FILTER_*)
 *	@nlflags: netlink flags passed in (NLM_F_*)
7618793d0a6SScott Feldman  *
7628793d0a6SScott Feldman  *	Called for SELF on rtnl_bridge_getlink to get bridge port
7638793d0a6SScott Feldman  *	attributes.
7648793d0a6SScott Feldman  */
7658793d0a6SScott Feldman int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7668793d0a6SScott Feldman 				  struct net_device *dev, u32 filter_mask,
7678793d0a6SScott Feldman 				  int nlflags)
7688793d0a6SScott Feldman {
7698793d0a6SScott Feldman 	struct switchdev_attr attr = {
7706ff64f6fSIdo Schimmel 		.orig_dev = dev,
7711f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
7728793d0a6SScott Feldman 	};
7738793d0a6SScott Feldman 	u16 mode = BRIDGE_MODE_UNDEF;
7748793d0a6SScott Feldman 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
7758793d0a6SScott Feldman 	int err;
7768793d0a6SScott Feldman 
	/* -EOPNOTSUPP is tolerated: a driver without bridge-flag support
	 * still gets a default getlink reply.
	 */
7778793d0a6SScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
7785c8079d0SVivien Didelot 	if (err && err != -EOPNOTSUPP)
7798793d0a6SScott Feldman 		return err;
7808793d0a6SScott Feldman 
7818793d0a6SScott Feldman 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
7827d4f8d87SScott Feldman 				       attr.u.brport_flags, mask, nlflags,
7837d4f8d87SScott Feldman 				       filter_mask, switchdev_port_vlan_fill);
7848793d0a6SScott Feldman }
7858793d0a6SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
7868793d0a6SScott Feldman 
78747f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev,
78847f8328bSScott Feldman 				     struct nlattr *nlattr,
78947f8328bSScott Feldman 				     unsigned long brport_flag)
79047f8328bSScott Feldman {
79147f8328bSScott Feldman 	struct switchdev_attr attr = {
7926ff64f6fSIdo Schimmel 		.orig_dev = dev,
7931f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
79447f8328bSScott Feldman 	};
79547f8328bSScott Feldman 	u8 flag = nla_get_u8(nlattr);
79647f8328bSScott Feldman 	int err;
79747f8328bSScott Feldman 
79847f8328bSScott Feldman 	err = switchdev_port_attr_get(dev, &attr);
79947f8328bSScott Feldman 	if (err)
80047f8328bSScott Feldman 		return err;
80147f8328bSScott Feldman 
80247f8328bSScott Feldman 	if (flag)
80342275bd8SScott Feldman 		attr.u.brport_flags |= brport_flag;
80447f8328bSScott Feldman 	else
80542275bd8SScott Feldman 		attr.u.brport_flags &= ~brport_flag;
80647f8328bSScott Feldman 
80747f8328bSScott Feldman 	return switchdev_port_attr_set(dev, &attr);
80847f8328bSScott Feldman }
80947f8328bSScott Feldman 
/* Netlink policy used to validate the IFLA_PROTINFO nest of a bridge
 * setlink request before individual attributes are applied.
 */
81047f8328bSScott Feldman static const struct nla_policy
81147f8328bSScott Feldman switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
81247f8328bSScott Feldman 	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
81347f8328bSScott Feldman 	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
81447f8328bSScott Feldman 	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
81547f8328bSScott Feldman 	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
81647f8328bSScott Feldman 	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
81747f8328bSScott Feldman 	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
81847f8328bSScott Feldman 	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
81947f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
82047f8328bSScott Feldman 	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
82147f8328bSScott Feldman 	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
82247f8328bSScott Feldman };
82347f8328bSScott Feldman 
/* Apply each attribute of a validated IFLA_PROTINFO nest to the port.
 * Only the LEARNING, LEARNING_SYNC and UNICAST_FLOOD flags are handled;
 * any other attribute fails the whole request with -EOPNOTSUPP.
 */
82447f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
82547f8328bSScott Feldman 					      struct nlattr *protinfo)
82647f8328bSScott Feldman {
82747f8328bSScott Feldman 	struct nlattr *attr;
82847f8328bSScott Feldman 	int rem;
82947f8328bSScott Feldman 	int err;
83047f8328bSScott Feldman 
	/* Validate types/lengths up front so the loop can trust nla_get. */
83147f8328bSScott Feldman 	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
83247f8328bSScott Feldman 				  switchdev_port_bridge_policy);
83347f8328bSScott Feldman 	if (err)
83447f8328bSScott Feldman 		return err;
83547f8328bSScott Feldman 
83647f8328bSScott Feldman 	nla_for_each_nested(attr, protinfo, rem) {
83747f8328bSScott Feldman 		switch (nla_type(attr)) {
83847f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING:
83947f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
84047f8328bSScott Feldman 							BR_LEARNING);
84147f8328bSScott Feldman 			break;
84247f8328bSScott Feldman 		case IFLA_BRPORT_LEARNING_SYNC:
84347f8328bSScott Feldman 			err = switchdev_port_br_setflag(dev, attr,
84447f8328bSScott Feldman 							BR_LEARNING_SYNC);
84547f8328bSScott Feldman 			break;
846741af005SIdo Schimmel 		case IFLA_BRPORT_UNICAST_FLOOD:
847741af005SIdo Schimmel 			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
848741af005SIdo Schimmel 			break;
84947f8328bSScott Feldman 		default:
84947f8328bSScott Feldman 			err = -EOPNOTSUPP;
85147f8328bSScott Feldman 			break;
85247f8328bSScott Feldman 		}
		/* fail fast: first unsupported/failed attribute aborts */
85347f8328bSScott Feldman 		if (err)
85447f8328bSScott Feldman 			return err;
85547f8328bSScott Feldman 	}
85647f8328bSScott Feldman 
85747f8328bSScott Feldman 	return 0;
85847f8328bSScott Feldman }
85947f8328bSScott Feldman 
/* Walk the IFLA_AF_SPEC nest of a setlink/dellink request and apply @f
 * (switchdev_port_obj_add or switchdev_port_obj_del) to each
 * IFLA_BRIDGE_VLAN_INFO entry.  A RANGE_BEGIN attribute opens a vid range
 * that must be closed by a matching RANGE_END before @f is invoked on the
 * whole range; a plain entry is applied as a single-vid range.
 * vlan.vid_begin != 0 doubles as the "range currently open" marker.
 */
86047f8328bSScott Feldman static int switchdev_port_br_afspec(struct net_device *dev,
86147f8328bSScott Feldman 				    struct nlattr *afspec,
86247f8328bSScott Feldman 				    int (*f)(struct net_device *dev,
863648b4a99SJiri Pirko 					     const struct switchdev_obj *obj))
86447f8328bSScott Feldman {
86547f8328bSScott Feldman 	struct nlattr *attr;
86647f8328bSScott Feldman 	struct bridge_vlan_info *vinfo;
8679e8f4a54SJiri Pirko 	struct switchdev_obj_port_vlan vlan = {
8686ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
8699e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
8709e8f4a54SJiri Pirko 	};
87147f8328bSScott Feldman 	int rem;
87247f8328bSScott Feldman 	int err;
87347f8328bSScott Feldman 
87447f8328bSScott Feldman 	nla_for_each_nested(attr, afspec, rem) {
87547f8328bSScott Feldman 		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
87647f8328bSScott Feldman 			continue;
87747f8328bSScott Feldman 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
87847f8328bSScott Feldman 			return -EINVAL;
87947f8328bSScott Feldman 		vinfo = nla_data(attr);
		/* vid 0 and vids >= VLAN_VID_MASK (4095) are invalid */
88087aaf2caSNikolay Aleksandrov 		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
88187aaf2caSNikolay Aleksandrov 			return -EINVAL;
882ab069002SVivien Didelot 		vlan.flags = vinfo->flags;
88347f8328bSScott Feldman 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			/* nested RANGE_BEGIN while a range is open */
884ab069002SVivien Didelot 			if (vlan.vid_begin)
88547f8328bSScott Feldman 				return -EINVAL;
886ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
887cc02aa8eSNikolay Aleksandrov 			/* don't allow range of pvids */
888cc02aa8eSNikolay Aleksandrov 			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
889cc02aa8eSNikolay Aleksandrov 				return -EINVAL;
89047f8328bSScott Feldman 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			/* RANGE_END without a preceding RANGE_BEGIN */
891ab069002SVivien Didelot 			if (!vlan.vid_begin)
89247f8328bSScott Feldman 				return -EINVAL;
893ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
894ab069002SVivien Didelot 			if (vlan.vid_end <= vlan.vid_begin)
89547f8328bSScott Feldman 				return -EINVAL;
8969e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
89747f8328bSScott Feldman 			if (err)
89847f8328bSScott Feldman 				return err;
			/* range consumed - mark it closed */
8993a7bde55SScott Feldman 			vlan.vid_begin = 0;
90047f8328bSScott Feldman 		} else {
			/* plain entry not allowed inside an open range */
901ab069002SVivien Didelot 			if (vlan.vid_begin)
90247f8328bSScott Feldman 				return -EINVAL;
903ab069002SVivien Didelot 			vlan.vid_begin = vinfo->vid;
904ab069002SVivien Didelot 			vlan.vid_end = vinfo->vid;
9059e8f4a54SJiri Pirko 			err = f(dev, &vlan.obj);
90647f8328bSScott Feldman 			if (err)
90747f8328bSScott Feldman 				return err;
9083a7bde55SScott Feldman 			vlan.vid_begin = 0;
90947f8328bSScott Feldman 		}
91047f8328bSScott Feldman 	}
91147f8328bSScott Feldman 
91247f8328bSScott Feldman 	return 0;
91347f8328bSScott Feldman }
91447f8328bSScott Feldman 
9158a44dbb2SRoopa Prabhu /**
91647f8328bSScott Feldman  *	switchdev_port_bridge_setlink - Set bridge port attributes
9178a44dbb2SRoopa Prabhu  *
9188a44dbb2SRoopa Prabhu  *	@dev: port device
91947f8328bSScott Feldman  *	@nlh: netlink header
92047f8328bSScott Feldman  *	@flags: netlink flags
9218a44dbb2SRoopa Prabhu  *
92247f8328bSScott Feldman  *	Called for SELF on rtnl_bridge_setlink to set bridge port
92347f8328bSScott Feldman  *	attributes.
9248a44dbb2SRoopa Prabhu  */
925ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev,
9268a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
9278a44dbb2SRoopa Prabhu {
92847f8328bSScott Feldman 	struct nlattr *protinfo;
92947f8328bSScott Feldman 	struct nlattr *afspec;
93047f8328bSScott Feldman 	int err = 0;
9318a44dbb2SRoopa Prabhu 
	/* IFLA_PROTINFO carries per-port bridge flags (learning etc.) */
93247f8328bSScott Feldman 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
93347f8328bSScott Feldman 				   IFLA_PROTINFO);
93447f8328bSScott Feldman 	if (protinfo) {
93547f8328bSScott Feldman 		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
93647f8328bSScott Feldman 		if (err)
93747f8328bSScott Feldman 			return err;
93847f8328bSScott Feldman 	}
9398a44dbb2SRoopa Prabhu 
	/* IFLA_AF_SPEC carries VLAN entries to add to the port */
94047f8328bSScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
94147f8328bSScott Feldman 				 IFLA_AF_SPEC);
94247f8328bSScott Feldman 	if (afspec)
94347f8328bSScott Feldman 		err = switchdev_port_br_afspec(dev, afspec,
94447f8328bSScott Feldman 					       switchdev_port_obj_add);
9458a44dbb2SRoopa Prabhu 
94647f8328bSScott Feldman 	return err;
9478a44dbb2SRoopa Prabhu }
948ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
9498a44dbb2SRoopa Prabhu 
9508a44dbb2SRoopa Prabhu /**
9515c34e022SScott Feldman  *	switchdev_port_bridge_dellink - Delete bridge port attributes
9528a44dbb2SRoopa Prabhu  *
9538a44dbb2SRoopa Prabhu  *	@dev: port device
9545c34e022SScott Feldman  *	@nlh: netlink header
9555c34e022SScott Feldman  *	@flags: netlink flags
9568a44dbb2SRoopa Prabhu  *
9575c34e022SScott Feldman  *	Called for SELF on rtnl_bridge_dellink to delete bridge port
9585c34e022SScott Feldman  *	attributes (VLAN entries carried in IFLA_AF_SPEC).
9598a44dbb2SRoopa Prabhu  */
960ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev,
9618a44dbb2SRoopa Prabhu 				  struct nlmsghdr *nlh, u16 flags)
9628a44dbb2SRoopa Prabhu {
9635c34e022SScott Feldman 	struct nlattr *afspec;
9648a44dbb2SRoopa Prabhu 
9655c34e022SScott Feldman 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
9665c34e022SScott Feldman 				 IFLA_AF_SPEC);
9675c34e022SScott Feldman 	if (afspec)
9685c34e022SScott Feldman 		return switchdev_port_br_afspec(dev, afspec,
9695c34e022SScott Feldman 						switchdev_port_obj_del);
9705c34e022SScott Feldman 
9718a44dbb2SRoopa Prabhu 	return 0;
9728a44dbb2SRoopa Prabhu }
973ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
9748a44dbb2SRoopa Prabhu 
97545d4122cSSamudrala, Sridhar /**
97645d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
97745d4122cSSamudrala, Sridhar  *
97845d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
97945d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
98045d4122cSSamudrala, Sridhar  *	@dev: port device
98145d4122cSSamudrala, Sridhar  *	@addr: MAC address to add
98245d4122cSSamudrala, Sridhar  *	@vid: VLAN to add
 *	@nlm_flags: netlink flags passed in (NLM_F_*)
98345d4122cSSamudrala, Sridhar  *
98445d4122cSSamudrala, Sridhar  *	Add FDB entry to switch device.
98545d4122cSSamudrala, Sridhar  */
98645d4122cSSamudrala, Sridhar int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
98745d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
98845d4122cSSamudrala, Sridhar 			   u16 vid, u16 nlm_flags)
98945d4122cSSamudrala, Sridhar {
99052ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
9916ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
9929e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
99345d4122cSSamudrala, Sridhar 		.vid = vid,
99445d4122cSSamudrala, Sridhar 	};
99545d4122cSSamudrala, Sridhar 
996850d0cbcSJiri Pirko 	ether_addr_copy(fdb.addr, addr);
9979e8f4a54SJiri Pirko 	return switchdev_port_obj_add(dev, &fdb.obj);
99845d4122cSSamudrala, Sridhar }
99945d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
100045d4122cSSamudrala, Sridhar 
100145d4122cSSamudrala, Sridhar /**
100245d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
100345d4122cSSamudrala, Sridhar  *
100445d4122cSSamudrala, Sridhar  *	@ndm: netlink hdr
100545d4122cSSamudrala, Sridhar  *	@tb: netlink attributes
100645d4122cSSamudrala, Sridhar  *	@dev: port device
100745d4122cSSamudrala, Sridhar  *	@addr: MAC address to delete
100845d4122cSSamudrala, Sridhar  *	@vid: VLAN to delete
100945d4122cSSamudrala, Sridhar  *
101045d4122cSSamudrala, Sridhar  *	Delete FDB entry from switch device.
101145d4122cSSamudrala, Sridhar  */
101245d4122cSSamudrala, Sridhar int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
101345d4122cSSamudrala, Sridhar 			   struct net_device *dev, const unsigned char *addr,
101445d4122cSSamudrala, Sridhar 			   u16 vid)
101545d4122cSSamudrala, Sridhar {
101652ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb = {
10176ff64f6fSIdo Schimmel 		.obj.orig_dev = dev,
10189e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
101945d4122cSSamudrala, Sridhar 		.vid = vid,
102045d4122cSSamudrala, Sridhar 	};
102145d4122cSSamudrala, Sridhar 
1022850d0cbcSJiri Pirko 	ether_addr_copy(fdb.addr, addr);
10239e8f4a54SJiri Pirko 	return switchdev_port_obj_del(dev, &fdb.obj);
102445d4122cSSamudrala, Sridhar }
102545d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
102645d4122cSSamudrala, Sridhar 
/* State threaded through an FDB dump: the driver fills @fdb per entry and
 * switchdev_port_fdb_dump_cb serializes it into @skb.
 */
102745d4122cSSamudrala, Sridhar struct switchdev_fdb_dump {
102852ba57cfSJiri Pirko 	struct switchdev_obj_port_fdb fdb;	/* object handed to the driver dump op */
1029e02a06b2SVivien Didelot 	struct net_device *dev;		/* port being dumped (for ndm_ifindex) */
103045d4122cSSamudrala, Sridhar 	struct sk_buff *skb;		/* netlink reply being filled */
103145d4122cSSamudrala, Sridhar 	struct netlink_callback *cb;	/* dump state (portid, seq, resume idx) */
103245d4122cSSamudrala, Sridhar 	int idx;			/* running entry index */
103345d4122cSSamudrala, Sridhar };
103445d4122cSSamudrala, Sridhar 
/* Driver dump callback: serialize one FDB entry as an RTM_NEWNEIGH
 * message.  Entries below cb->args[0] were already delivered in a
 * previous dump pass and are only counted, not re-emitted.
 */
1035648b4a99SJiri Pirko static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
103645d4122cSSamudrala, Sridhar {
1037648b4a99SJiri Pirko 	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
103845d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump *dump =
103925f07adcSVivien Didelot 		container_of(fdb, struct switchdev_fdb_dump, fdb);
104045d4122cSSamudrala, Sridhar 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
104145d4122cSSamudrala, Sridhar 	u32 seq = dump->cb->nlh->nlmsg_seq;
104245d4122cSSamudrala, Sridhar 	struct nlmsghdr *nlh;
104345d4122cSSamudrala, Sridhar 	struct ndmsg *ndm;
104445d4122cSSamudrala, Sridhar 
	/* already sent in an earlier pass - just advance the index */
104545d4122cSSamudrala, Sridhar 	if (dump->idx < dump->cb->args[0])
104645d4122cSSamudrala, Sridhar 		goto skip;
104745d4122cSSamudrala, Sridhar 
104845d4122cSSamudrala, Sridhar 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
104945d4122cSSamudrala, Sridhar 			sizeof(*ndm), NLM_F_MULTI);
105045d4122cSSamudrala, Sridhar 	if (!nlh)
105145d4122cSSamudrala, Sridhar 		return -EMSGSIZE;
105245d4122cSSamudrala, Sridhar 
105345d4122cSSamudrala, Sridhar 	ndm = nlmsg_data(nlh);
105445d4122cSSamudrala, Sridhar 	ndm->ndm_family  = AF_BRIDGE;
105545d4122cSSamudrala, Sridhar 	ndm->ndm_pad1    = 0;
105645d4122cSSamudrala, Sridhar 	ndm->ndm_pad2    = 0;
105745d4122cSSamudrala, Sridhar 	ndm->ndm_flags   = NTF_SELF;
105845d4122cSSamudrala, Sridhar 	ndm->ndm_type    = 0;
1059e02a06b2SVivien Didelot 	ndm->ndm_ifindex = dump->dev->ifindex;
106025f07adcSVivien Didelot 	ndm->ndm_state   = fdb->ndm_state;
106145d4122cSSamudrala, Sridhar 
106225f07adcSVivien Didelot 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
106345d4122cSSamudrala, Sridhar 		goto nla_put_failure;
106445d4122cSSamudrala, Sridhar 
	/* NDA_VLAN is optional: omitted for vid 0 */
106525f07adcSVivien Didelot 	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
106645d4122cSSamudrala, Sridhar 		goto nla_put_failure;
106745d4122cSSamudrala, Sridhar 
106845d4122cSSamudrala, Sridhar 	nlmsg_end(dump->skb, nlh);
106945d4122cSSamudrala, Sridhar 
107045d4122cSSamudrala, Sridhar skip:
107145d4122cSSamudrala, Sridhar 	dump->idx++;
107245d4122cSSamudrala, Sridhar 	return 0;
107345d4122cSSamudrala, Sridhar 
107445d4122cSSamudrala, Sridhar nla_put_failure:
	/* roll back the partially built message */
107545d4122cSSamudrala, Sridhar 	nlmsg_cancel(dump->skb, nlh);
107645d4122cSSamudrala, Sridhar 	return -EMSGSIZE;
107745d4122cSSamudrala, Sridhar }
107845d4122cSSamudrala, Sridhar 
107945d4122cSSamudrala, Sridhar /**
108045d4122cSSamudrala, Sridhar  *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
108145d4122cSSamudrala, Sridhar  *
108245d4122cSSamudrala, Sridhar  *	@skb: netlink skb
108345d4122cSSamudrala, Sridhar  *	@cb: netlink callback
108445d4122cSSamudrala, Sridhar  *	@dev: port device
108545d4122cSSamudrala, Sridhar  *	@filter_dev: filter device
108645d4122cSSamudrala, Sridhar  *	@idx: entry index to start counting from (dump resume point)
108745d4122cSSamudrala, Sridhar  *
10883e347660SNicolas Dichtel  *	Dump FDB entries from switch device.
108945d4122cSSamudrala, Sridhar  */
109045d4122cSSamudrala, Sridhar int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
109145d4122cSSamudrala, Sridhar 			    struct net_device *dev,
109245d4122cSSamudrala, Sridhar 			    struct net_device *filter_dev, int idx)
109345d4122cSSamudrala, Sridhar {
109445d4122cSSamudrala, Sridhar 	struct switchdev_fdb_dump dump = {
10956ff64f6fSIdo Schimmel 		.fdb.obj.orig_dev = dev,
10969e8f4a54SJiri Pirko 		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
1097e02a06b2SVivien Didelot 		.dev = dev,
109845d4122cSSamudrala, Sridhar 		.skb = skb,
109945d4122cSSamudrala, Sridhar 		.cb = cb,
110045d4122cSSamudrala, Sridhar 		.idx = idx,
110145d4122cSSamudrala, Sridhar 	};
1102472681d5SMINOURA Makoto / 箕浦 真 	int err;
110345d4122cSSamudrala, Sridhar 
1104472681d5SMINOURA Makoto / 箕浦 真 	err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
1105472681d5SMINOURA Makoto / 箕浦 真 				      switchdev_port_fdb_dump_cb);
	/* record the driver error in the callback state; the updated
	 * entry index is the return value
	 */
1106472681d5SMINOURA Makoto / 箕浦 真 	cb->args[1] = err;
110745d4122cSSamudrala, Sridhar 	return dump.idx;
110845d4122cSSamudrala, Sridhar }
110945d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
111045d4122cSSamudrala, Sridhar 
/* Depth-first walk down @dev's lower-device hierarchy for the first
 * netdev whose switchdev_ops implement switchdev_port_attr_get, i.e. a
 * real switch port backing @dev.  Returns NULL if none is found.
 */
1111ebb9a03aSJiri Pirko static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
1112b5d6fbdeSScott Feldman {
11139d47c0a2SJiri Pirko 	const struct switchdev_ops *ops = dev->switchdev_ops;
1114b5d6fbdeSScott Feldman 	struct net_device *lower_dev;
1115b5d6fbdeSScott Feldman 	struct net_device *port_dev;
1116b5d6fbdeSScott Feldman 	struct list_head *iter;
1117b5d6fbdeSScott Feldman 
1118b5d6fbdeSScott Feldman 	/* Recursively search down until we find a sw port dev.
1119f8e20a9fSScott Feldman 	 * (A sw port dev supports switchdev_port_attr_get).
1120b5d6fbdeSScott Feldman 	 */
1121b5d6fbdeSScott Feldman 
1122f8e20a9fSScott Feldman 	if (ops && ops->switchdev_port_attr_get)
1123b5d6fbdeSScott Feldman 		return dev;
1124b5d6fbdeSScott Feldman 
1125b5d6fbdeSScott Feldman 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
1126ebb9a03aSJiri Pirko 		port_dev = switchdev_get_lowest_dev(lower_dev);
1127b5d6fbdeSScott Feldman 		if (port_dev)
1128b5d6fbdeSScott Feldman 			return port_dev;
1129b5d6fbdeSScott Feldman 	}
1130b5d6fbdeSScott Feldman 
1131b5d6fbdeSScott Feldman 	return NULL;
1132b5d6fbdeSScott Feldman }
1133b5d6fbdeSScott Feldman 
/* Resolve the switch port device backing route @fi.  Every nexthop must
 * map to a switch port, and all ports must report the same parent ID
 * (i.e. sit on the same physical switch); otherwise NULL is returned and
 * the route is not offloaded.  Returns the last nexthop's port device.
 */
1134ebb9a03aSJiri Pirko static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
1135b5d6fbdeSScott Feldman {
1136f8e20a9fSScott Feldman 	struct switchdev_attr attr = {
11371f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1138f8e20a9fSScott Feldman 	};
1139f8e20a9fSScott Feldman 	struct switchdev_attr prev_attr;
1140b5d6fbdeSScott Feldman 	struct net_device *dev = NULL;
1141b5d6fbdeSScott Feldman 	int nhsel;
1142b5d6fbdeSScott Feldman 
1143771acac2SJiri Pirko 	ASSERT_RTNL();
1144771acac2SJiri Pirko 
1145b5d6fbdeSScott Feldman 	/* For this route, all nexthop devs must be on the same switch. */
1146b5d6fbdeSScott Feldman 
1147b5d6fbdeSScott Feldman 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1148b5d6fbdeSScott Feldman 		const struct fib_nh *nh = &fi->fib_nh[nhsel];
1149b5d6fbdeSScott Feldman 
1150b5d6fbdeSScott Feldman 		if (!nh->nh_dev)
1151b5d6fbdeSScott Feldman 			return NULL;
1152b5d6fbdeSScott Feldman 
1153ebb9a03aSJiri Pirko 		dev = switchdev_get_lowest_dev(nh->nh_dev);
1154b5d6fbdeSScott Feldman 		if (!dev)
1155b5d6fbdeSScott Feldman 			return NULL;
1156b5d6fbdeSScott Feldman 
11576ff64f6fSIdo Schimmel 		attr.orig_dev = dev;
1158f8e20a9fSScott Feldman 		if (switchdev_port_attr_get(dev, &attr))
1159b5d6fbdeSScott Feldman 			return NULL;
1160b5d6fbdeSScott Feldman 
		/* every nexthop after the first must match the previous
		 * port's parent (switch) ID
		 */
1161d754f98bSScott Feldman 		if (nhsel > 0 &&
1162d754f98bSScott Feldman 		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
1163b5d6fbdeSScott Feldman 				return NULL;
1164b5d6fbdeSScott Feldman 
1165f8e20a9fSScott Feldman 		prev_attr = attr;
1166b5d6fbdeSScott Feldman 	}
1167b5d6fbdeSScott Feldman 
1168b5d6fbdeSScott Feldman 	return dev;
1169b5d6fbdeSScott Feldman }
1170b5d6fbdeSScott Feldman 
11715e8d9049SScott Feldman /**
11727616dcbbSScott Feldman  *	switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
11735e8d9049SScott Feldman  *
11745e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
11755e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
11765e8d9049SScott Feldman  *	@fi: route FIB info structure
11775e8d9049SScott Feldman  *	@tos: route TOS
11785e8d9049SScott Feldman  *	@type: route type
1179f8f21471SScott Feldman  *	@nlflags: netlink flags passed in (NLM_F_*)
11805e8d9049SScott Feldman  *	@tb_id: route table ID
11815e8d9049SScott Feldman  *
11827616dcbbSScott Feldman  *	Add/modify switch IPv4 route entry.
11835e8d9049SScott Feldman  */
1184ebb9a03aSJiri Pirko int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
1185f8f21471SScott Feldman 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
11865e8d9049SScott Feldman {
1187ab069002SVivien Didelot 	struct switchdev_obj_ipv4_fib ipv4_fib = {
11889e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
11897a7ee531SScott Feldman 		.dst = dst,
119058c2cb16SScott Feldman 		.dst_len = dst_len,
1191da4ed551SJiri Pirko 		.fi = fi,
119258c2cb16SScott Feldman 		.tos = tos,
119358c2cb16SScott Feldman 		.type = type,
119458c2cb16SScott Feldman 		.nlflags = nlflags,
119558c2cb16SScott Feldman 		.tb_id = tb_id,
119658c2cb16SScott Feldman 	};
1197b5d6fbdeSScott Feldman 	struct net_device *dev;
1198b5d6fbdeSScott Feldman 	int err = 0;
1199b5d6fbdeSScott Feldman 
12008e05fd71SScott Feldman 	/* Don't offload route if using custom ip rules or if
12018e05fd71SScott Feldman 	 * IPv4 FIB offloading has been disabled completely.
12028e05fd71SScott Feldman 	 */
12038e05fd71SScott Feldman 
1204e1315db1SScott Feldman #ifdef CONFIG_IP_MULTIPLE_TABLES
1205e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_has_custom_rules)
1206e1315db1SScott Feldman 		return 0;
1207e1315db1SScott Feldman #endif
1208e1315db1SScott Feldman 
1209e1315db1SScott Feldman 	if (fi->fib_net->ipv4.fib_offload_disabled)
1210104616e7SScott Feldman 		return 0;
1211104616e7SScott Feldman 
	/* all nexthops must resolve to ports on a single switch */
1212ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
1213b5d6fbdeSScott Feldman 	if (!dev)
12145e8d9049SScott Feldman 		return 0;
1215b5d6fbdeSScott Feldman 
12166ff64f6fSIdo Schimmel 	ipv4_fib.obj.orig_dev = dev;
12179e8f4a54SJiri Pirko 	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
1218b5d6fbdeSScott Feldman 	if (!err)
1219eea39946SRoopa Prabhu 		fi->fib_flags |= RTNH_F_OFFLOAD;
1220b5d6fbdeSScott Feldman 
	/* a driver without FIB support is not an error - just no offload */
1221af201f72SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
12225e8d9049SScott Feldman }
1223ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
12245e8d9049SScott Feldman 
12255e8d9049SScott Feldman /**
1226ebb9a03aSJiri Pirko  *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
12275e8d9049SScott Feldman  *
12285e8d9049SScott Feldman  *	@dst: route's IPv4 destination address
12295e8d9049SScott Feldman  *	@dst_len: destination address length (prefix length)
12305e8d9049SScott Feldman  *	@fi: route FIB info structure
12315e8d9049SScott Feldman  *	@tos: route TOS
12325e8d9049SScott Feldman  *	@type: route type
12335e8d9049SScott Feldman  *	@tb_id: route table ID
12345e8d9049SScott Feldman  *
12355e8d9049SScott Feldman  *	Delete IPv4 route entry from switch device.
12365e8d9049SScott Feldman  */
1237ebb9a03aSJiri Pirko int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
12385e8d9049SScott Feldman 			   u8 tos, u8 type, u32 tb_id)
12395e8d9049SScott Feldman {
1240ab069002SVivien Didelot 	struct switchdev_obj_ipv4_fib ipv4_fib = {
12419e8f4a54SJiri Pirko 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
12427a7ee531SScott Feldman 		.dst = dst,
124358c2cb16SScott Feldman 		.dst_len = dst_len,
1244da4ed551SJiri Pirko 		.fi = fi,
124558c2cb16SScott Feldman 		.tos = tos,
124658c2cb16SScott Feldman 		.type = type,
124758c2cb16SScott Feldman 		.nlflags = 0,
124858c2cb16SScott Feldman 		.tb_id = tb_id,
124958c2cb16SScott Feldman 	};
1250b5d6fbdeSScott Feldman 	struct net_device *dev;
1251b5d6fbdeSScott Feldman 	int err = 0;
1252b5d6fbdeSScott Feldman 
	/* route was never marked offloaded - nothing to withdraw */
1253eea39946SRoopa Prabhu 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
12545e8d9049SScott Feldman 		return 0;
1255b5d6fbdeSScott Feldman 
1256ebb9a03aSJiri Pirko 	dev = switchdev_get_dev_by_nhs(fi);
1257b5d6fbdeSScott Feldman 	if (!dev)
1258b5d6fbdeSScott Feldman 		return 0;
1259b5d6fbdeSScott Feldman 
12606ff64f6fSIdo Schimmel 	ipv4_fib.obj.orig_dev = dev;
12619e8f4a54SJiri Pirko 	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
1262b5d6fbdeSScott Feldman 	if (!err)
1263eea39946SRoopa Prabhu 		fi->fib_flags &= ~RTNH_F_OFFLOAD;
1264b5d6fbdeSScott Feldman 
1265af201f72SScott Feldman 	return err == -EOPNOTSUPP ? 0 : err;
12665e8d9049SScott Feldman }
1267ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
12688e05fd71SScott Feldman 
/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
12881a3b2ec9SScott Feldman 
12898438884dSOr Gerlitz bool switchdev_port_same_parent_id(struct net_device *a,
12901a3b2ec9SScott Feldman 				   struct net_device *b)
12911a3b2ec9SScott Feldman {
12921a3b2ec9SScott Feldman 	struct switchdev_attr a_attr = {
12936ff64f6fSIdo Schimmel 		.orig_dev = a,
12941f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
12951a3b2ec9SScott Feldman 		.flags = SWITCHDEV_F_NO_RECURSE,
12961a3b2ec9SScott Feldman 	};
12971a3b2ec9SScott Feldman 	struct switchdev_attr b_attr = {
12986ff64f6fSIdo Schimmel 		.orig_dev = b,
12991f868398SJiri Pirko 		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
13001a3b2ec9SScott Feldman 		.flags = SWITCHDEV_F_NO_RECURSE,
13011a3b2ec9SScott Feldman 	};
13021a3b2ec9SScott Feldman 
13031a3b2ec9SScott Feldman 	if (switchdev_port_attr_get(a, &a_attr) ||
13041a3b2ec9SScott Feldman 	    switchdev_port_attr_get(b, &b_attr))
13051a3b2ec9SScott Feldman 		return false;
13061a3b2ec9SScott Feldman 
13071a3b2ec9SScott Feldman 	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
13081a3b2ec9SScott Feldman }
1309*2eb03e6cSOr Gerlitz EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
13101a3b2ec9SScott Feldman 
/* Find the offload forwarding mark dev should adopt when joining
 * group_dev: if a lower device of group_dev shares dev's parent
 * (switch) ID, reuse that device's mark; otherwise fall back to
 * dev's own ifindex.
 *
 * NOTE(review): the loop body returns on the first lower device that
 * isn't dev itself — either reusing its mark or recursing into it —
 * so only one branch of the lower-device topology is ever explored;
 * presumably group members are homogeneous enough for this. Confirm.
 */
static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
				       struct net_device *group_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
		if (lower_dev == dev)
			continue;
		if (switchdev_port_same_parent_id(dev, lower_dev))
			return lower_dev->offload_fwd_mark;
		/* Not a peer port: descend into this lower device
		 * (e.g. a nested grouping) to keep looking.
		 */
		return switchdev_port_fwd_mark_get(dev, lower_dev);
	}

	/* No peer found anywhere below group_dev. */
	return dev->ifindex;
}
13271a3b2ec9SScott Feldman 
13281a3b2ec9SScott Feldman static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
13291a3b2ec9SScott Feldman 					  u32 old_mark, u32 *reset_mark)
13301a3b2ec9SScott Feldman {
13311a3b2ec9SScott Feldman 	struct net_device *lower_dev;
13321a3b2ec9SScott Feldman 	struct list_head *iter;
13331a3b2ec9SScott Feldman 
13341a3b2ec9SScott Feldman 	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
13351a3b2ec9SScott Feldman 		if (lower_dev->offload_fwd_mark == old_mark) {
13361a3b2ec9SScott Feldman 			if (!*reset_mark)
13371a3b2ec9SScott Feldman 				*reset_mark = lower_dev->ifindex;
13381a3b2ec9SScott Feldman 			lower_dev->offload_fwd_mark = *reset_mark;
13391a3b2ec9SScott Feldman 		}
13401a3b2ec9SScott Feldman 		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
13411a3b2ec9SScott Feldman 	}
13421a3b2ec9SScott Feldman }
13431a3b2ec9SScott Feldman 
13441a3b2ec9SScott Feldman /**
13451a3b2ec9SScott Feldman  *	switchdev_port_fwd_mark_set - Set port offload forwarding mark
13461a3b2ec9SScott Feldman  *
13471a3b2ec9SScott Feldman  *	@dev: port device
13481a3b2ec9SScott Feldman  *	@group_dev: containing device
13491a3b2ec9SScott Feldman  *	@joining: true if dev is joining group; false if leaving group
13501a3b2ec9SScott Feldman  *
13511a3b2ec9SScott Feldman  *	An ungrouped port's offload mark is just its ifindex.  A grouped
13521a3b2ec9SScott Feldman  *	port's (member of a bridge, for example) offload mark is the ifindex
13531a3b2ec9SScott Feldman  *	of one of the ports in the group with the same parent (switch) ID.
13541a3b2ec9SScott Feldman  *	Ports on the same device in the same group will have the same mark.
13551a3b2ec9SScott Feldman  *
13561a3b2ec9SScott Feldman  *	Example:
13571a3b2ec9SScott Feldman  *
13581a3b2ec9SScott Feldman  *		br0		ifindex=9
13591a3b2ec9SScott Feldman  *		  sw1p1		ifindex=2	mark=2
13601a3b2ec9SScott Feldman  *		  sw1p2		ifindex=3	mark=2
13611a3b2ec9SScott Feldman  *		  sw2p1		ifindex=4	mark=5
13621a3b2ec9SScott Feldman  *		  sw2p2		ifindex=5	mark=5
13631a3b2ec9SScott Feldman  *
13641a3b2ec9SScott Feldman  *	If sw2p2 leaves the bridge, we'll have:
13651a3b2ec9SScott Feldman  *
13661a3b2ec9SScott Feldman  *		br0		ifindex=9
13671a3b2ec9SScott Feldman  *		  sw1p1		ifindex=2	mark=2
13681a3b2ec9SScott Feldman  *		  sw1p2		ifindex=3	mark=2
13691a3b2ec9SScott Feldman  *		  sw2p1		ifindex=4	mark=4
13701a3b2ec9SScott Feldman  *		sw2p2		ifindex=5	mark=5
13711a3b2ec9SScott Feldman  */
13721a3b2ec9SScott Feldman void switchdev_port_fwd_mark_set(struct net_device *dev,
13731a3b2ec9SScott Feldman 				 struct net_device *group_dev,
13741a3b2ec9SScott Feldman 				 bool joining)
13751a3b2ec9SScott Feldman {
13761a3b2ec9SScott Feldman 	u32 mark = dev->ifindex;
13771a3b2ec9SScott Feldman 	u32 reset_mark = 0;
13781a3b2ec9SScott Feldman 
1379771acac2SJiri Pirko 	if (group_dev) {
1380771acac2SJiri Pirko 		ASSERT_RTNL();
1381771acac2SJiri Pirko 		if (joining)
13821a3b2ec9SScott Feldman 			mark = switchdev_port_fwd_mark_get(dev, group_dev);
1383771acac2SJiri Pirko 		else if (dev->offload_fwd_mark == mark)
13841a3b2ec9SScott Feldman 			/* Ohoh, this port was the mark reference port,
13851a3b2ec9SScott Feldman 			 * but it's leaving the group, so reset the
13861a3b2ec9SScott Feldman 			 * mark for the remaining ports in the group.
13871a3b2ec9SScott Feldman 			 */
13881a3b2ec9SScott Feldman 			switchdev_port_fwd_mark_reset(group_dev, mark,
13891a3b2ec9SScott Feldman 						      &reset_mark);
13901a3b2ec9SScott Feldman 	}
13911a3b2ec9SScott Feldman 
13921a3b2ec9SScott Feldman 	dev->offload_fwd_mark = mark;
13931a3b2ec9SScott Feldman }
13941a3b2ec9SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
1395