/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>

/**
 *	switchdev_port_attr_get - Get port attribute
 *
 *	@dev: port device
 *	@attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port.  Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
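/* Illustrative sketch (not part of this file): a minimal
 * switchdev_port_attr_get() implementation in a hypothetical port driver,
 * hooked up via dev->switchdev_ops.  It answers the two attributes used
 * elsewhere in this file: the switch parent ID (so the stacked-device
 * recursion and the FIB code can tell which ports share a switch) and the
 * bridge port flags.  All foo_* names are assumptions for the example only.
 *
 *	static int foo_port_attr_get(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_PORT_PARENT_ID:
 *			attr->u.ppid.id_len = sizeof(port->switch_id);
 *			memcpy(attr->u.ppid.id, &port->switch_id,
 *			       attr->u.ppid.id_len);
 *			return 0;
 *		case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
 *			attr->u.brport_flags = port->brport_flags;
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	static const struct switchdev_ops foo_switchdev_ops = {
 *		.switchdev_port_attr_get = foo_port_attr_get,
 *	};
 */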
static int __switchdev_port_attr_set(struct net_device *dev,
				     struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set)
		return ops->switchdev_port_attr_set(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr);
		if (err)
			break;
	}

	return err;
}

struct switchdev_attr_set_work {
	struct work_struct work;
	struct net_device *dev;
	struct switchdev_attr attr;
};

static void switchdev_port_attr_set_work(struct work_struct *work)
{
	struct switchdev_attr_set_work *asw =
		container_of(work, struct switchdev_attr_set_work, work);
	int err;

	rtnl_lock();
	err = switchdev_port_attr_set(asw->dev, &asw->attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, asw->attr.id);
	rtnl_unlock();

	dev_put(asw->dev);
	kfree(work);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 struct switchdev_attr *attr)
{
	struct switchdev_attr_set_work *asw;

	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
	if (!asw)
		return -ENOMEM;
	INIT_WORK(&asw->work, switchdev_port_attr_set_work);

	dev_hold(dev);
	asw->dev = dev;
	memcpy(&asw->attr, attr, sizeof(asw->attr));

	schedule_work(&asw->work);

	return 0;
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 */
int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
{
	int err;

	if (!rtnl_is_locked()) {
		/* Running prepare-commit transaction across stacked
		 * devices requires nothing moves, so if rtnl_lock is
		 * not held, schedule a worker thread to hold rtnl_lock
		 * while setting attr.
		 */

		return switchdev_port_attr_set_defer(dev, attr);
	}

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	attr->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_attr_set(dev, attr);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		attr->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_attr_set(dev, attr);

		return err;
	}

	/* Phase II: commit attr set.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	attr->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_attr_set(dev, attr);
	BUG_ON(err);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
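/* Illustrative sketch (not part of this file): how a hypothetical driver's
 * switchdev_port_attr_set() would honour the prepare-commit transaction
 * described above.  In PREPARE it only validates and reserves, in ABORT it
 * releases whatever PREPARE reserved (nothing, in this simple case), and in
 * COMMIT it touches the hardware and must not fail.  The foo_* names are
 * assumptions for the example only.
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     struct switchdev_attr *attr)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		if (attr->id != SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS)
 *			return -EOPNOTSUPP;
 *
 *		switch (attr->trans) {
 *		case SWITCHDEV_TRANS_PREPARE:
 *			if (attr->u.brport_flags & ~port->brport_flags_support)
 *				return -EINVAL;
 *			return 0;
 *		case SWITCHDEV_TRANS_ABORT:
 *			return 0;
 *		case SWITCHDEV_TRANS_COMMIT:
 *			return foo_hw_set_brport_flags(port,
 *						       attr->u.brport_flags);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */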
static int __switchdev_port_obj_add(struct net_device *dev,
				    struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_add)
		return ops->switchdev_port_obj_add(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to add object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_obj_add(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *
 *	Use a 2-phase prepare-commit transaction model to ensure
 *	system is not left in a partially updated state due to
 *	failure from driver/device.
 *
 *	rtnl_lock must be held.
 */
int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
{
	int err;

	ASSERT_RTNL();

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support.  The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	obj->trans = SWITCHDEV_TRANS_PREPARE;
	err = __switchdev_port_obj_add(dev, obj);
	if (err) {
		/* Prepare phase failed: abort the transaction.  Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		obj->trans = SWITCHDEV_TRANS_ABORT;
		__switchdev_port_obj_add(dev, obj);

		return err;
	}

	/* Phase II: commit obj add.  This cannot fail as a fault
	 * of driver/device.  If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	obj->trans = SWITCHDEV_TRANS_COMMIT;
	err = __switchdev_port_obj_add(dev, obj);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
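/* Illustrative sketch (not part of this file): adding a port VLAN object
 * from a caller that already holds rtnl_lock, built the same way
 * switchdev_port_br_afspec() below builds its object.  The VLAN values are
 * arbitrary example data and the wrapper function is hypothetical.
 *
 *	static int example_add_vlan_100(struct net_device *dev)
 *	{
 *		struct switchdev_obj obj = {
 *			.id = SWITCHDEV_OBJ_PORT_VLAN,
 *			.u.vlan = {
 *				.flags = BRIDGE_VLAN_INFO_UNTAGGED,
 *				.vid_begin = 100,
 *				.vid_end = 100,
 *			},
 *		};
 *
 *		ASSERT_RTNL();
 *
 *		return switchdev_port_obj_add(dev, &obj);
 *	}
 */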
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 */
int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_del)
		return ops->switchdev_port_obj_del(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to delete object on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_del(lower_dev, obj);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_dump - Dump port objects
 *
 *	@dev: port device
 *	@obj: object to dump
 */
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_obj_dump)
		return ops->switchdev_port_obj_dump(dev, obj);

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to dump objects on
	 * first port at bottom of stack.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_obj_dump(lower_dev, obj);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);

static DEFINE_MUTEX(switchdev_mutex);
static RAW_NOTIFIER_HEAD(switchdev_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier. This should be used by code
 *	which needs to monitor events happening in particular device.
 *	Return values are same as for atomic_notifier_chain_register().
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 *	Return values are same as for atomic_notifier_chain_unregister().
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	int err;

	mutex_lock(&switchdev_mutex);
	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *
 *	Call all network notifier blocks. This should be called by driver
 *	when it needs to propagate hardware event.
 *	Return values are same as for atomic_notifier_call_chain().
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	int err;

	info->dev = dev;
	mutex_lock(&switchdev_mutex);
	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
	mutex_unlock(&switchdev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
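/* Illustrative sketch (not part of this file): the two sides of the
 * notifier chain above.  A switch driver calls call_switchdev_notifiers()
 * when its hardware learns a MAC address, and an interested upper layer
 * registers a notifier_block to receive the event.  SWITCHDEV_FDB_ADD and
 * struct switchdev_notifier_fdb_info come from <net/switchdev.h>; the
 * foo_*/bar_* names are assumptions for the example only.
 *
 *	static void foo_learn_mac(struct net_device *dev,
 *				  const unsigned char *addr, u16 vid)
 *	{
 *		struct switchdev_notifier_fdb_info info = {
 *			.addr = addr,
 *			.vid = vid,
 *		};
 *
 *		call_switchdev_notifiers(SWITCHDEV_FDB_ADD, dev, &info.info);
 *	}
 *
 *	static int bar_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_fdb_info *fdb_info = ptr;
 *
 *		if (event == SWITCHDEV_FDB_ADD)
 *			bar_sync_learned_fdb(fdb_info->info.dev,
 *					     fdb_info->addr, fdb_info->vid);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block bar_switchdev_nb = {
 *		.notifier_call = bar_switchdev_event,
 *	};
 *
 * The consumer then calls register_switchdev_notifier(&bar_switchdev_nb)
 * at init time and unregister_switchdev_notifier() on teardown.
 */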
struct switchdev_vlan_dump {
	struct switchdev_obj obj;
	struct sk_buff *skb;
	u32 filter_mask;
	u16 flags;
	u16 begin;
	u16 end;
};

static int switchdev_port_vlan_dump_put(struct net_device *dev,
					struct switchdev_vlan_dump *dump)
{
	struct bridge_vlan_info vinfo;

	vinfo.flags = dump->flags;

	if (dump->begin == 0 && dump->end == 0) {
		return 0;
	} else if (dump->begin == dump->end) {
		vinfo.vid = dump->begin;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	} else {
		vinfo.vid = dump->begin;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = dump->end;
		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}

static int switchdev_port_vlan_dump_cb(struct net_device *dev,
				       struct switchdev_obj *obj)
{
	struct switchdev_vlan_dump *dump =
		container_of(obj, struct switchdev_vlan_dump, obj);
	struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
	int err = 0;

	if (vlan->vid_begin > vlan->vid_end)
		return -EINVAL;

	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
		dump->flags = vlan->flags;
		for (dump->begin = dump->end = vlan->vid_begin;
		     dump->begin <= vlan->vid_end;
		     dump->begin++, dump->end++) {
			err = switchdev_port_vlan_dump_put(dev, dump);
			if (err)
				return err;
		}
	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
		if (dump->begin > vlan->vid_begin &&
		    dump->begin >= vlan->vid_end) {
			if ((dump->begin - 1) == vlan->vid_end &&
			    dump->flags == vlan->flags) {
				/* prepend */
				dump->begin = vlan->vid_begin;
			} else {
				err = switchdev_port_vlan_dump_put(dev, dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else if (dump->end <= vlan->vid_begin &&
			   dump->end < vlan->vid_end) {
			if ((dump->end + 1) == vlan->vid_begin &&
			    dump->flags == vlan->flags) {
				/* append */
				dump->end = vlan->vid_end;
			} else {
				err = switchdev_port_vlan_dump_put(dev, dump);
				dump->flags = vlan->flags;
				dump->begin = vlan->vid_begin;
				dump->end = vlan->vid_end;
			}
		} else {
			err = -EINVAL;
		}
	}

	return err;
}
static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
				    u32 filter_mask)
{
	struct switchdev_vlan_dump dump = {
		.obj = {
			.id = SWITCHDEV_OBJ_PORT_VLAN,
			.cb = switchdev_port_vlan_dump_cb,
		},
		.skb = skb,
		.filter_mask = filter_mask,
	};
	int err = 0;

	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		err = switchdev_port_obj_dump(dev, &dump.obj);
		if (err)
			goto err_out;
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			/* last one */
			err = switchdev_port_vlan_dump_put(dev, &dump);
	}

err_out:
	return err == -EOPNOTSUPP ? 0 : err;
}

/**
 *	switchdev_port_bridge_getlink - Get bridge port attributes
 *
 *	@dev: port device
 *
 *	Called for SELF on rtnl_bridge_getlink to get bridge port
 *	attributes.
 */
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u16 mode = BRIDGE_MODE_UNDEF;
	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
				       attr.u.brport_flags, mask, nlflags,
				       filter_mask, switchdev_port_vlan_fill);
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);

static int switchdev_port_br_setflag(struct net_device *dev,
				     struct nlattr *nlattr,
				     unsigned long brport_flag)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
	};
	u8 flag = nla_get_u8(nlattr);
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (flag)
		attr.u.brport_flags |= brport_flag;
	else
		attr.u.brport_flags &= ~brport_flag;

	return switchdev_port_attr_set(dev, &attr);
}

static const struct nla_policy
switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
};

static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
					      struct nlattr *protinfo)
{
	struct nlattr *attr;
	int rem;
	int err;

	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
				  switchdev_port_bridge_policy);
	if (err)
		return err;

	nla_for_each_nested(attr, protinfo, rem) {
		switch (nla_type(attr)) {
		case IFLA_BRPORT_LEARNING:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING);
			break;
		case IFLA_BRPORT_LEARNING_SYNC:
			err = switchdev_port_br_setflag(dev, attr,
							BR_LEARNING_SYNC);
			break;
		default:
			err = -EOPNOTSUPP;
			break;
		}
		if (err)
			return err;
	}

	return 0;
}

static int switchdev_port_br_afspec(struct net_device *dev,
				    struct nlattr *afspec,
				    int (*f)(struct net_device *dev,
					     struct switchdev_obj *obj))
{
	struct nlattr *attr;
	struct bridge_vlan_info *vinfo;
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
	};
	struct switchdev_obj_vlan *vlan = &obj.u.vlan;
	int rem;
	int err;

	nla_for_each_nested(attr, afspec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;
		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;
		vinfo = nla_data(attr);
		vlan->flags = vinfo->flags;
		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			if (vlan->vid_begin)
				return -EINVAL;
			vlan->vid_begin = vinfo->vid;
		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			if (!vlan->vid_begin)
				return -EINVAL;
			vlan->vid_end = vinfo->vid;
			if (vlan->vid_end <= vlan->vid_begin)
				return -EINVAL;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(vlan, 0, sizeof(*vlan));
		} else {
			if (vlan->vid_begin)
				return -EINVAL;
			vlan->vid_begin = vinfo->vid;
			vlan->vid_end = vinfo->vid;
			err = f(dev, &obj);
			if (err)
				return err;
			memset(vlan, 0, sizeof(*vlan));
		}
	}

	return 0;
}
/**
 *	switchdev_port_bridge_setlink - Set bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_setlink to set bridge port
 *	attributes.
 */
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				   IFLA_PROTINFO);
	if (protinfo) {
		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
		if (err)
			return err;
	}

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		err = switchdev_port_br_afspec(dev, afspec,
					       switchdev_port_obj_add);

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);

/**
 *	switchdev_port_bridge_dellink - Delete bridge port attributes
 *
 *	@dev: port device
 *	@nlh: netlink header
 *	@flags: netlink flags
 *
 *	Called for SELF on rtnl_bridge_dellink to delete bridge port
 *	attributes.
 */
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
				 IFLA_AF_SPEC);
	if (afspec)
		return switchdev_port_br_afspec(dev, afspec,
						switchdev_port_obj_del);

	return 0;
}
EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
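/* Illustrative sketch (not part of this file): a switchdev port driver does
 * not implement the bridge netlink handling itself; it points the relevant
 * net_device_ops hooks at the helpers above (and at the FDB helpers defined
 * further down in this file), whose signatures intentionally match the
 * corresponding ndo_* prototypes.  This is how the rocker driver wires
 * things up; foo_port_netdev_ops is a name made up for the example.
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		...
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *	};
 */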
/**
 *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 *
 *	@ndm: netlink hdr
 *	@tb: netlink attributes
 *	@dev: port device
 *	@addr: MAC address to add
 *	@vid: VLAN to add
 *	@nlm_flags: netlink flags passed in (NLM_F_*)
 *
 *	Add FDB entry to switch device.
 */
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags)
{
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_FDB,
		.u.fdb = {
			.addr = addr,
			.vid = vid,
		},
	};

	return switchdev_port_obj_add(dev, &obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);

/**
 *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
 *
 *	@ndm: netlink hdr
 *	@tb: netlink attributes
 *	@dev: port device
 *	@addr: MAC address to delete
 *	@vid: VLAN to delete
 *
 *	Delete FDB entry from switch device.
 */
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid)
{
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_FDB,
		.u.fdb = {
			.addr = addr,
			.vid = vid,
		},
	};

	return switchdev_port_obj_del(dev, &obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
struct switchdev_fdb_dump {
	struct switchdev_obj obj;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int switchdev_port_fdb_dump_cb(struct net_device *dev,
				      struct switchdev_obj *obj)
{
	struct switchdev_fdb_dump *dump =
		container_of(obj, struct switchdev_fdb_dump, obj);
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[0])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state = NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
		goto nla_put_failure;

	if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
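/* Illustrative sketch (not part of this file): the driver side of an FDB
 * dump.  switchdev_port_fdb_dump() below hands the driver a struct
 * switchdev_obj whose obj->cb points at switchdev_port_fdb_dump_cb(); a
 * hypothetical driver walks its hardware FDB, fills obj->u.fdb for each
 * learned entry and invokes the callback.  The foo_* names and table
 * layout are assumptions for the example only.
 *
 *	static int foo_port_obj_dump(struct net_device *dev,
 *				     struct switchdev_obj *obj)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *		struct foo_fdb_entry *entry;
 *		int err = 0;
 *
 *		if (obj->id != SWITCHDEV_OBJ_PORT_FDB)
 *			return -EOPNOTSUPP;
 *
 *		list_for_each_entry(entry, &port->fdb_list, list) {
 *			obj->u.fdb.addr = entry->addr;
 *			obj->u.fdb.vid = entry->vid;
 *			err = obj->cb(dev, obj);
 *			if (err)
 *				break;
 *		}
 *
 *		return err;
 *	}
 */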
/**
 *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
 *
 *	@skb: netlink skb
 *	@cb: netlink callback
 *	@dev: port device
 *	@filter_dev: filter device
 *	@idx: index to start at; the updated index is returned
 *
 *	Dump FDB entries from switch device.
 */
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx)
{
	struct switchdev_fdb_dump dump = {
		.obj = {
			.id = SWITCHDEV_OBJ_PORT_FDB,
			.cb = switchdev_port_fdb_dump_cb,
		},
		.skb = skb,
		.cb = cb,
		.idx = idx,
	};
	int err;

	err = switchdev_port_obj_dump(dev, &dump.obj);
	if (err)
		return err;

	return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);

static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct net_device *port_dev;
	struct list_head *iter;

	/* Recursively search down until we find a sw port dev.
	 * (A sw port dev supports switchdev_port_attr_get).
	 */

	if (ops && ops->switchdev_port_attr_get)
		return dev;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		port_dev = switchdev_get_lowest_dev(lower_dev);
		if (port_dev)
			return port_dev;
	}

	return NULL;
}

static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	struct switchdev_attr prev_attr;
	struct net_device *dev = NULL;
	int nhsel;

	/* For this route, all nexthop devs must be on the same switch.
	 */

	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
		const struct fib_nh *nh = &fi->fib_nh[nhsel];

		if (!nh->nh_dev)
			return NULL;

		dev = switchdev_get_lowest_dev(nh->nh_dev);
		if (!dev)
			return NULL;

		if (switchdev_port_attr_get(dev, &attr))
			return NULL;

		if (nhsel > 0) {
			if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
				return NULL;
			if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
				   attr.u.ppid.id_len))
				return NULL;
		}

		prev_attr = attr;
	}

	return dev;
}

/**
 *	switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@nlflags: netlink flags passed in (NLM_F_*)
 *	@tb_id: route table ID
 *
 *	Add/modify switch IPv4 route entry.
 */
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
	struct switchdev_obj fib_obj = {
		.id = SWITCHDEV_OBJ_IPV4_FIB,
		.u.ipv4_fib = {
			.dst = dst,
			.dst_len = dst_len,
			.fi = fi,
			.tos = tos,
			.type = type,
			.nlflags = nlflags,
			.tb_id = tb_id,
		},
	};
	struct net_device *dev;
	int err = 0;

	/* Don't offload route if using custom ip rules or if
	 * IPv4 FIB offloading has been disabled completely.
	 */

#ifdef CONFIG_IP_MULTIPLE_TABLES
	if (fi->fib_net->ipv4.fib_has_custom_rules)
		return 0;
#endif

	if (fi->fib_net->ipv4.fib_offload_disabled)
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_add(dev, &fib_obj);
	if (!err)
		fi->fib_flags |= RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);

/**
 *	switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
 *
 *	@dst: route's IPv4 destination address
 *	@dst_len: destination address length (prefix length)
 *	@fi: route FIB info structure
 *	@tos: route TOS
 *	@type: route type
 *	@tb_id: route table ID
 *
 *	Delete IPv4 route entry from switch device.
 */
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id)
{
	struct switchdev_obj fib_obj = {
		.id = SWITCHDEV_OBJ_IPV4_FIB,
		.u.ipv4_fib = {
			.dst = dst,
			.dst_len = dst_len,
			.fi = fi,
			.tos = tos,
			.type = type,
			.nlflags = 0,
			.tb_id = tb_id,
		},
	};
	struct net_device *dev;
	int err = 0;

	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
		return 0;

	dev = switchdev_get_dev_by_nhs(fi);
	if (!dev)
		return 0;

	err = switchdev_port_obj_del(dev, &fib_obj);
	if (!err)
		fi->fib_flags &= ~RTNH_F_OFFLOAD;

	return err == -EOPNOTSUPP ? 0 : err;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
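/* Illustrative sketch (not part of this file): the route added above reaches
 * a driver as a SWITCHDEV_OBJ_IPV4_FIB object through its
 * switchdev_port_obj_add/del handlers, using the same prepare/commit
 * transaction as any other object.  A hypothetical handler might look like
 * this; the foo_* names are assumptions for the example only.
 *
 *	static int foo_port_obj_add(struct net_device *dev,
 *				    struct switchdev_obj *obj)
 *	{
 *		struct foo_port *port = netdev_priv(dev);
 *
 *		switch (obj->id) {
 *		case SWITCHDEV_OBJ_IPV4_FIB:
 *			if (obj->trans == SWITCHDEV_TRANS_PREPARE)
 *				return foo_fib4_can_offload(port,
 *							    &obj->u.ipv4_fib);
 *			if (obj->trans == SWITCHDEV_TRANS_ABORT)
 *				return 0;
 *			return foo_fib4_install(port, &obj->u.ipv4_fib);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */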
/**
 *	switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
 *
 *	@fi: route FIB info structure
 */
void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
	/* There was a problem installing this route to the offload
	 * device.  For now, until we come up with more refined
	 * policy handling, abruptly end IPv4 fib offloading for
	 * the entire net by flushing offload device(s) of all
	 * IPv4 routes, and mark IPv4 fib offloading broken from
	 * this point forward.
	 */

	fib_flush_external(fi->fib_net);
	fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
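/* Illustrative sketch (not part of this file): the expected caller-side
 * pairing of the helpers above.  The real callers live in the IPv4 FIB
 * code (e.g. fib_table_insert()); the variable names below are a
 * simplified stand-in.  After an abort, fib_offload_disabled is set, so
 * later switchdev_fib_ipv4_add() calls return 0 without touching the
 * hardware.
 *
 *	err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
 *				     cfg->fc_nlflags, tb->tb_id);
 *	if (err)
 *		switchdev_fib_ipv4_abort(fi);
 */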