1007f790cSJiri Pirko /* 2007f790cSJiri Pirko * net/switchdev/switchdev.c - Switch device API 3*7ea6eb3fSJiri Pirko * Copyright (c) 2014-2015 Jiri Pirko <[email protected]> 4f8f21471SScott Feldman * Copyright (c) 2014-2015 Scott Feldman <[email protected]> 5007f790cSJiri Pirko * 6007f790cSJiri Pirko * This program is free software; you can redistribute it and/or modify 7007f790cSJiri Pirko * it under the terms of the GNU General Public License as published by 8007f790cSJiri Pirko * the Free Software Foundation; either version 2 of the License, or 9007f790cSJiri Pirko * (at your option) any later version. 10007f790cSJiri Pirko */ 11007f790cSJiri Pirko 12007f790cSJiri Pirko #include <linux/kernel.h> 13007f790cSJiri Pirko #include <linux/types.h> 14007f790cSJiri Pirko #include <linux/init.h> 1503bf0c28SJiri Pirko #include <linux/mutex.h> 1603bf0c28SJiri Pirko #include <linux/notifier.h> 17007f790cSJiri Pirko #include <linux/netdevice.h> 1847f8328bSScott Feldman #include <linux/if_bridge.h> 19*7ea6eb3fSJiri Pirko #include <linux/list.h> 205e8d9049SScott Feldman #include <net/ip_fib.h> 21007f790cSJiri Pirko #include <net/switchdev.h> 22007f790cSJiri Pirko 23007f790cSJiri Pirko /** 24*7ea6eb3fSJiri Pirko * switchdev_trans_item_enqueue - Enqueue data item to transaction queue 25*7ea6eb3fSJiri Pirko * 26*7ea6eb3fSJiri Pirko * @trans: transaction 27*7ea6eb3fSJiri Pirko * @data: pointer to data being queued 28*7ea6eb3fSJiri Pirko * @destructor: data destructor 29*7ea6eb3fSJiri Pirko * @tritem: transaction item being queued 30*7ea6eb3fSJiri Pirko * 31*7ea6eb3fSJiri Pirko * Enqueue data item to transaction queue. tritem is typically placed in 32*7ea6eb3fSJiri Pirko * container pointed at by data pointer. Destructor is called on 33*7ea6eb3fSJiri Pirko * transaction abort and after successful commit phase in case 34*7ea6eb3fSJiri Pirko * the caller did not dequeue the item before.
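 *
 * For illustration only (a sketch, not code from an in-tree driver): a
 * driver can pre-allocate memory in the prepare phase, queue it on the
 * transaction, and claim it again in the commit phase.  The foo_port,
 * foo_vlan_entry and foo_hw_write_vlan() names below are hypothetical.
 *
 *	static int foo_port_vlan_prepare(struct foo_port *port,
 *					 struct switchdev_trans *trans)
 *	{
 *		struct foo_vlan_entry *entry;
 *
 *		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *		if (!entry)
 *			return -ENOMEM;	// fail in prepare, never in commit
 *		// kfree() runs automatically if the transaction is aborted
 *		switchdev_trans_item_enqueue(trans, entry, kfree,
 *					     &entry->tritem);
 *		return 0;
 *	}
 *
 *	static int foo_port_vlan_commit(struct foo_port *port,
 *					struct switchdev_trans *trans)
 *	{
 *		// after dequeue the driver owns entry and must free it itself
 *		struct foo_vlan_entry *entry = switchdev_trans_item_dequeue(trans);
 *
 *		return foo_hw_write_vlan(port, entry);
 *	}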
35*7ea6eb3fSJiri Pirko */ 36*7ea6eb3fSJiri Pirko void switchdev_trans_item_enqueue(struct switchdev_trans *trans, 37*7ea6eb3fSJiri Pirko void *data, void (*destructor)(void const *), 38*7ea6eb3fSJiri Pirko struct switchdev_trans_item *tritem) 39*7ea6eb3fSJiri Pirko { 40*7ea6eb3fSJiri Pirko tritem->data = data; 41*7ea6eb3fSJiri Pirko tritem->destructor = destructor; 42*7ea6eb3fSJiri Pirko list_add_tail(&tritem->list, &trans->item_list); 43*7ea6eb3fSJiri Pirko } 44*7ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue); 45*7ea6eb3fSJiri Pirko 46*7ea6eb3fSJiri Pirko static struct switchdev_trans_item * 47*7ea6eb3fSJiri Pirko __switchdev_trans_item_dequeue(struct switchdev_trans *trans) 48*7ea6eb3fSJiri Pirko { 49*7ea6eb3fSJiri Pirko struct switchdev_trans_item *tritem; 50*7ea6eb3fSJiri Pirko 51*7ea6eb3fSJiri Pirko if (list_empty(&trans->item_list)) 52*7ea6eb3fSJiri Pirko return NULL; 53*7ea6eb3fSJiri Pirko tritem = list_first_entry(&trans->item_list, 54*7ea6eb3fSJiri Pirko struct switchdev_trans_item, list); 55*7ea6eb3fSJiri Pirko list_del(&tritem->list); 56*7ea6eb3fSJiri Pirko return tritem; 57*7ea6eb3fSJiri Pirko } 58*7ea6eb3fSJiri Pirko 59*7ea6eb3fSJiri Pirko /** 60*7ea6eb3fSJiri Pirko * switchdev_trans_item_dequeue - Dequeue data item from transaction queue 61*7ea6eb3fSJiri Pirko * 62*7ea6eb3fSJiri Pirko * @trans: transaction 63*7ea6eb3fSJiri Pirko */ 64*7ea6eb3fSJiri Pirko void *switchdev_trans_item_dequeue(struct switchdev_trans *trans) 65*7ea6eb3fSJiri Pirko { 66*7ea6eb3fSJiri Pirko struct switchdev_trans_item *tritem; 67*7ea6eb3fSJiri Pirko 68*7ea6eb3fSJiri Pirko tritem = __switchdev_trans_item_dequeue(trans); 69*7ea6eb3fSJiri Pirko BUG_ON(!tritem); 70*7ea6eb3fSJiri Pirko return tritem->data; 71*7ea6eb3fSJiri Pirko } 72*7ea6eb3fSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue); 73*7ea6eb3fSJiri Pirko 74*7ea6eb3fSJiri Pirko static void switchdev_trans_init(struct switchdev_trans *trans) 75*7ea6eb3fSJiri Pirko { 76*7ea6eb3fSJiri Pirko INIT_LIST_HEAD(&trans->item_list); 77*7ea6eb3fSJiri Pirko } 78*7ea6eb3fSJiri Pirko 79*7ea6eb3fSJiri Pirko static void switchdev_trans_items_destroy(struct switchdev_trans *trans) 80*7ea6eb3fSJiri Pirko { 81*7ea6eb3fSJiri Pirko struct switchdev_trans_item *tritem; 82*7ea6eb3fSJiri Pirko 83*7ea6eb3fSJiri Pirko while ((tritem = __switchdev_trans_item_dequeue(trans))) 84*7ea6eb3fSJiri Pirko tritem->destructor(tritem->data); 85*7ea6eb3fSJiri Pirko } 86*7ea6eb3fSJiri Pirko 87*7ea6eb3fSJiri Pirko static void switchdev_trans_items_warn_destroy(struct net_device *dev, 88*7ea6eb3fSJiri Pirko struct switchdev_trans *trans) 89*7ea6eb3fSJiri Pirko { 90*7ea6eb3fSJiri Pirko WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n", 91*7ea6eb3fSJiri Pirko dev->name); 92*7ea6eb3fSJiri Pirko switchdev_trans_items_destroy(trans); 93*7ea6eb3fSJiri Pirko } 94*7ea6eb3fSJiri Pirko 95*7ea6eb3fSJiri Pirko /** 963094333dSScott Feldman * switchdev_port_attr_get - Get port attribute 973094333dSScott Feldman * 983094333dSScott Feldman * @dev: port device 993094333dSScott Feldman * @attr: attribute to get 1003094333dSScott Feldman */ 1013094333dSScott Feldman int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) 1023094333dSScott Feldman { 1033094333dSScott Feldman const struct switchdev_ops *ops = dev->switchdev_ops; 1043094333dSScott Feldman struct net_device *lower_dev; 1053094333dSScott Feldman struct list_head *iter; 1063094333dSScott Feldman struct switchdev_attr first = { 1073094333dSScott Feldman .id = 
SWITCHDEV_ATTR_UNDEFINED 1083094333dSScott Feldman }; 1093094333dSScott Feldman int err = -EOPNOTSUPP; 1103094333dSScott Feldman 1113094333dSScott Feldman if (ops && ops->switchdev_port_attr_get) 1123094333dSScott Feldman return ops->switchdev_port_attr_get(dev, attr); 1133094333dSScott Feldman 1143094333dSScott Feldman if (attr->flags & SWITCHDEV_F_NO_RECURSE) 1153094333dSScott Feldman return err; 1163094333dSScott Feldman 1173094333dSScott Feldman /* Switch device port(s) may be stacked under 1183094333dSScott Feldman * bond/team/vlan dev, so recurse down to get attr on 1193094333dSScott Feldman * each port. Return -ENODATA if attr values don't 1203094333dSScott Feldman * compare across ports. 1213094333dSScott Feldman */ 1223094333dSScott Feldman 1233094333dSScott Feldman netdev_for_each_lower_dev(dev, lower_dev, iter) { 1243094333dSScott Feldman err = switchdev_port_attr_get(lower_dev, attr); 1253094333dSScott Feldman if (err) 1263094333dSScott Feldman break; 1273094333dSScott Feldman if (first.id == SWITCHDEV_ATTR_UNDEFINED) 1283094333dSScott Feldman first = *attr; 1293094333dSScott Feldman else if (memcmp(&first, attr, sizeof(*attr))) 1303094333dSScott Feldman return -ENODATA; 1313094333dSScott Feldman } 1323094333dSScott Feldman 1333094333dSScott Feldman return err; 1343094333dSScott Feldman } 1353094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_get); 1363094333dSScott Feldman 1373094333dSScott Feldman static int __switchdev_port_attr_set(struct net_device *dev, 138*7ea6eb3fSJiri Pirko struct switchdev_attr *attr, 139*7ea6eb3fSJiri Pirko struct switchdev_trans *trans) 1403094333dSScott Feldman { 1413094333dSScott Feldman const struct switchdev_ops *ops = dev->switchdev_ops; 1423094333dSScott Feldman struct net_device *lower_dev; 1433094333dSScott Feldman struct list_head *iter; 1443094333dSScott Feldman int err = -EOPNOTSUPP; 1453094333dSScott Feldman 1463094333dSScott Feldman if (ops && ops->switchdev_port_attr_set) 147*7ea6eb3fSJiri Pirko return ops->switchdev_port_attr_set(dev, attr, trans); 1483094333dSScott Feldman 1493094333dSScott Feldman if (attr->flags & SWITCHDEV_F_NO_RECURSE) 1503094333dSScott Feldman return err; 1513094333dSScott Feldman 1523094333dSScott Feldman /* Switch device port(s) may be stacked under 1533094333dSScott Feldman * bond/team/vlan dev, so recurse down to set attr on 1543094333dSScott Feldman * each port. 
1553094333dSScott Feldman */ 1563094333dSScott Feldman 1573094333dSScott Feldman netdev_for_each_lower_dev(dev, lower_dev, iter) { 158*7ea6eb3fSJiri Pirko err = __switchdev_port_attr_set(lower_dev, attr, trans); 1593094333dSScott Feldman if (err) 1603094333dSScott Feldman break; 1613094333dSScott Feldman } 1623094333dSScott Feldman 1633094333dSScott Feldman return err; 1643094333dSScott Feldman } 1653094333dSScott Feldman 1663094333dSScott Feldman struct switchdev_attr_set_work { 1673094333dSScott Feldman struct work_struct work; 1683094333dSScott Feldman struct net_device *dev; 1693094333dSScott Feldman struct switchdev_attr attr; 1703094333dSScott Feldman }; 1713094333dSScott Feldman 1723094333dSScott Feldman static void switchdev_port_attr_set_work(struct work_struct *work) 1733094333dSScott Feldman { 1743094333dSScott Feldman struct switchdev_attr_set_work *asw = 1753094333dSScott Feldman container_of(work, struct switchdev_attr_set_work, work); 1763094333dSScott Feldman int err; 1773094333dSScott Feldman 1783094333dSScott Feldman rtnl_lock(); 1793094333dSScott Feldman err = switchdev_port_attr_set(asw->dev, &asw->attr); 18057225e77SScott Feldman if (err && err != -EOPNOTSUPP) 18157225e77SScott Feldman netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n", 18257225e77SScott Feldman err, asw->attr.id); 1833094333dSScott Feldman rtnl_unlock(); 1843094333dSScott Feldman 1853094333dSScott Feldman dev_put(asw->dev); 1863094333dSScott Feldman kfree(work); 1873094333dSScott Feldman } 1883094333dSScott Feldman 1893094333dSScott Feldman static int switchdev_port_attr_set_defer(struct net_device *dev, 1903094333dSScott Feldman struct switchdev_attr *attr) 1913094333dSScott Feldman { 1923094333dSScott Feldman struct switchdev_attr_set_work *asw; 1933094333dSScott Feldman 1943094333dSScott Feldman asw = kmalloc(sizeof(*asw), GFP_ATOMIC); 1953094333dSScott Feldman if (!asw) 1963094333dSScott Feldman return -ENOMEM; 1973094333dSScott Feldman 1983094333dSScott Feldman INIT_WORK(&asw->work, switchdev_port_attr_set_work); 1993094333dSScott Feldman 2003094333dSScott Feldman dev_hold(dev); 2013094333dSScott Feldman asw->dev = dev; 2023094333dSScott Feldman memcpy(&asw->attr, attr, sizeof(asw->attr)); 2033094333dSScott Feldman 2043094333dSScott Feldman schedule_work(&asw->work); 2053094333dSScott Feldman 2063094333dSScott Feldman return 0; 2073094333dSScott Feldman } 2083094333dSScott Feldman 2093094333dSScott Feldman /** 2103094333dSScott Feldman * switchdev_port_attr_set - Set port attribute 2113094333dSScott Feldman * 2123094333dSScott Feldman * @dev: port device 2133094333dSScott Feldman * @attr: attribute to set 2143094333dSScott Feldman * 2153094333dSScott Feldman * Use a 2-phase prepare-commit transaction model to ensure 2163094333dSScott Feldman * system is not left in a partially updated state due to 2173094333dSScott Feldman * failure from driver/device. 2183094333dSScott Feldman */ 2193094333dSScott Feldman int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) 2203094333dSScott Feldman { 221*7ea6eb3fSJiri Pirko struct switchdev_trans trans; 2223094333dSScott Feldman int err; 2233094333dSScott Feldman 2243094333dSScott Feldman if (!rtnl_is_locked()) { 2253094333dSScott Feldman /* Running prepare-commit transaction across stacked 2263094333dSScott Feldman * devices requires nothing moves, so if rtnl_lock is 2273094333dSScott Feldman * not held, schedule a worker thread to hold rtnl_lock 2283094333dSScott Feldman * while setting attr. 
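	 *
	 * For example (sketch only; the attribute id and union field names
	 * are assumed from the switchdev header of this era), the bridge can
	 * push an STP state change from timer context:
	 *
	 *	struct switchdev_attr attr = {
	 *		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
	 *		.u.stp_state = BR_STATE_FORWARDING,
	 *	};
	 *
	 *	err = switchdev_port_attr_set(port_dev, &attr);
	 *
	 * in which case the set is simply deferred to the worker below.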
2293094333dSScott Feldman */ 2303094333dSScott Feldman 2313094333dSScott Feldman return switchdev_port_attr_set_defer(dev, attr); 2323094333dSScott Feldman } 2333094333dSScott Feldman 234*7ea6eb3fSJiri Pirko switchdev_trans_init(&trans); 235*7ea6eb3fSJiri Pirko 2363094333dSScott Feldman /* Phase I: prepare for attr set. Driver/device should fail 2373094333dSScott Feldman * here if there are going to be issues in the commit phase, 2383094333dSScott Feldman * such as lack of resources or support. The driver/device 2393094333dSScott Feldman * should reserve resources needed for the commit phase here, 2403094333dSScott Feldman * but should not commit the attr. 2413094333dSScott Feldman */ 2423094333dSScott Feldman 24369f5df49SJiri Pirko attr->trans_ph = SWITCHDEV_TRANS_PREPARE; 244*7ea6eb3fSJiri Pirko err = __switchdev_port_attr_set(dev, attr, &trans); 2453094333dSScott Feldman if (err) { 2463094333dSScott Feldman /* Prepare phase failed: abort the transaction. Any 2473094333dSScott Feldman * resources reserved in the prepare phase are 2483094333dSScott Feldman * released. 2493094333dSScott Feldman */ 2503094333dSScott Feldman 2512ee94014SVivien Didelot if (err != -EOPNOTSUPP) { 25269f5df49SJiri Pirko attr->trans_ph = SWITCHDEV_TRANS_ABORT; 253*7ea6eb3fSJiri Pirko __switchdev_port_attr_set(dev, attr, &trans); 254*7ea6eb3fSJiri Pirko switchdev_trans_items_destroy(&trans); 2552ee94014SVivien Didelot } 2563094333dSScott Feldman 2573094333dSScott Feldman return err; 2583094333dSScott Feldman } 2593094333dSScott Feldman 2603094333dSScott Feldman /* Phase II: commit attr set. This cannot fail as a fault 2613094333dSScott Feldman * of driver/device. If it does, it's a bug in the driver/device 2623094333dSScott Feldman * because the driver said everything was OK in phase I. 2633094333dSScott Feldman */ 2643094333dSScott Feldman 26569f5df49SJiri Pirko attr->trans_ph = SWITCHDEV_TRANS_COMMIT; 266*7ea6eb3fSJiri Pirko err = __switchdev_port_attr_set(dev, attr, &trans); 267e9fdaec0SScott Feldman WARN(err, "%s: Commit of attribute (id=%d) failed.\n", 268e9fdaec0SScott Feldman dev->name, attr->id); 269*7ea6eb3fSJiri Pirko switchdev_trans_items_warn_destroy(dev, &trans); 2703094333dSScott Feldman 2713094333dSScott Feldman return err; 2723094333dSScott Feldman } 2733094333dSScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_attr_set); 2743094333dSScott Feldman 27522c1f67eSScott Feldman static int __switchdev_port_obj_add(struct net_device *dev, 276*7ea6eb3fSJiri Pirko struct switchdev_obj *obj, 277*7ea6eb3fSJiri Pirko struct switchdev_trans *trans) 278491d0f15SScott Feldman { 279491d0f15SScott Feldman const struct switchdev_ops *ops = dev->switchdev_ops; 280491d0f15SScott Feldman struct net_device *lower_dev; 281491d0f15SScott Feldman struct list_head *iter; 282491d0f15SScott Feldman int err = -EOPNOTSUPP; 283491d0f15SScott Feldman 284491d0f15SScott Feldman if (ops && ops->switchdev_port_obj_add) 285*7ea6eb3fSJiri Pirko return ops->switchdev_port_obj_add(dev, obj, trans); 286491d0f15SScott Feldman 287491d0f15SScott Feldman /* Switch device port(s) may be stacked under 288491d0f15SScott Feldman * bond/team/vlan dev, so recurse down to add object on 289491d0f15SScott Feldman * each port.
290491d0f15SScott Feldman */ 291491d0f15SScott Feldman 292491d0f15SScott Feldman netdev_for_each_lower_dev(dev, lower_dev, iter) { 293*7ea6eb3fSJiri Pirko err = __switchdev_port_obj_add(lower_dev, obj, trans); 294491d0f15SScott Feldman if (err) 295491d0f15SScott Feldman break; 296491d0f15SScott Feldman } 297491d0f15SScott Feldman 298491d0f15SScott Feldman return err; 299491d0f15SScott Feldman } 300491d0f15SScott Feldman 301491d0f15SScott Feldman /** 302491d0f15SScott Feldman * switchdev_port_obj_add - Add port object 303491d0f15SScott Feldman * 304491d0f15SScott Feldman * @dev: port device 305491d0f15SScott Feldman * @obj: object to add 306491d0f15SScott Feldman * 307491d0f15SScott Feldman * Use a 2-phase prepare-commit transaction model to ensure 308491d0f15SScott Feldman * system is not left in a partially updated state due to 309491d0f15SScott Feldman * failure from driver/device. 310491d0f15SScott Feldman * 311491d0f15SScott Feldman * rtnl_lock must be held. 312491d0f15SScott Feldman */ 313491d0f15SScott Feldman int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj) 314491d0f15SScott Feldman { 315*7ea6eb3fSJiri Pirko struct switchdev_trans trans; 316491d0f15SScott Feldman int err; 317491d0f15SScott Feldman 318491d0f15SScott Feldman ASSERT_RTNL(); 319491d0f15SScott Feldman 320*7ea6eb3fSJiri Pirko switchdev_trans_init(&trans); 321*7ea6eb3fSJiri Pirko 322491d0f15SScott Feldman /* Phase I: prepare for obj add. Driver/device should fail 323491d0f15SScott Feldman * here if there are going to be issues in the commit phase, 324491d0f15SScott Feldman * such as lack of resources or support. The driver/device 325491d0f15SScott Feldman * should reserve resources needed for the commit phase here, 326491d0f15SScott Feldman * but should not commit the obj. 327491d0f15SScott Feldman */ 328491d0f15SScott Feldman 32969f5df49SJiri Pirko obj->trans_ph = SWITCHDEV_TRANS_PREPARE; 330*7ea6eb3fSJiri Pirko err = __switchdev_port_obj_add(dev, obj, &trans); 331491d0f15SScott Feldman if (err) { 332491d0f15SScott Feldman /* Prepare phase failed: abort the transaction. Any 333491d0f15SScott Feldman * resources reserved in the prepare phase are 334491d0f15SScott Feldman * released. 335491d0f15SScott Feldman */ 336491d0f15SScott Feldman 3372ee94014SVivien Didelot if (err != -EOPNOTSUPP) { 33869f5df49SJiri Pirko obj->trans_ph = SWITCHDEV_TRANS_ABORT; 339*7ea6eb3fSJiri Pirko __switchdev_port_obj_add(dev, obj, &trans); 340*7ea6eb3fSJiri Pirko switchdev_trans_items_destroy(&trans); 3412ee94014SVivien Didelot } 342491d0f15SScott Feldman 343491d0f15SScott Feldman return err; 344491d0f15SScott Feldman } 345491d0f15SScott Feldman 346491d0f15SScott Feldman /* Phase II: commit obj add. This cannot fail as a fault 347491d0f15SScott Feldman * of driver/device. If it does, it's a bug in the driver/device 348491d0f15SScott Feldman * because the driver said everything was OK in phase I.
349491d0f15SScott Feldman */ 350491d0f15SScott Feldman 35169f5df49SJiri Pirko obj->trans_ph = SWITCHDEV_TRANS_COMMIT; 352*7ea6eb3fSJiri Pirko err = __switchdev_port_obj_add(dev, obj, &trans); 353491d0f15SScott Feldman WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id); 354*7ea6eb3fSJiri Pirko switchdev_trans_items_warn_destroy(dev, &trans); 355491d0f15SScott Feldman 356491d0f15SScott Feldman return err; 357491d0f15SScott Feldman } 358491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_add); 359491d0f15SScott Feldman 360491d0f15SScott Feldman /** 361491d0f15SScott Feldman * switchdev_port_obj_del - Delete port object 362491d0f15SScott Feldman * 363491d0f15SScott Feldman * @dev: port device 364491d0f15SScott Feldman * @obj: object to delete 365491d0f15SScott Feldman */ 366491d0f15SScott Feldman int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj) 367491d0f15SScott Feldman { 368491d0f15SScott Feldman const struct switchdev_ops *ops = dev->switchdev_ops; 369491d0f15SScott Feldman struct net_device *lower_dev; 370491d0f15SScott Feldman struct list_head *iter; 371491d0f15SScott Feldman int err = -EOPNOTSUPP; 372491d0f15SScott Feldman 373491d0f15SScott Feldman if (ops && ops->switchdev_port_obj_del) 374491d0f15SScott Feldman return ops->switchdev_port_obj_del(dev, obj); 375491d0f15SScott Feldman 376491d0f15SScott Feldman /* Switch device port(s) may be stacked under 377491d0f15SScott Feldman * bond/team/vlan dev, so recurse down to delete object on 378491d0f15SScott Feldman * each port. 379491d0f15SScott Feldman */ 380491d0f15SScott Feldman 381491d0f15SScott Feldman netdev_for_each_lower_dev(dev, lower_dev, iter) { 382491d0f15SScott Feldman err = switchdev_port_obj_del(lower_dev, obj); 383491d0f15SScott Feldman if (err) 384491d0f15SScott Feldman break; 385491d0f15SScott Feldman } 386491d0f15SScott Feldman 387491d0f15SScott Feldman return err; 388491d0f15SScott Feldman } 389491d0f15SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_obj_del); 390491d0f15SScott Feldman 39145d4122cSSamudrala, Sridhar /** 39245d4122cSSamudrala, Sridhar * switchdev_port_obj_dump - Dump port objects 39345d4122cSSamudrala, Sridhar * 39445d4122cSSamudrala, Sridhar * @dev: port device 39545d4122cSSamudrala, Sridhar * @obj: object to dump 39645d4122cSSamudrala, Sridhar */ 39745d4122cSSamudrala, Sridhar int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj) 39845d4122cSSamudrala, Sridhar { 39945d4122cSSamudrala, Sridhar const struct switchdev_ops *ops = dev->switchdev_ops; 40045d4122cSSamudrala, Sridhar struct net_device *lower_dev; 40145d4122cSSamudrala, Sridhar struct list_head *iter; 40245d4122cSSamudrala, Sridhar int err = -EOPNOTSUPP; 40345d4122cSSamudrala, Sridhar 40445d4122cSSamudrala, Sridhar if (ops && ops->switchdev_port_obj_dump) 40545d4122cSSamudrala, Sridhar return ops->switchdev_port_obj_dump(dev, obj); 40645d4122cSSamudrala, Sridhar 40745d4122cSSamudrala, Sridhar /* Switch device port(s) may be stacked under 40845d4122cSSamudrala, Sridhar * bond/team/vlan dev, so recurse down to dump objects on 40945d4122cSSamudrala, Sridhar * first port at bottom of stack. 
41045d4122cSSamudrala, Sridhar */ 41145d4122cSSamudrala, Sridhar 41245d4122cSSamudrala, Sridhar netdev_for_each_lower_dev(dev, lower_dev, iter) { 41345d4122cSSamudrala, Sridhar err = switchdev_port_obj_dump(lower_dev, obj); 41445d4122cSSamudrala, Sridhar break; 41545d4122cSSamudrala, Sridhar } 41645d4122cSSamudrala, Sridhar 41745d4122cSSamudrala, Sridhar return err; 41845d4122cSSamudrala, Sridhar } 41945d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_obj_dump); 42045d4122cSSamudrala, Sridhar 421ebb9a03aSJiri Pirko static DEFINE_MUTEX(switchdev_mutex); 422ebb9a03aSJiri Pirko static RAW_NOTIFIER_HEAD(switchdev_notif_chain); 42303bf0c28SJiri Pirko 42403bf0c28SJiri Pirko /** 425ebb9a03aSJiri Pirko * register_switchdev_notifier - Register notifier 42603bf0c28SJiri Pirko * @nb: notifier_block 42703bf0c28SJiri Pirko * 42803bf0c28SJiri Pirko * Register switch device notifier. This should be used by code 42903bf0c28SJiri Pirko * which needs to monitor events happening in particular device. 43003bf0c28SJiri Pirko * Return values are same as for atomic_notifier_chain_register(). 43103bf0c28SJiri Pirko */ 432ebb9a03aSJiri Pirko int register_switchdev_notifier(struct notifier_block *nb) 43303bf0c28SJiri Pirko { 43403bf0c28SJiri Pirko int err; 43503bf0c28SJiri Pirko 436ebb9a03aSJiri Pirko mutex_lock(&switchdev_mutex); 437ebb9a03aSJiri Pirko err = raw_notifier_chain_register(&switchdev_notif_chain, nb); 438ebb9a03aSJiri Pirko mutex_unlock(&switchdev_mutex); 43903bf0c28SJiri Pirko return err; 44003bf0c28SJiri Pirko } 441ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(register_switchdev_notifier); 44203bf0c28SJiri Pirko 44303bf0c28SJiri Pirko /** 444ebb9a03aSJiri Pirko * unregister_switchdev_notifier - Unregister notifier 44503bf0c28SJiri Pirko * @nb: notifier_block 44603bf0c28SJiri Pirko * 44703bf0c28SJiri Pirko * Unregister switch device notifier. 44803bf0c28SJiri Pirko * Return values are same as for atomic_notifier_chain_unregister(). 44903bf0c28SJiri Pirko */ 450ebb9a03aSJiri Pirko int unregister_switchdev_notifier(struct notifier_block *nb) 45103bf0c28SJiri Pirko { 45203bf0c28SJiri Pirko int err; 45303bf0c28SJiri Pirko 454ebb9a03aSJiri Pirko mutex_lock(&switchdev_mutex); 455ebb9a03aSJiri Pirko err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb); 456ebb9a03aSJiri Pirko mutex_unlock(&switchdev_mutex); 45703bf0c28SJiri Pirko return err; 45803bf0c28SJiri Pirko } 459ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); 46003bf0c28SJiri Pirko 46103bf0c28SJiri Pirko /** 462ebb9a03aSJiri Pirko * call_switchdev_notifiers - Call notifiers 46303bf0c28SJiri Pirko * @val: value passed unmodified to notifier function 46403bf0c28SJiri Pirko * @dev: port device 46503bf0c28SJiri Pirko * @info: notifier information data 46603bf0c28SJiri Pirko * 46703bf0c28SJiri Pirko * Call all network notifier blocks. This should be called by driver 46803bf0c28SJiri Pirko * when it needs to propagate hardware event. 46903bf0c28SJiri Pirko * Return values are same as for atomic_notifier_call_chain(). 
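 *
 * Sketch of a typical driver-side use (struct switchdev_notifier_fdb_info
 * and the SWITCHDEV_FDB_ADD event are assumed to match the switchdev
 * header of this era; addr, vid and port_dev come from the driver):
 *
 *	struct switchdev_notifier_fdb_info fdb_info = {
 *		.addr = addr,
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev, &fdb_info.info);
 *
 * The bridge, for instance, listens on this chain and feeds such entries
 * into its FDB as externally learned addresses.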
47003bf0c28SJiri Pirko */ 471ebb9a03aSJiri Pirko int call_switchdev_notifiers(unsigned long val, struct net_device *dev, 472ebb9a03aSJiri Pirko struct switchdev_notifier_info *info) 47303bf0c28SJiri Pirko { 47403bf0c28SJiri Pirko int err; 47503bf0c28SJiri Pirko 47603bf0c28SJiri Pirko info->dev = dev; 477ebb9a03aSJiri Pirko mutex_lock(&switchdev_mutex); 478ebb9a03aSJiri Pirko err = raw_notifier_call_chain(&switchdev_notif_chain, val, info); 479ebb9a03aSJiri Pirko mutex_unlock(&switchdev_mutex); 48003bf0c28SJiri Pirko return err; 48103bf0c28SJiri Pirko } 482ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(call_switchdev_notifiers); 4838a44dbb2SRoopa Prabhu 4847d4f8d87SScott Feldman struct switchdev_vlan_dump { 4857d4f8d87SScott Feldman struct switchdev_obj obj; 4867d4f8d87SScott Feldman struct sk_buff *skb; 4877d4f8d87SScott Feldman u32 filter_mask; 4887d4f8d87SScott Feldman u16 flags; 4897d4f8d87SScott Feldman u16 begin; 4907d4f8d87SScott Feldman u16 end; 4917d4f8d87SScott Feldman }; 4927d4f8d87SScott Feldman 4937d4f8d87SScott Feldman static int switchdev_port_vlan_dump_put(struct net_device *dev, 4947d4f8d87SScott Feldman struct switchdev_vlan_dump *dump) 4957d4f8d87SScott Feldman { 4967d4f8d87SScott Feldman struct bridge_vlan_info vinfo; 4977d4f8d87SScott Feldman 4987d4f8d87SScott Feldman vinfo.flags = dump->flags; 4997d4f8d87SScott Feldman 5007d4f8d87SScott Feldman if (dump->begin == 0 && dump->end == 0) { 5017d4f8d87SScott Feldman return 0; 5027d4f8d87SScott Feldman } else if (dump->begin == dump->end) { 5037d4f8d87SScott Feldman vinfo.vid = dump->begin; 5047d4f8d87SScott Feldman if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, 5057d4f8d87SScott Feldman sizeof(vinfo), &vinfo)) 5067d4f8d87SScott Feldman return -EMSGSIZE; 5077d4f8d87SScott Feldman } else { 5087d4f8d87SScott Feldman vinfo.vid = dump->begin; 5097d4f8d87SScott Feldman vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN; 5107d4f8d87SScott Feldman if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, 5117d4f8d87SScott Feldman sizeof(vinfo), &vinfo)) 5127d4f8d87SScott Feldman return -EMSGSIZE; 5137d4f8d87SScott Feldman vinfo.vid = dump->end; 5147d4f8d87SScott Feldman vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN; 5157d4f8d87SScott Feldman vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END; 5167d4f8d87SScott Feldman if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, 5177d4f8d87SScott Feldman sizeof(vinfo), &vinfo)) 5187d4f8d87SScott Feldman return -EMSGSIZE; 5197d4f8d87SScott Feldman } 5207d4f8d87SScott Feldman 5217d4f8d87SScott Feldman return 0; 5227d4f8d87SScott Feldman } 5237d4f8d87SScott Feldman 5247d4f8d87SScott Feldman static int switchdev_port_vlan_dump_cb(struct net_device *dev, 5257d4f8d87SScott Feldman struct switchdev_obj *obj) 5267d4f8d87SScott Feldman { 5277d4f8d87SScott Feldman struct switchdev_vlan_dump *dump = 5287d4f8d87SScott Feldman container_of(obj, struct switchdev_vlan_dump, obj); 5297d4f8d87SScott Feldman struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan; 5307d4f8d87SScott Feldman int err = 0; 5317d4f8d87SScott Feldman 5327d4f8d87SScott Feldman if (vlan->vid_begin > vlan->vid_end) 5337d4f8d87SScott Feldman return -EINVAL; 5347d4f8d87SScott Feldman 5357d4f8d87SScott Feldman if (dump->filter_mask & RTEXT_FILTER_BRVLAN) { 5367d4f8d87SScott Feldman dump->flags = vlan->flags; 5377d4f8d87SScott Feldman for (dump->begin = dump->end = vlan->vid_begin; 5387d4f8d87SScott Feldman dump->begin <= vlan->vid_end; 5397d4f8d87SScott Feldman dump->begin++, dump->end++) { 5407d4f8d87SScott Feldman err = switchdev_port_vlan_dump_put(dev, dump); 5417d4f8d87SScott 
Feldman if (err) 5427d4f8d87SScott Feldman return err; 5437d4f8d87SScott Feldman } 5447d4f8d87SScott Feldman } else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) { 5457d4f8d87SScott Feldman if (dump->begin > vlan->vid_begin && 5467d4f8d87SScott Feldman dump->begin >= vlan->vid_end) { 5477d4f8d87SScott Feldman if ((dump->begin - 1) == vlan->vid_end && 5487d4f8d87SScott Feldman dump->flags == vlan->flags) { 5497d4f8d87SScott Feldman /* prepend */ 5507d4f8d87SScott Feldman dump->begin = vlan->vid_begin; 5517d4f8d87SScott Feldman } else { 5527d4f8d87SScott Feldman err = switchdev_port_vlan_dump_put(dev, dump); 5537d4f8d87SScott Feldman dump->flags = vlan->flags; 5547d4f8d87SScott Feldman dump->begin = vlan->vid_begin; 5557d4f8d87SScott Feldman dump->end = vlan->vid_end; 5567d4f8d87SScott Feldman } 5577d4f8d87SScott Feldman } else if (dump->end <= vlan->vid_begin && 5587d4f8d87SScott Feldman dump->end < vlan->vid_end) { 5597d4f8d87SScott Feldman if ((dump->end + 1) == vlan->vid_begin && 5607d4f8d87SScott Feldman dump->flags == vlan->flags) { 5617d4f8d87SScott Feldman /* append */ 5627d4f8d87SScott Feldman dump->end = vlan->vid_end; 5637d4f8d87SScott Feldman } else { 5647d4f8d87SScott Feldman err = switchdev_port_vlan_dump_put(dev, dump); 5657d4f8d87SScott Feldman dump->flags = vlan->flags; 5667d4f8d87SScott Feldman dump->begin = vlan->vid_begin; 5677d4f8d87SScott Feldman dump->end = vlan->vid_end; 5687d4f8d87SScott Feldman } 5697d4f8d87SScott Feldman } else { 5707d4f8d87SScott Feldman err = -EINVAL; 5717d4f8d87SScott Feldman } 5727d4f8d87SScott Feldman } 5737d4f8d87SScott Feldman 5747d4f8d87SScott Feldman return err; 5757d4f8d87SScott Feldman } 5767d4f8d87SScott Feldman 5777d4f8d87SScott Feldman static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev, 5787d4f8d87SScott Feldman u32 filter_mask) 5797d4f8d87SScott Feldman { 5807d4f8d87SScott Feldman struct switchdev_vlan_dump dump = { 5817d4f8d87SScott Feldman .obj = { 5827d4f8d87SScott Feldman .id = SWITCHDEV_OBJ_PORT_VLAN, 5837d4f8d87SScott Feldman .cb = switchdev_port_vlan_dump_cb, 5847d4f8d87SScott Feldman }, 5857d4f8d87SScott Feldman .skb = skb, 5867d4f8d87SScott Feldman .filter_mask = filter_mask, 5877d4f8d87SScott Feldman }; 5887d4f8d87SScott Feldman int err = 0; 5897d4f8d87SScott Feldman 5907d4f8d87SScott Feldman if ((filter_mask & RTEXT_FILTER_BRVLAN) || 5917d4f8d87SScott Feldman (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { 5927d4f8d87SScott Feldman err = switchdev_port_obj_dump(dev, &dump.obj); 5937d4f8d87SScott Feldman if (err) 5947d4f8d87SScott Feldman goto err_out; 5957d4f8d87SScott Feldman if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) 5967d4f8d87SScott Feldman /* last one */ 5977d4f8d87SScott Feldman err = switchdev_port_vlan_dump_put(dev, &dump); 5987d4f8d87SScott Feldman } 5997d4f8d87SScott Feldman 6007d4f8d87SScott Feldman err_out: 6017d4f8d87SScott Feldman return err == -EOPNOTSUPP ? 0 : err; 6027d4f8d87SScott Feldman } 6037d4f8d87SScott Feldman 6048793d0a6SScott Feldman /** 6058793d0a6SScott Feldman * switchdev_port_bridge_getlink - Get bridge port attributes 6068793d0a6SScott Feldman * 6078793d0a6SScott Feldman * @dev: port device 6088793d0a6SScott Feldman * 6098793d0a6SScott Feldman * Called for SELF on rtnl_bridge_getlink to get bridge port 6108793d0a6SScott Feldman * attributes. 
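 *
 * A driver normally plugs this helper (and the setlink/dellink helpers
 * below) directly into its netdev ops, for example (sketch only,
 * foo_port_netdev_ops is hypothetical):
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *	};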
6118793d0a6SScott Feldman */ 6128793d0a6SScott Feldman int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 6138793d0a6SScott Feldman struct net_device *dev, u32 filter_mask, 6148793d0a6SScott Feldman int nlflags) 6158793d0a6SScott Feldman { 6168793d0a6SScott Feldman struct switchdev_attr attr = { 6178793d0a6SScott Feldman .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS, 6188793d0a6SScott Feldman }; 6198793d0a6SScott Feldman u16 mode = BRIDGE_MODE_UNDEF; 6208793d0a6SScott Feldman u32 mask = BR_LEARNING | BR_LEARNING_SYNC; 6218793d0a6SScott Feldman int err; 6228793d0a6SScott Feldman 6238793d0a6SScott Feldman err = switchdev_port_attr_get(dev, &attr); 6245c8079d0SVivien Didelot if (err && err != -EOPNOTSUPP) 6258793d0a6SScott Feldman return err; 6268793d0a6SScott Feldman 6278793d0a6SScott Feldman return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 6287d4f8d87SScott Feldman attr.u.brport_flags, mask, nlflags, 6297d4f8d87SScott Feldman filter_mask, switchdev_port_vlan_fill); 6308793d0a6SScott Feldman } 6318793d0a6SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink); 6328793d0a6SScott Feldman 63347f8328bSScott Feldman static int switchdev_port_br_setflag(struct net_device *dev, 63447f8328bSScott Feldman struct nlattr *nlattr, 63547f8328bSScott Feldman unsigned long brport_flag) 63647f8328bSScott Feldman { 63747f8328bSScott Feldman struct switchdev_attr attr = { 63847f8328bSScott Feldman .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS, 63947f8328bSScott Feldman }; 64047f8328bSScott Feldman u8 flag = nla_get_u8(nlattr); 64147f8328bSScott Feldman int err; 64247f8328bSScott Feldman 64347f8328bSScott Feldman err = switchdev_port_attr_get(dev, &attr); 64447f8328bSScott Feldman if (err) 64547f8328bSScott Feldman return err; 64647f8328bSScott Feldman 64747f8328bSScott Feldman if (flag) 64842275bd8SScott Feldman attr.u.brport_flags |= brport_flag; 64947f8328bSScott Feldman else 65042275bd8SScott Feldman attr.u.brport_flags &= ~brport_flag; 65147f8328bSScott Feldman 65247f8328bSScott Feldman return switchdev_port_attr_set(dev, &attr); 65347f8328bSScott Feldman } 65447f8328bSScott Feldman 65547f8328bSScott Feldman static const struct nla_policy 65647f8328bSScott Feldman switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = { 65747f8328bSScott Feldman [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, 65847f8328bSScott Feldman [IFLA_BRPORT_COST] = { .type = NLA_U32 }, 65947f8328bSScott Feldman [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, 66047f8328bSScott Feldman [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, 66147f8328bSScott Feldman [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, 66247f8328bSScott Feldman [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, 66347f8328bSScott Feldman [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 }, 66447f8328bSScott Feldman [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 66547f8328bSScott Feldman [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 }, 66647f8328bSScott Feldman [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 66747f8328bSScott Feldman }; 66847f8328bSScott Feldman 66947f8328bSScott Feldman static int switchdev_port_br_setlink_protinfo(struct net_device *dev, 67047f8328bSScott Feldman struct nlattr *protinfo) 67147f8328bSScott Feldman { 67247f8328bSScott Feldman struct nlattr *attr; 67347f8328bSScott Feldman int rem; 67447f8328bSScott Feldman int err; 67547f8328bSScott Feldman 67647f8328bSScott Feldman err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX, 67747f8328bSScott Feldman switchdev_port_bridge_policy); 67847f8328bSScott Feldman if (err) 67947f8328bSScott Feldman return 
err; 68047f8328bSScott Feldman 68147f8328bSScott Feldman nla_for_each_nested(attr, protinfo, rem) { 68247f8328bSScott Feldman switch (nla_type(attr)) { 68347f8328bSScott Feldman case IFLA_BRPORT_LEARNING: 68447f8328bSScott Feldman err = switchdev_port_br_setflag(dev, attr, 68547f8328bSScott Feldman BR_LEARNING); 68647f8328bSScott Feldman break; 68747f8328bSScott Feldman case IFLA_BRPORT_LEARNING_SYNC: 68847f8328bSScott Feldman err = switchdev_port_br_setflag(dev, attr, 68947f8328bSScott Feldman BR_LEARNING_SYNC); 69047f8328bSScott Feldman break; 69147f8328bSScott Feldman default: 69247f8328bSScott Feldman err = -EOPNOTSUPP; 69347f8328bSScott Feldman break; 69447f8328bSScott Feldman } 69547f8328bSScott Feldman if (err) 69647f8328bSScott Feldman return err; 69747f8328bSScott Feldman } 69847f8328bSScott Feldman 69947f8328bSScott Feldman return 0; 70047f8328bSScott Feldman } 70147f8328bSScott Feldman 70247f8328bSScott Feldman static int switchdev_port_br_afspec(struct net_device *dev, 70347f8328bSScott Feldman struct nlattr *afspec, 70447f8328bSScott Feldman int (*f)(struct net_device *dev, 70547f8328bSScott Feldman struct switchdev_obj *obj)) 70647f8328bSScott Feldman { 70747f8328bSScott Feldman struct nlattr *attr; 70847f8328bSScott Feldman struct bridge_vlan_info *vinfo; 70947f8328bSScott Feldman struct switchdev_obj obj = { 71047f8328bSScott Feldman .id = SWITCHDEV_OBJ_PORT_VLAN, 71147f8328bSScott Feldman }; 71242275bd8SScott Feldman struct switchdev_obj_vlan *vlan = &obj.u.vlan; 71347f8328bSScott Feldman int rem; 71447f8328bSScott Feldman int err; 71547f8328bSScott Feldman 71647f8328bSScott Feldman nla_for_each_nested(attr, afspec, rem) { 71747f8328bSScott Feldman if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO) 71847f8328bSScott Feldman continue; 71947f8328bSScott Feldman if (nla_len(attr) != sizeof(struct bridge_vlan_info)) 72047f8328bSScott Feldman return -EINVAL; 72147f8328bSScott Feldman vinfo = nla_data(attr); 72242275bd8SScott Feldman vlan->flags = vinfo->flags; 72347f8328bSScott Feldman if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 7243e3a78b4SScott Feldman if (vlan->vid_begin) 72547f8328bSScott Feldman return -EINVAL; 7263e3a78b4SScott Feldman vlan->vid_begin = vinfo->vid; 72747f8328bSScott Feldman } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) { 7283e3a78b4SScott Feldman if (!vlan->vid_begin) 72947f8328bSScott Feldman return -EINVAL; 73042275bd8SScott Feldman vlan->vid_end = vinfo->vid; 7313e3a78b4SScott Feldman if (vlan->vid_end <= vlan->vid_begin) 73247f8328bSScott Feldman return -EINVAL; 73347f8328bSScott Feldman err = f(dev, &obj); 73447f8328bSScott Feldman if (err) 73547f8328bSScott Feldman return err; 73642275bd8SScott Feldman memset(vlan, 0, sizeof(*vlan)); 73747f8328bSScott Feldman } else { 7383e3a78b4SScott Feldman if (vlan->vid_begin) 73947f8328bSScott Feldman return -EINVAL; 7403e3a78b4SScott Feldman vlan->vid_begin = vinfo->vid; 74142275bd8SScott Feldman vlan->vid_end = vinfo->vid; 74247f8328bSScott Feldman err = f(dev, &obj); 74347f8328bSScott Feldman if (err) 74447f8328bSScott Feldman return err; 74542275bd8SScott Feldman memset(vlan, 0, sizeof(*vlan)); 74647f8328bSScott Feldman } 74747f8328bSScott Feldman } 74847f8328bSScott Feldman 74947f8328bSScott Feldman return 0; 75047f8328bSScott Feldman } 75147f8328bSScott Feldman 7528a44dbb2SRoopa Prabhu /** 75347f8328bSScott Feldman * switchdev_port_bridge_setlink - Set bridge port attributes 7548a44dbb2SRoopa Prabhu * 7558a44dbb2SRoopa Prabhu * @dev: port device 75647f8328bSScott Feldman * @nlh: netlink header 
75747f8328bSScott Feldman * @flags: netlink flags 7588a44dbb2SRoopa Prabhu * 75947f8328bSScott Feldman * Called for SELF on rtnl_bridge_setlink to set bridge port 76047f8328bSScott Feldman * attributes. 7618a44dbb2SRoopa Prabhu */ 762ebb9a03aSJiri Pirko int switchdev_port_bridge_setlink(struct net_device *dev, 7638a44dbb2SRoopa Prabhu struct nlmsghdr *nlh, u16 flags) 7648a44dbb2SRoopa Prabhu { 76547f8328bSScott Feldman struct nlattr *protinfo; 76647f8328bSScott Feldman struct nlattr *afspec; 76747f8328bSScott Feldman int err = 0; 7688a44dbb2SRoopa Prabhu 76947f8328bSScott Feldman protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), 77047f8328bSScott Feldman IFLA_PROTINFO); 77147f8328bSScott Feldman if (protinfo) { 77247f8328bSScott Feldman err = switchdev_port_br_setlink_protinfo(dev, protinfo); 77347f8328bSScott Feldman if (err) 77447f8328bSScott Feldman return err; 77547f8328bSScott Feldman } 7768a44dbb2SRoopa Prabhu 77747f8328bSScott Feldman afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), 77847f8328bSScott Feldman IFLA_AF_SPEC); 77947f8328bSScott Feldman if (afspec) 78047f8328bSScott Feldman err = switchdev_port_br_afspec(dev, afspec, 78147f8328bSScott Feldman switchdev_port_obj_add); 7828a44dbb2SRoopa Prabhu 78347f8328bSScott Feldman return err; 7848a44dbb2SRoopa Prabhu } 785ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink); 7868a44dbb2SRoopa Prabhu 7878a44dbb2SRoopa Prabhu /** 7885c34e022SScott Feldman * switchdev_port_bridge_dellink - Delete bridge port attributes 7898a44dbb2SRoopa Prabhu * 7908a44dbb2SRoopa Prabhu * @dev: port device 7915c34e022SScott Feldman * @nlh: netlink header 7925c34e022SScott Feldman * @flags: netlink flags 7938a44dbb2SRoopa Prabhu * 7945c34e022SScott Feldman * Called for SELF on rtnl_bridge_dellink to delete bridge port 7955c34e022SScott Feldman * attributes. 7968a44dbb2SRoopa Prabhu */ 797ebb9a03aSJiri Pirko int switchdev_port_bridge_dellink(struct net_device *dev, 7988a44dbb2SRoopa Prabhu struct nlmsghdr *nlh, u16 flags) 7998a44dbb2SRoopa Prabhu { 8005c34e022SScott Feldman struct nlattr *afspec; 8018a44dbb2SRoopa Prabhu 8025c34e022SScott Feldman afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), 8035c34e022SScott Feldman IFLA_AF_SPEC); 8045c34e022SScott Feldman if (afspec) 8055c34e022SScott Feldman return switchdev_port_br_afspec(dev, afspec, 8065c34e022SScott Feldman switchdev_port_obj_del); 8075c34e022SScott Feldman 8088a44dbb2SRoopa Prabhu return 0; 8098a44dbb2SRoopa Prabhu } 810ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink); 8118a44dbb2SRoopa Prabhu 81245d4122cSSamudrala, Sridhar /** 81345d4122cSSamudrala, Sridhar * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port 81445d4122cSSamudrala, Sridhar * 81545d4122cSSamudrala, Sridhar * @ndmsg: netlink hdr 81645d4122cSSamudrala, Sridhar * @nlattr: netlink attributes 81745d4122cSSamudrala, Sridhar * @dev: port device 81845d4122cSSamudrala, Sridhar * @addr: MAC address to add 81945d4122cSSamudrala, Sridhar * @vid: VLAN to add 82045d4122cSSamudrala, Sridhar * 82145d4122cSSamudrala, Sridhar * Add FDB entry to switch device.
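 *
 * Like the bridge link helpers above, this function (together with
 * switchdev_port_fdb_del) is meant to be used directly as the driver's
 * netdev op, e.g. (sketch):
 *
 *	.ndo_fdb_add	= switchdev_port_fdb_add,
 *	.ndo_fdb_del	= switchdev_port_fdb_del,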
82245d4122cSSamudrala, Sridhar */ 82345d4122cSSamudrala, Sridhar int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 82445d4122cSSamudrala, Sridhar struct net_device *dev, const unsigned char *addr, 82545d4122cSSamudrala, Sridhar u16 vid, u16 nlm_flags) 82645d4122cSSamudrala, Sridhar { 82745d4122cSSamudrala, Sridhar struct switchdev_obj obj = { 82845d4122cSSamudrala, Sridhar .id = SWITCHDEV_OBJ_PORT_FDB, 82945d4122cSSamudrala, Sridhar .u.fdb = { 830cdf09697SDavid S. Miller .addr = addr, 83145d4122cSSamudrala, Sridhar .vid = vid, 83245d4122cSSamudrala, Sridhar }, 83345d4122cSSamudrala, Sridhar }; 83445d4122cSSamudrala, Sridhar 83545d4122cSSamudrala, Sridhar return switchdev_port_obj_add(dev, &obj); 83645d4122cSSamudrala, Sridhar } 83745d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_add); 83845d4122cSSamudrala, Sridhar 83945d4122cSSamudrala, Sridhar /** 84045d4122cSSamudrala, Sridhar * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port 84145d4122cSSamudrala, Sridhar * 84245d4122cSSamudrala, Sridhar * @ndmsg: netlink hdr 84345d4122cSSamudrala, Sridhar * @nlattr: netlink attributes 84445d4122cSSamudrala, Sridhar * @dev: port device 84545d4122cSSamudrala, Sridhar * @addr: MAC address to delete 84645d4122cSSamudrala, Sridhar * @vid: VLAN to delete 84745d4122cSSamudrala, Sridhar * 84845d4122cSSamudrala, Sridhar * Delete FDB entry from switch device. 84945d4122cSSamudrala, Sridhar */ 85045d4122cSSamudrala, Sridhar int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], 85145d4122cSSamudrala, Sridhar struct net_device *dev, const unsigned char *addr, 85245d4122cSSamudrala, Sridhar u16 vid) 85345d4122cSSamudrala, Sridhar { 85445d4122cSSamudrala, Sridhar struct switchdev_obj obj = { 85545d4122cSSamudrala, Sridhar .id = SWITCHDEV_OBJ_PORT_FDB, 85645d4122cSSamudrala, Sridhar .u.fdb = { 857cdf09697SDavid S. 
Miller .addr = addr, 85845d4122cSSamudrala, Sridhar .vid = vid, 85945d4122cSSamudrala, Sridhar }, 86045d4122cSSamudrala, Sridhar }; 86145d4122cSSamudrala, Sridhar 86245d4122cSSamudrala, Sridhar return switchdev_port_obj_del(dev, &obj); 86345d4122cSSamudrala, Sridhar } 86445d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); 86545d4122cSSamudrala, Sridhar 86645d4122cSSamudrala, Sridhar struct switchdev_fdb_dump { 86745d4122cSSamudrala, Sridhar struct switchdev_obj obj; 86845d4122cSSamudrala, Sridhar struct sk_buff *skb; 86945d4122cSSamudrala, Sridhar struct netlink_callback *cb; 87045d4122cSSamudrala, Sridhar int idx; 87145d4122cSSamudrala, Sridhar }; 87245d4122cSSamudrala, Sridhar 87345d4122cSSamudrala, Sridhar static int switchdev_port_fdb_dump_cb(struct net_device *dev, 87445d4122cSSamudrala, Sridhar struct switchdev_obj *obj) 87545d4122cSSamudrala, Sridhar { 87645d4122cSSamudrala, Sridhar struct switchdev_fdb_dump *dump = 87745d4122cSSamudrala, Sridhar container_of(obj, struct switchdev_fdb_dump, obj); 87845d4122cSSamudrala, Sridhar u32 portid = NETLINK_CB(dump->cb->skb).portid; 87945d4122cSSamudrala, Sridhar u32 seq = dump->cb->nlh->nlmsg_seq; 88045d4122cSSamudrala, Sridhar struct nlmsghdr *nlh; 88145d4122cSSamudrala, Sridhar struct ndmsg *ndm; 88245d4122cSSamudrala, Sridhar 88345d4122cSSamudrala, Sridhar if (dump->idx < dump->cb->args[0]) 88445d4122cSSamudrala, Sridhar goto skip; 88545d4122cSSamudrala, Sridhar 88645d4122cSSamudrala, Sridhar nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, 88745d4122cSSamudrala, Sridhar sizeof(*ndm), NLM_F_MULTI); 88845d4122cSSamudrala, Sridhar if (!nlh) 88945d4122cSSamudrala, Sridhar return -EMSGSIZE; 89045d4122cSSamudrala, Sridhar 89145d4122cSSamudrala, Sridhar ndm = nlmsg_data(nlh); 89245d4122cSSamudrala, Sridhar ndm->ndm_family = AF_BRIDGE; 89345d4122cSSamudrala, Sridhar ndm->ndm_pad1 = 0; 89445d4122cSSamudrala, Sridhar ndm->ndm_pad2 = 0; 89545d4122cSSamudrala, Sridhar ndm->ndm_flags = NTF_SELF; 89645d4122cSSamudrala, Sridhar ndm->ndm_type = 0; 89745d4122cSSamudrala, Sridhar ndm->ndm_ifindex = dev->ifindex; 898ce80e7bcSVivien Didelot ndm->ndm_state = obj->u.fdb.ndm_state; 89945d4122cSSamudrala, Sridhar 90045d4122cSSamudrala, Sridhar if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr)) 90145d4122cSSamudrala, Sridhar goto nla_put_failure; 90245d4122cSSamudrala, Sridhar 90345d4122cSSamudrala, Sridhar if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid)) 90445d4122cSSamudrala, Sridhar goto nla_put_failure; 90545d4122cSSamudrala, Sridhar 90645d4122cSSamudrala, Sridhar nlmsg_end(dump->skb, nlh); 90745d4122cSSamudrala, Sridhar 90845d4122cSSamudrala, Sridhar skip: 90945d4122cSSamudrala, Sridhar dump->idx++; 91045d4122cSSamudrala, Sridhar return 0; 91145d4122cSSamudrala, Sridhar 91245d4122cSSamudrala, Sridhar nla_put_failure: 91345d4122cSSamudrala, Sridhar nlmsg_cancel(dump->skb, nlh); 91445d4122cSSamudrala, Sridhar return -EMSGSIZE; 91545d4122cSSamudrala, Sridhar } 91645d4122cSSamudrala, Sridhar 91745d4122cSSamudrala, Sridhar /** 91845d4122cSSamudrala, Sridhar * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries 91945d4122cSSamudrala, Sridhar * 92045d4122cSSamudrala, Sridhar * @skb: netlink skb 92145d4122cSSamudrala, Sridhar * @cb: netlink callback 92245d4122cSSamudrala, Sridhar * @dev: port device 92345d4122cSSamudrala, Sridhar * @filter_dev: filter device 92445d4122cSSamudrala, Sridhar * @idx: 92545d4122cSSamudrala, Sridhar * 92645d4122cSSamudrala, Sridhar * Dump FDB entries from switch device.
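 *
 * Normally used directly as the driver's dump hook, e.g. (sketch):
 *
 *	.ndo_fdb_dump	= switchdev_port_fdb_dump,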
92745d4122cSSamudrala, Sridhar */ 92845d4122cSSamudrala, Sridhar int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 92945d4122cSSamudrala, Sridhar struct net_device *dev, 93045d4122cSSamudrala, Sridhar struct net_device *filter_dev, int idx) 93145d4122cSSamudrala, Sridhar { 93245d4122cSSamudrala, Sridhar struct switchdev_fdb_dump dump = { 93345d4122cSSamudrala, Sridhar .obj = { 93445d4122cSSamudrala, Sridhar .id = SWITCHDEV_OBJ_PORT_FDB, 93545d4122cSSamudrala, Sridhar .cb = switchdev_port_fdb_dump_cb, 93645d4122cSSamudrala, Sridhar }, 93745d4122cSSamudrala, Sridhar .skb = skb, 93845d4122cSSamudrala, Sridhar .cb = cb, 93945d4122cSSamudrala, Sridhar .idx = idx, 94045d4122cSSamudrala, Sridhar }; 94145d4122cSSamudrala, Sridhar 9420890cf6cSJiri Pirko switchdev_port_obj_dump(dev, &dump.obj); 94345d4122cSSamudrala, Sridhar return dump.idx; 94445d4122cSSamudrala, Sridhar } 94545d4122cSSamudrala, Sridhar EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); 94645d4122cSSamudrala, Sridhar 947ebb9a03aSJiri Pirko static struct net_device *switchdev_get_lowest_dev(struct net_device *dev) 948b5d6fbdeSScott Feldman { 9499d47c0a2SJiri Pirko const struct switchdev_ops *ops = dev->switchdev_ops; 950b5d6fbdeSScott Feldman struct net_device *lower_dev; 951b5d6fbdeSScott Feldman struct net_device *port_dev; 952b5d6fbdeSScott Feldman struct list_head *iter; 953b5d6fbdeSScott Feldman 954b5d6fbdeSScott Feldman /* Recursively search down until we find a sw port dev. 955f8e20a9fSScott Feldman * (A sw port dev supports switchdev_port_attr_get). 956b5d6fbdeSScott Feldman */ 957b5d6fbdeSScott Feldman 958f8e20a9fSScott Feldman if (ops && ops->switchdev_port_attr_get) 959b5d6fbdeSScott Feldman return dev; 960b5d6fbdeSScott Feldman 961b5d6fbdeSScott Feldman netdev_for_each_lower_dev(dev, lower_dev, iter) { 962ebb9a03aSJiri Pirko port_dev = switchdev_get_lowest_dev(lower_dev); 963b5d6fbdeSScott Feldman if (port_dev) 964b5d6fbdeSScott Feldman return port_dev; 965b5d6fbdeSScott Feldman } 966b5d6fbdeSScott Feldman 967b5d6fbdeSScott Feldman return NULL; 968b5d6fbdeSScott Feldman } 969b5d6fbdeSScott Feldman 970ebb9a03aSJiri Pirko static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) 971b5d6fbdeSScott Feldman { 972f8e20a9fSScott Feldman struct switchdev_attr attr = { 973f8e20a9fSScott Feldman .id = SWITCHDEV_ATTR_PORT_PARENT_ID, 974f8e20a9fSScott Feldman }; 975f8e20a9fSScott Feldman struct switchdev_attr prev_attr; 976b5d6fbdeSScott Feldman struct net_device *dev = NULL; 977b5d6fbdeSScott Feldman int nhsel; 978b5d6fbdeSScott Feldman 979b5d6fbdeSScott Feldman /* For this route, all nexthop devs must be on the same switch.
*/ 980b5d6fbdeSScott Feldman 981b5d6fbdeSScott Feldman for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { 982b5d6fbdeSScott Feldman const struct fib_nh *nh = &fi->fib_nh[nhsel]; 983b5d6fbdeSScott Feldman 984b5d6fbdeSScott Feldman if (!nh->nh_dev) 985b5d6fbdeSScott Feldman return NULL; 986b5d6fbdeSScott Feldman 987ebb9a03aSJiri Pirko dev = switchdev_get_lowest_dev(nh->nh_dev); 988b5d6fbdeSScott Feldman if (!dev) 989b5d6fbdeSScott Feldman return NULL; 990b5d6fbdeSScott Feldman 991f8e20a9fSScott Feldman if (switchdev_port_attr_get(dev, &attr)) 992b5d6fbdeSScott Feldman return NULL; 993b5d6fbdeSScott Feldman 994d754f98bSScott Feldman if (nhsel > 0 && 995d754f98bSScott Feldman !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid)) 996b5d6fbdeSScott Feldman return NULL; 997b5d6fbdeSScott Feldman 998f8e20a9fSScott Feldman prev_attr = attr; 999b5d6fbdeSScott Feldman } 1000b5d6fbdeSScott Feldman 1001b5d6fbdeSScott Feldman return dev; 1002b5d6fbdeSScott Feldman } 1003b5d6fbdeSScott Feldman 10045e8d9049SScott Feldman /** 10057616dcbbSScott Feldman * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry 10065e8d9049SScott Feldman * 10075e8d9049SScott Feldman * @dst: route's IPv4 destination address 10085e8d9049SScott Feldman * @dst_len: destination address length (prefix length) 10095e8d9049SScott Feldman * @fi: route FIB info structure 10105e8d9049SScott Feldman * @tos: route TOS 10115e8d9049SScott Feldman * @type: route type 1012f8f21471SScott Feldman * @nlflags: netlink flags passed in (NLM_F_*) 10135e8d9049SScott Feldman * @tb_id: route table ID 10145e8d9049SScott Feldman * 10157616dcbbSScott Feldman * Add/modify switch IPv4 route entry. 10165e8d9049SScott Feldman */ 1017ebb9a03aSJiri Pirko int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, 1018f8f21471SScott Feldman u8 tos, u8 type, u32 nlflags, u32 tb_id) 10195e8d9049SScott Feldman { 102058c2cb16SScott Feldman struct switchdev_obj fib_obj = { 102158c2cb16SScott Feldman .id = SWITCHDEV_OBJ_IPV4_FIB, 102242275bd8SScott Feldman .u.ipv4_fib = { 10237a7ee531SScott Feldman .dst = dst, 102458c2cb16SScott Feldman .dst_len = dst_len, 102558c2cb16SScott Feldman .fi = fi, 102658c2cb16SScott Feldman .tos = tos, 102758c2cb16SScott Feldman .type = type, 102858c2cb16SScott Feldman .nlflags = nlflags, 102958c2cb16SScott Feldman .tb_id = tb_id, 103058c2cb16SScott Feldman }, 103158c2cb16SScott Feldman }; 1032b5d6fbdeSScott Feldman struct net_device *dev; 1033b5d6fbdeSScott Feldman int err = 0; 1034b5d6fbdeSScott Feldman 10358e05fd71SScott Feldman /* Don't offload route if using custom ip rules or if 10368e05fd71SScott Feldman * IPv4 FIB offloading has been disabled completely. 10378e05fd71SScott Feldman */ 10388e05fd71SScott Feldman 1039e1315db1SScott Feldman #ifdef CONFIG_IP_MULTIPLE_TABLES 1040e1315db1SScott Feldman if (fi->fib_net->ipv4.fib_has_custom_rules) 1041e1315db1SScott Feldman return 0; 1042e1315db1SScott Feldman #endif 1043e1315db1SScott Feldman 1044e1315db1SScott Feldman if (fi->fib_net->ipv4.fib_offload_disabled) 1045104616e7SScott Feldman return 0; 1046104616e7SScott Feldman 1047ebb9a03aSJiri Pirko dev = switchdev_get_dev_by_nhs(fi); 1048b5d6fbdeSScott Feldman if (!dev) 10495e8d9049SScott Feldman return 0; 1050b5d6fbdeSScott Feldman 105158c2cb16SScott Feldman err = switchdev_port_obj_add(dev, &fib_obj); 1052b5d6fbdeSScott Feldman if (!err) 1053eea39946SRoopa Prabhu fi->fib_flags |= RTNH_F_OFFLOAD; 1054b5d6fbdeSScott Feldman 1055af201f72SScott Feldman return err == -EOPNOTSUPP ? 
0 : err; 10565e8d9049SScott Feldman } 1057ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add); 10585e8d9049SScott Feldman 10595e8d9049SScott Feldman /** 1060ebb9a03aSJiri Pirko * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch 10615e8d9049SScott Feldman * 10625e8d9049SScott Feldman * @dst: route's IPv4 destination address 10635e8d9049SScott Feldman * @dst_len: destination address length (prefix length) 10645e8d9049SScott Feldman * @fi: route FIB info structure 10655e8d9049SScott Feldman * @tos: route TOS 10665e8d9049SScott Feldman * @type: route type 10675e8d9049SScott Feldman * @tb_id: route table ID 10685e8d9049SScott Feldman * 10695e8d9049SScott Feldman * Delete IPv4 route entry from switch device. 10705e8d9049SScott Feldman */ 1071ebb9a03aSJiri Pirko int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, 10725e8d9049SScott Feldman u8 tos, u8 type, u32 tb_id) 10735e8d9049SScott Feldman { 107458c2cb16SScott Feldman struct switchdev_obj fib_obj = { 107558c2cb16SScott Feldman .id = SWITCHDEV_OBJ_IPV4_FIB, 107642275bd8SScott Feldman .u.ipv4_fib = { 10777a7ee531SScott Feldman .dst = dst, 107858c2cb16SScott Feldman .dst_len = dst_len, 107958c2cb16SScott Feldman .fi = fi, 108058c2cb16SScott Feldman .tos = tos, 108158c2cb16SScott Feldman .type = type, 108258c2cb16SScott Feldman .nlflags = 0, 108358c2cb16SScott Feldman .tb_id = tb_id, 108458c2cb16SScott Feldman }, 108558c2cb16SScott Feldman }; 1086b5d6fbdeSScott Feldman struct net_device *dev; 1087b5d6fbdeSScott Feldman int err = 0; 1088b5d6fbdeSScott Feldman 1089eea39946SRoopa Prabhu if (!(fi->fib_flags & RTNH_F_OFFLOAD)) 10905e8d9049SScott Feldman return 0; 1091b5d6fbdeSScott Feldman 1092ebb9a03aSJiri Pirko dev = switchdev_get_dev_by_nhs(fi); 1093b5d6fbdeSScott Feldman if (!dev) 1094b5d6fbdeSScott Feldman return 0; 1095b5d6fbdeSScott Feldman 109658c2cb16SScott Feldman err = switchdev_port_obj_del(dev, &fib_obj); 1097b5d6fbdeSScott Feldman if (!err) 1098eea39946SRoopa Prabhu fi->fib_flags &= ~RTNH_F_OFFLOAD; 1099b5d6fbdeSScott Feldman 1100af201f72SScott Feldman return err == -EOPNOTSUPP ? 0 : err; 11015e8d9049SScott Feldman } 1102ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del); 11038e05fd71SScott Feldman 11048e05fd71SScott Feldman /** 1105ebb9a03aSJiri Pirko * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation 11068e05fd71SScott Feldman * 11078e05fd71SScott Feldman * @fi: route FIB info structure 11088e05fd71SScott Feldman */ 1109ebb9a03aSJiri Pirko void switchdev_fib_ipv4_abort(struct fib_info *fi) 11108e05fd71SScott Feldman { 11118e05fd71SScott Feldman /* There was a problem installing this route to the offload 11128e05fd71SScott Feldman * device. For now, until we come up with more refined 11138e05fd71SScott Feldman * policy handling, abruptly end IPv4 fib offloading for 11148e05fd71SScott Feldman * the entire net by flushing offload device(s) of all 11158e05fd71SScott Feldman * IPv4 routes, and mark IPv4 fib offloading broken from 11168e05fd71SScott Feldman * this point forward.
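	 *
	 * For example (sketch; argument names follow this file), the IPv4
	 * FIB code is expected to do roughly:
	 *
	 *	err = switchdev_fib_ipv4_add(dst, dst_len, fi, tos, type,
	 *				     nlflags, tb_id);
	 *	if (err)
	 *		switchdev_fib_ipv4_abort(fi);
	 *
	 * so a driver only needs to fail the prepare phase of the route
	 * object add (e.g. -ENOSPC when its tables are full) to fall back
	 * to software forwarding for all routes.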
11178e05fd71SScott Feldman */ 11188e05fd71SScott Feldman 11198e05fd71SScott Feldman fib_flush_external(fi->fib_net); 11208e05fd71SScott Feldman fi->fib_net->ipv4.fib_offload_disabled = true; 11218e05fd71SScott Feldman } 1122ebb9a03aSJiri Pirko EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort); 11231a3b2ec9SScott Feldman 11241a3b2ec9SScott Feldman static bool switchdev_port_same_parent_id(struct net_device *a, 11251a3b2ec9SScott Feldman struct net_device *b) 11261a3b2ec9SScott Feldman { 11271a3b2ec9SScott Feldman struct switchdev_attr a_attr = { 11281a3b2ec9SScott Feldman .id = SWITCHDEV_ATTR_PORT_PARENT_ID, 11291a3b2ec9SScott Feldman .flags = SWITCHDEV_F_NO_RECURSE, 11301a3b2ec9SScott Feldman }; 11311a3b2ec9SScott Feldman struct switchdev_attr b_attr = { 11321a3b2ec9SScott Feldman .id = SWITCHDEV_ATTR_PORT_PARENT_ID, 11331a3b2ec9SScott Feldman .flags = SWITCHDEV_F_NO_RECURSE, 11341a3b2ec9SScott Feldman }; 11351a3b2ec9SScott Feldman 11361a3b2ec9SScott Feldman if (switchdev_port_attr_get(a, &a_attr) || 11371a3b2ec9SScott Feldman switchdev_port_attr_get(b, &b_attr)) 11381a3b2ec9SScott Feldman return false; 11391a3b2ec9SScott Feldman 11401a3b2ec9SScott Feldman return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid); 11411a3b2ec9SScott Feldman } 11421a3b2ec9SScott Feldman 11431a3b2ec9SScott Feldman static u32 switchdev_port_fwd_mark_get(struct net_device *dev, 11441a3b2ec9SScott Feldman struct net_device *group_dev) 11451a3b2ec9SScott Feldman { 11461a3b2ec9SScott Feldman struct net_device *lower_dev; 11471a3b2ec9SScott Feldman struct list_head *iter; 11481a3b2ec9SScott Feldman 11491a3b2ec9SScott Feldman netdev_for_each_lower_dev(group_dev, lower_dev, iter) { 11501a3b2ec9SScott Feldman if (lower_dev == dev) 11511a3b2ec9SScott Feldman continue; 11521a3b2ec9SScott Feldman if (switchdev_port_same_parent_id(dev, lower_dev)) 11531a3b2ec9SScott Feldman return lower_dev->offload_fwd_mark; 11541a3b2ec9SScott Feldman return switchdev_port_fwd_mark_get(dev, lower_dev); 11551a3b2ec9SScott Feldman } 11561a3b2ec9SScott Feldman 11571a3b2ec9SScott Feldman return dev->ifindex; 11581a3b2ec9SScott Feldman } 11591a3b2ec9SScott Feldman 11601a3b2ec9SScott Feldman static void switchdev_port_fwd_mark_reset(struct net_device *group_dev, 11611a3b2ec9SScott Feldman u32 old_mark, u32 *reset_mark) 11621a3b2ec9SScott Feldman { 11631a3b2ec9SScott Feldman struct net_device *lower_dev; 11641a3b2ec9SScott Feldman struct list_head *iter; 11651a3b2ec9SScott Feldman 11661a3b2ec9SScott Feldman netdev_for_each_lower_dev(group_dev, lower_dev, iter) { 11671a3b2ec9SScott Feldman if (lower_dev->offload_fwd_mark == old_mark) { 11681a3b2ec9SScott Feldman if (!*reset_mark) 11691a3b2ec9SScott Feldman *reset_mark = lower_dev->ifindex; 11701a3b2ec9SScott Feldman lower_dev->offload_fwd_mark = *reset_mark; 11711a3b2ec9SScott Feldman } 11721a3b2ec9SScott Feldman switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark); 11731a3b2ec9SScott Feldman } 11741a3b2ec9SScott Feldman } 11751a3b2ec9SScott Feldman 11761a3b2ec9SScott Feldman /** 11771a3b2ec9SScott Feldman * switchdev_port_fwd_mark_set - Set port offload forwarding mark 11781a3b2ec9SScott Feldman * 11791a3b2ec9SScott Feldman * @dev: port device 11801a3b2ec9SScott Feldman * @group_dev: containing device 11811a3b2ec9SScott Feldman * @joining: true if dev is joining group; false if leaving group 11821a3b2ec9SScott Feldman * 11831a3b2ec9SScott Feldman * An ungrouped port's offload mark is just its ifindex. 
A grouped 11841a3b2ec9SScott Feldman * port's (member of a bridge, for example) offload mark is the ifindex 11851a3b2ec9SScott Feldman * of one of the ports in the group with the same parent (switch) ID. 11861a3b2ec9SScott Feldman * Ports on the same device in the same group will have the same mark. 11871a3b2ec9SScott Feldman * 11881a3b2ec9SScott Feldman * Example: 11891a3b2ec9SScott Feldman * 11901a3b2ec9SScott Feldman * br0 ifindex=9 11911a3b2ec9SScott Feldman * sw1p1 ifindex=2 mark=2 11921a3b2ec9SScott Feldman * sw1p2 ifindex=3 mark=2 11931a3b2ec9SScott Feldman * sw2p1 ifindex=4 mark=5 11941a3b2ec9SScott Feldman * sw2p2 ifindex=5 mark=5 11951a3b2ec9SScott Feldman * 11961a3b2ec9SScott Feldman * If sw2p2 leaves the bridge, we'll have: 11971a3b2ec9SScott Feldman * 11981a3b2ec9SScott Feldman * br0 ifindex=9 11991a3b2ec9SScott Feldman * sw1p1 ifindex=2 mark=2 12001a3b2ec9SScott Feldman * sw1p2 ifindex=3 mark=2 12011a3b2ec9SScott Feldman * sw2p1 ifindex=4 mark=4 12021a3b2ec9SScott Feldman * sw2p2 ifindex=5 mark=5 12031a3b2ec9SScott Feldman */ 12041a3b2ec9SScott Feldman void switchdev_port_fwd_mark_set(struct net_device *dev, 12051a3b2ec9SScott Feldman struct net_device *group_dev, 12061a3b2ec9SScott Feldman bool joining) 12071a3b2ec9SScott Feldman { 12081a3b2ec9SScott Feldman u32 mark = dev->ifindex; 12091a3b2ec9SScott Feldman u32 reset_mark = 0; 12101a3b2ec9SScott Feldman 12111a3b2ec9SScott Feldman if (group_dev && joining) { 12121a3b2ec9SScott Feldman mark = switchdev_port_fwd_mark_get(dev, group_dev); 12131a3b2ec9SScott Feldman } else if (group_dev && !joining) { 12141a3b2ec9SScott Feldman if (dev->offload_fwd_mark == mark) 12151a3b2ec9SScott Feldman /* Ohoh, this port was the mark reference port, 12161a3b2ec9SScott Feldman * but it's leaving the group, so reset the 12171a3b2ec9SScott Feldman * mark for the remaining ports in the group. 12181a3b2ec9SScott Feldman */ 12191a3b2ec9SScott Feldman switchdev_port_fwd_mark_reset(group_dev, mark, 12201a3b2ec9SScott Feldman &reset_mark); 12211a3b2ec9SScott Feldman } 12221a3b2ec9SScott Feldman 12231a3b2ec9SScott Feldman dev->offload_fwd_mark = mark; 12241a3b2ec9SScott Feldman } 12251a3b2ec9SScott Feldman EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set); 1226
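/* Example (illustrative sketch, not part of this file): a port driver would
 * typically call switchdev_port_fwd_mark_set() when its port is enslaved to
 * or released from a bridge, e.g. from its bridge join/leave handlers.  The
 * foo_port structure and helpers below are hypothetical.
 *
 *	static int foo_port_bridge_join(struct foo_port *port,
 *					struct net_device *bridge)
 *	{
 *		// offload bridge config to hardware here
 *		switchdev_port_fwd_mark_set(port->dev, bridge, true);
 *		return 0;
 *	}
 *
 *	static void foo_port_bridge_leave(struct foo_port *port,
 *					  struct net_device *bridge)
 *	{
 *		switchdev_port_fwd_mark_set(port->dev, bridge, false);
 *		// undo offloaded bridge config here
 *	}
 */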