// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, NULL);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
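 *
 * A minimal caller-side sketch (hypothetical bridge-port driver code, not
 * part of this file; dev, extack and err are placeholder variables, and the
 * vid_begin/vid_end fields are assumed to match this kernel's
 * struct switchdev_obj_port_vlan):
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.obj.flags = SWITCHDEV_F_DEFER,
 *		.vid_begin = 10,
 *		.vid_end = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj, extack);
 *
 * With SWITCHDEV_F_DEFER set the object is copied, queued and applied later
 * under rtnl_lock; without it the blocking notifier chain is called
 * immediately, so the caller must already hold rtnl_lock.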
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *	Call all network notifier blocks.
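 *
 *	A minimal caller-side sketch (hypothetical driver code, not part of
 *	this file; mac, vid and port_dev are placeholder variables) reporting
 *	a learned FDB entry to the bridge:
 *
 *		struct switchdev_notifier_fdb_info fdb_info = {
 *			.addr = mac,
 *			.vid = vid,
 *			.offloaded = true,
 *		};
 *
 *		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
 *					 port_dev, &fdb_info.info, NULL);
 *
 *	Because this is an atomic notifier chain, it may be called from
 *	contexts that cannot sleep, such as the driver's data path.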
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return add_cb(dev, port_obj_info->obj, extack);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
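	 *
	 * E.g. when a bridge port is a LAG over two physical switch ports,
	 * the notification arrives on the LAG netdev: the recursion below
	 * walks its lower devices until check_cb() matches the physical
	 * ports, and add_cb() is then invoked on each of them.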
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return del_cb(dev, port_obj_info->obj);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		port_attr_info->handled = true;
		return set_cb(dev, port_attr_info->attr);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
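
/*
 * Caller-side sketch (hypothetical driver code, not part of this file):
 * the switchdev_handle_*() helpers above are typically called from a
 * driver's blocking notifier, which translates the result back into a
 * notifier return value. Names prefixed with foo_ are placeholders.
 *
 *	static int foo_switchdev_blocking_event(struct notifier_block *nb,
 *						unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_port_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_OBJ_DEL:
 *			err = switchdev_handle_port_obj_del(dev, ptr,
 *							    foo_port_dev_check,
 *							    foo_port_obj_del);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_ATTR_SET:
 *			err = switchdev_handle_port_attr_set(dev, ptr,
 *							     foo_port_dev_check,
 *							     foo_port_attr_set);
 *			return notifier_from_errno(err);
 *		}
 *
 *		return NOTIFY_DONE;
 *	}
 *
 * Such a notifier block would be registered with
 * register_switchdev_blocking_notifier().
 */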