// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/dev_addr_lists.c - Functions for handling net device lists
 * Copyright (c) 2010 Jiri Pirko <[email protected]>
 *
 * This file contains functions for working with unicast, multicast and device
 * address lists.
 */

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/export.h>
#include <linux/list.h>

#include "dev.h"

/*
 * General list handling functions
 */

static int __hw_addr_insert(struct netdev_hw_addr_list *list,
			    struct netdev_hw_addr *new, int addr_len)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(new->addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&new->type, &ha->type, sizeof(new->type));

		parent = *ins_point;
		if (diff < 0)
			ins_point = &parent->rb_left;
		else if (diff > 0)
			ins_point = &parent->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node_rcu(&new->node, parent, ins_point);
	rb_insert_color(&new->node, &list->tree);

	return 0;
}

static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
		 unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha;
	int alloc_size;

	alloc_size = sizeof(*ha);
	if (alloc_size < L1_CACHE_BYTES)
		alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return NULL;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->global_use = global;
	ha->synced = sync ? 1 : 0;
	ha->sync_cnt = 0;

	return ha;
}

static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count, bool exclusive)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		parent = *ins_point;
		if (diff < 0) {
			ins_point = &parent->rb_left;
		} else if (diff > 0) {
			ins_point = &parent->rb_right;
		} else {
			if (exclusive)
				return -EEXIST;
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
	if (!ha)
		return -ENOMEM;

	rb_link_node(&ha->node, parent, ins_point);
	rb_insert_color(&ha->node, &list->tree);

	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;

	return 0;
}

static int __hw_addr_add(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
				0, false);
}

static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;

	rb_erase(&ha->node, &list->tree);

	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}

static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
					       const unsigned char *addr, int addr_len,
					       unsigned char addr_type)
{
	struct rb_node *node;

	node = list->tree.rb_node;

	while (node) {
		struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
		int diff = memcmp(addr, ha->addr, addr_len);

		if (diff == 0 && addr_type)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		if (diff < 0)
			node = node->rb_left;
		else if (diff > 0)
			node = node->rb_right;
		else
			return ha;
	}

	return NULL;
}

static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);

	if (!ha)
		return -ENOENT;
	return __hw_addr_del_entry(list, ha, global, sync);
}

static int __hw_addr_del(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}

static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt, false);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}

static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;
	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}

int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
			    struct netdev_hw_addr_list *from_list,
			    int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync_multiple);

/* This function only works where there is a strict 1-1 relationship
 * between source and destination of the sync. If you ever need to
 * sync addresses to more than 1 destination, you need to use
 * __hw_addr_sync_multiple().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);

void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list,
		      int addr_len)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync);

/**
 * __hw_addr_sync_dev - Synchronize device's multicast list
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address add/remove
 * notifications. The unsync function may be NULL in which case
 * the addresses requiring removal will simply be removed without
 * any notification to the device.
 **/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
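
/*
 * A minimal usage sketch (not part of the original file; the foo_* names are
 * hypothetical). It shows how a driver that must be told about every address
 * individually might plug __hw_addr_sync_dev() into its ndo_set_rx_mode
 * callback; the matching teardown would call __hw_addr_unsync_dev() from
 * ndo_stop. Kept under #if 0 so it is never built.
 */
#if 0
static int foo_add_uc_addr(struct net_device *dev, const unsigned char *addr)
{
	/* program @addr into the hardware unicast filter here */
	return 0;
}

static int foo_del_uc_addr(struct net_device *dev, const unsigned char *addr)
{
	/* remove @addr from the hardware unicast filter here */
	return 0;
}

static void foo_set_rx_mode(struct net_device *dev)
{
	/* the core already holds dev's addr_list_lock when calling this */
	__hw_addr_sync_dev(&dev->uc, dev, foo_add_uc_addr, foo_del_uc_addr);
}
#endif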

/**
 * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
 *	into account references
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address or reference on it should be added
 * @unsync: function to call if address or some reference on it should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address or references on it
 * add/remove notifications. The unsync function may be NULL in which case
 * the addresses or references on it requiring removal will simply be
 * removed without any notification to the device. It is the responsibility
 * of the driver to identify and distribute addresses or references on them
 * between internal address tables.
 **/
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;
	int err, ref_cnt;

	/* first go through and flush out any unsynced/stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address is not used */
		if ((ha->sync_cnt << 1) <= ha->refcount)
			continue;

		/* if fails defer unsyncing address */
		ref_cnt = ha->refcount - ha->sync_cnt;
		if (unsync && unsync(dev, ha->addr, ref_cnt))
			continue;

		ha->refcount = (ref_cnt << 1) + 1;
		ha->sync_cnt = ref_cnt;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync updated/new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address added or reused */
		if ((ha->sync_cnt << 1) >= ha->refcount)
			continue;

		ref_cnt = ha->refcount - ha->sync_cnt;
		err = sync(dev, ha->addr, ref_cnt);
		if (err)
			return err;

		ha->refcount = ref_cnt << 1;
		ha->sync_cnt = ref_cnt;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);

/**
 * __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
 *	it from device
 * @list: address list to remove synchronized addresses (references on it) from
 * @dev: device to sync
 * @unsync: function to call if address and references on it should be removed
 *
 * Remove all addresses that were added to the device by
 * __hw_addr_ref_sync_dev(). This function is intended to be called from the
 * ndo_stop or ndo_open functions on devices that require explicit address (or
 * references on it) add/remove notifications. If the unsync function pointer
 * is NULL then this function can be used to just reset the sync_cnt for the
 * addresses in the list.
 **/
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
			continue;

		ha->refcount -= ha->sync_cnt - 1;
		ha->sync_cnt = 0;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);

/**
 * __hw_addr_unsync_dev - Remove synchronized addresses from device
 * @list: address list to remove synchronized addresses from
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
 * This function is intended to be called from the ndo_stop or ndo_open
 * functions on devices that require explicit address add/remove
 * notifications. If the unsync function pointer is NULL then this function
 * can be used to just reset the sync_cnt for the addresses in the list.
 **/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);

static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list->tree = RB_ROOT;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		kfree_rcu(ha, rcu_head);
	}
	list->count = 0;
}

void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
	list->tree = RB_ROOT;
}
EXPORT_SYMBOL(__hw_addr_init);

/*
 * Device addresses handling functions
 */

/* Check that netdev->dev_addr is not written to directly as this would
 * break the rbtree layout. All changes should go through dev_addr_set() and co.
 * Remove this check in mid-2024.
 */
void dev_addr_check(struct net_device *dev)
{
	if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
		return;

	netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
	netdev_warn(dev, "Expected addr: %*ph\n",
		    MAX_ADDR_LEN, dev->dev_addr_shadow);
	netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}

/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush device address list and reset ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */
	dev_addr_check(dev);

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}

/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init device address list and create the first element,
 * used by ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}

void dev_addr_mod(struct net_device *dev, unsigned int offset,
		  const void *addr, size_t len)
{
	struct netdev_hw_addr *ha;

	dev_addr_check(dev);

	ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
	rb_erase(&ha->node, &dev->dev_addrs.tree);
	memcpy(&ha->addr[offset], addr, len);
	memcpy(&dev->dev_addr_shadow[offset], addr, len);
	WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);

/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = dev_pre_changeaddr_notify(dev, addr, NULL);
	if (err)
		return err;
	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We can not remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);
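
/*
 * A minimal usage sketch (not part of the original file; the bar_* name and
 * the mac[] value are hypothetical). Since dev_addr_check() warns when
 * netdev->dev_addr is written directly, drivers are expected to go through
 * dev_addr_set() (which ends up in dev_addr_mod() above) when changing the
 * primary address. Kept under #if 0 so it is never built.
 */
#if 0
static int bar_set_mac(struct net_device *dev)
{
	static const u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	ASSERT_RTNL();
	dev_addr_set(dev, mac);	/* updates dev_addr and the rbtree entry */
	return 0;
}
#endif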

/*
 * Unicast list handling functions
 */

/**
 * dev_uc_add_excl - Add a global secondary unicast address
 * @dev: device
 * @addr: address to add
 */
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_UNICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);

/**
 * dev_uc_add - Add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add);

/**
 * dev_uc_del - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_del);

/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. This function assumes that
 * addresses will only ever be synced to the @to device and no other.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);

/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 *	device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source. The source device
 * must be locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. It allows for a single source
 * device to be synced to multiple destination devices.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);

/**
 * dev_uc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
	 * reasons:
	 * 1) This is always called without any addr_list_lock, so as the
	 *    outermost one here, it must be 0.
	 * 2) This is called by some callers after unlinking the upper device,
	 *    so the dev->lower_level becomes 1 again.
	 * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
	 * larger.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
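
/*
 * A minimal usage sketch (not part of the original file; the upper_* names
 * and the ->lowerdev field are hypothetical). A layered software device such
 * as a VLAN- or macvlan-style upper would typically propagate its address
 * lists to the real device like this, and drop them again with
 * dev_uc_unsync()/dev_mc_unsync() from its ndo_stop. A device that can sit
 * on several lower devices would use the *_sync_multiple() variants instead.
 * Kept under #if 0 so it is never built.
 */
#if 0
static void upper_set_rx_mode(struct net_device *dev)
{
	struct upper_priv *priv = netdev_priv(dev);

	dev_uc_sync(priv->lowerdev, dev);
	dev_mc_sync(priv->lowerdev, dev);
}

static int upper_stop(struct net_device *dev)
{
	struct upper_priv *priv = netdev_priv(dev);

	dev_uc_unsync(priv->lowerdev, dev);
	dev_mc_unsync(priv->lowerdev, dev);
	return 0;
}
#endif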

/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush unicast addresses.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);

/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Init unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);

/*
 * Multicast list handling functions
 */

/**
 * dev_mc_add_excl - Add a global secondary multicast address
 * @dev: device
 * @addr: address to add
 */
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);

static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false,
			       0, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);

/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device.
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);

static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_del - Delete a multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);
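
/*
 * A minimal usage sketch (not part of the original file; the baz_* names are
 * hypothetical, the group address is the well-known LLDP nearest-bridge MAC).
 * A protocol that wants the NIC to accept frames for a link-layer multicast
 * group simply adds the address with dev_mc_add() and drops its reference
 * again with dev_mc_del(); the refcounting above makes repeated joins from
 * independent users safe. Kept under #if 0 so it is never built.
 */
#if 0
static const u8 baz_mc_group[ETH_ALEN] = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e
};

static int baz_join(struct net_device *dev)
{
	return dev_mc_add(dev, baz_mc_group);
}

static void baz_leave(struct net_device *dev)
{
	dev_mc_del(dev, baz_mc_group);
}
#endif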

/**
 * dev_mc_del_global - Delete a global multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);

/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);

/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to another
 *	device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. It allows for a single
 * source device to be synced to multiple destination devices.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);

/**
 * dev_mc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* See the above comments inside dev_uc_unsync(). */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);

/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush multicast addresses.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);

/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Init multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);