| /linux-6.15/drivers/infiniband/hw/mlx5/ |
| ib_rep.c |
| 33 | struct mlx5_core_dev *peer_dev; | local in mlx5_ib_num_ports_update() |
| 36 | mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { | in mlx5_ib_num_ports_update() |
| 37 | u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev); | in mlx5_ib_num_ports_update() |
| 39 | if (mlx5_lag_is_mpesw(peer_dev)) | in mlx5_ib_num_ports_update() |
| 53 | struct mlx5_core_dev *peer_dev; | local in mlx5_ib_vport_rep_load() |
| 71 | mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { | in mlx5_ib_vport_rep_load() |
| 72 | u32 peer_n_ports = mlx5_eswitch_get_total_vports(peer_dev); | in mlx5_ib_vport_rep_load() |
| 74 | if (mlx5_lag_is_master(peer_dev)) | in mlx5_ib_vport_rep_load() |
| 75 | lag_master = peer_dev; | in mlx5_ib_vport_rep_load() |
| 80 | if (mlx5_get_dev_index(peer_dev) < mlx5_get_dev_index(dev)) | in mlx5_ib_vport_rep_load() |
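The ib_rep.c hits show the LAG peer walk the IB driver uses to size its port space: each peer contributes its total vport count, with an adjustment when the peer runs in multi-port E-switch (MPESW) mode. Below is a minimal sketch of that loop shape, reconstructed only from the fragments above; the helper name is hypothetical, the MPESW adjustment is a placeholder, and driver-internal headers are assumed to be in scope.

/* Hypothetical helper; only the loop shape mirrors the hits above. */
static u32 peer_vports_total_sketch(struct mlx5_core_dev *dev)
{
        struct mlx5_core_dev *peer_dev;
        u32 total = 0;
        int i;

        mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
                u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);

                /* Line 39 above gates on MPESW; the exact accounting in
                 * mlx5_ib_num_ports_update() differs from this placeholder. */
                if (mlx5_lag_is_mpesw(peer_dev))
                        peer_num_ports = 1;
                total += peer_num_ports;
        }
        return total;
}

mlx5_ib_vport_rep_load() (lines 71-80) walks the same iterator, but to find the LAG master via mlx5_lag_is_master() and to order devices by mlx5_get_dev_index() rather than to count ports.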
|
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| clock.c |
| 1362 | struct mlx5_core_dev *peer_dev, *next = NULL; | local in mlx5_shared_clock_register() |
| 1372 | mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { | in mlx5_shared_clock_register() |
| 1373 | if (peer_dev->clock) { | in mlx5_shared_clock_register() |
| 1374 | next = peer_dev; | in mlx5_shared_clock_register() |
| 1396 | struct mlx5_core_dev *peer_dev, *next = NULL; | local in mlx5_shared_clock_unregister() |
| 1401 | mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { | in mlx5_shared_clock_unregister() |
| 1402 | if (peer_dev->clock && peer_dev != mdev) { | in mlx5_shared_clock_unregister() |
| 1403 | next = peer_dev; | in mlx5_shared_clock_unregister() |
| 1479 | struct mlx5_core_dev *peer_dev, *next = NULL; | local in mlx5_clock_unload() |
| 1493 | if (peer_dev->clock && peer_dev != mdev) { | in mlx5_clock_unload() |
| [all …] |
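All three clock.c functions scan the devcom peer list for another function on the same hardware that already owns a shared clock. Below is a minimal sketch of that scan using only the iterator and fields visible in the hits; the helper name is hypothetical, the cursor type is assumed from the devcom iterator, and the locking the real code takes around the loop is omitted.

/* Hypothetical helper mirroring the scan in the clock.c hits. */
static struct mlx5_core_dev *find_clock_owner_sketch(struct mlx5_core_dev *mdev)
{
        struct mlx5_devcom_comp_dev *pos;       /* assumed iterator cursor type */
        struct mlx5_core_dev *peer_dev, *next = NULL;

        mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
                /* Prefer a peer that already registered a clock; on teardown
                 * the device going away (mdev) must be skipped as well. */
                if (peer_dev->clock && peer_dev != mdev) {
                        next = peer_dev;
                        break;
                }
        }
        return next;
}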
|
| /linux-6.15/drivers/gpu/drm/amd/amdkfd/ |
| kfd_topology.c |
| 1304 | struct kfd_topology_device *peer_dev; | local in kfd_fill_iolink_non_crat_info() |
| 1313 | peer_dev = kfd_topology_device_by_proximity_domain( | in kfd_fill_iolink_non_crat_info() |
| 1316 | if (!peer_dev) | in kfd_fill_iolink_non_crat_info() |
| 1320 | if (!peer_dev->gpu && | in kfd_fill_iolink_non_crat_info() |
| 1328 | peer_dev->node_props.hive_id = dev->node_props.hive_id; | in kfd_fill_iolink_non_crat_info() |
| 1331 | list_for_each_entry(inbound_link, &peer_dev->io_link_props, | in kfd_fill_iolink_non_crat_info() |
| 1337 | kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); | in kfd_fill_iolink_non_crat_info() |
| 1338 | kfd_set_iolink_non_coherent(peer_dev, link, inbound_link); | in kfd_fill_iolink_non_crat_info() |
| 1347 | peer_dev = kfd_topology_device_by_proximity_domain( | in kfd_fill_iolink_non_crat_info() |
| 1350 | if (!peer_dev) | in kfd_fill_iolink_non_crat_info() |
| [all …] |
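kfd_fill_iolink_non_crat_info() resolves the far end of each I/O link to a topology device, propagates the hive id to CPU-only peers, and then revisits the peer's inbound links to adjust atomics and coherency flags. The sketch below reconstructs that pattern from the hits; the gating condition at line 1320 is longer in the real code, link->node_to is assumed to be the proximity-domain key, and KFD-internal headers are assumed in scope.

/* Sketch only; locking and the full gating conditions are elided. */
static void fill_peer_links_sketch(struct kfd_topology_device *dev)
{
        struct kfd_iolink_properties *link, *inbound_link;
        struct kfd_topology_device *peer_dev;

        list_for_each_entry(link, &dev->io_link_props, list) {
                /* Lines 1313/1316: resolve the link target, skip dangling links. */
                peer_dev = kfd_topology_device_by_proximity_domain(link->node_to);
                if (!peer_dev)
                        continue;

                /* Lines 1320/1328: CPU-only peers inherit the GPU's hive id
                 * (the real condition carries extra checks). */
                if (!peer_dev->gpu)
                        peer_dev->node_props.hive_id = dev->node_props.hive_id;

                /* Lines 1331-1338: fix up the peer's inbound links. */
                list_for_each_entry(inbound_link, &peer_dev->io_link_props, list) {
                        kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
                        kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
                }
        }
}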
|
| kfd_crat.c |
| 2241 | struct kfd_topology_device *peer_dev; | local in kfd_create_vcrat_image_gpu() |
| 2359 | peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); | in kfd_create_vcrat_image_gpu() |
| 2360 | if (!peer_dev->gpu) | in kfd_create_vcrat_image_gpu() |
| 2362 | if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) | in kfd_create_vcrat_image_gpu() |
| 2364 | if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev)) | in kfd_create_vcrat_image_gpu() |
| 2370 | &avail_size, kdev, peer_dev->gpu, | in kfd_create_vcrat_image_gpu() |
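When building the virtual CRAT image for a GPU, kfd_create_vcrat_image_gpu() only emits an XGMI link for peers that are GPUs, sit in the same XGMI hive, and have memory sharing enabled. The sketch below keeps only the checks visible in the hits; the loop bound, the kfd_node type of kdev, the extra NULL check, and the placeholder comment for the CRAT fill call at line 2370 are assumptions.

/* Sketch only; KFD-internal headers assumed in scope. */
static void emit_xgmi_links_sketch(struct kfd_node *kdev, uint32_t proximity_domain)
{
        struct kfd_topology_device *peer_dev;
        uint32_t nid;

        for (nid = 0; nid < proximity_domain; nid++) {
                peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
                /* Hit 2360 only checks ->gpu; the NULL check is defensive. */
                if (!peer_dev || !peer_dev->gpu)
                        continue;
                /* Line 2362: only peers in the same XGMI hive qualify. */
                if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
                        continue;
                /* Line 2364: ... and only when XGMI sharing is enabled. */
                if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev))
                        continue;

                /* Line 2370: the real function fills an XGMI io-link entry
                 * into the vCRAT image here, passing &avail_size, kdev and
                 * peer_dev->gpu. */
        }
}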
|
| /linux-6.15/drivers/net/netdevsim/ |
| netdev.c |
| 59 | struct net_device *peer_dev; | local in nsim_start_xmit() |
| 74 | peer_dev = peer_ns->netdev; | in nsim_start_xmit() |
| 76 | if (rxq >= peer_dev->num_rx_queues) | in nsim_start_xmit() |
| 77 | rxq = rxq % peer_dev->num_rx_queues; | in nsim_start_xmit() |
| 80 | cfg = peer_dev->cfg; | in nsim_start_xmit() |
| 88 | if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP)) | in nsim_start_xmit() |
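nsim_start_xmit() loops a transmitted skb straight back into the peer netdevsim: it resolves the peer's net_device, clamps the tx queue index to a valid rx queue on the peer, and hands the packet to nsim_forward_skb(). A reduced sketch follows; the RCU handling of the peer pointer, the use of peer_dev->cfg at line 80, and the drop accounting are simplified, and the per-queue rq lookup is an assumption.

/* Sketch only; netdevsim-internal headers assumed in scope. */
static netdev_tx_t nsim_xmit_sketch(struct sk_buff *skb, struct netdevsim *peer_ns)
{
        struct net_device *peer_dev = peer_ns->netdev;
        unsigned int rxq = skb_get_queue_mapping(skb);
        struct nsim_rq *rq;

        /* Lines 76-77: fold the tx queue index onto the peer's rx queues. */
        if (rxq >= peer_dev->num_rx_queues)
                rxq = rxq % peer_dev->num_rx_queues;

        rq = peer_ns->rq[rxq];  /* assumed per-queue receive context */

        /* Line 88: forward into the peer; the increment below is only a
         * stand-in for the driver's own drop accounting. */
        if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
                atomic_long_inc(&peer_dev->rx_dropped);

        return NETDEV_TX_OK;
}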
|
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/ |
| fs_cmd.c |
| 248 | struct mlx5_core_dev *peer_dev; | local in mlx5_cmd_update_root_ft() |
| 251 | mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { | in mlx5_cmd_update_root_ft() |
| 252 | err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, | in mlx5_cmd_update_root_ft() |
| 255 | mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) { | in mlx5_cmd_update_root_ft() |
| 257 | mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, | in mlx5_cmd_update_root_ft() |
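mlx5_cmd_update_root_ft() pushes the new root FDB to every LAG peer and, if a peer rejects it, walks the peers again (the second iterator with j) to roll the already-updated ones back. Below is a sketch of that propagate-then-unwind shape; the trailing arguments are truncated in the hits, so ft_id and old_ft_id are labelled placeholders, and the unwind bound is a reconstruction.

/* Sketch only; ft_id / old_ft_id are placeholders for the truncated args. */
static int update_peer_roots_sketch(struct mlx5_core_dev *dev, bool disconnect,
                                    u32 ft_id, u32 old_ft_id)
{
        struct mlx5_core_dev *peer_dev;
        int err = 0;
        int i, j;

        mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
                err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, ft_id);
                if (!err)
                        continue;

                /* Lines 255/257: roll back the peers updated before the failure. */
                mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
                        if (j >= i)
                                break;
                        mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, old_ft_id);
                }
                break;
        }
        return err;
}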
|
| dev.c |
| 565 | bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) | argument of mlx5_same_hw_devs() |
| 570 | psystem_guid = mlx5_query_nic_system_image_guid(peer_dev); | in mlx5_same_hw_devs() |
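The two dev.c hits suggest mlx5_same_hw_devs() decides whether two functions belong to the same physical device by comparing their NIC system image GUIDs. A sketch consistent with that reading (the real body may differ in detail):

/* Sketch of the GUID comparison implied by the dev.c hits. */
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
        u64 fsystem_guid, psystem_guid;

        fsystem_guid = mlx5_query_nic_system_image_guid(dev);
        psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

        /* Same hardware only if both GUIDs are valid (non-zero) and equal. */
        return fsystem_guid && psystem_guid && fsystem_guid == psystem_guid;
}

The exported prototype for this helper is the mlx5_core.h hit further down (line 453).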
|
| eswitch_offloads.c |
| 1127 | struct mlx5_core_dev *peer_dev, | argument of peer_miss_rules_setup() |
| 1145 | MLX5_CAP_GEN(peer_dev, vhca_id)); | in peer_miss_rules_setup() |
| 1157 | dest->vport.num = peer_dev->priv.eswitch->manager_vport; | in peer_miss_rules_setup() |
| 1158 | dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); | in peer_miss_rules_setup() |
| 1183 | struct mlx5_core_dev *peer_dev) | argument of esw_add_fdb_peer_miss_rules() |
| 1204 | peer_miss_rules_setup(esw, peer_dev, spec, &dest); | in esw_add_fdb_peer_miss_rules() |
| 1244 | peer_dev->priv.eswitch, | in esw_add_fdb_peer_miss_rules() |
| 1258 | if (i >= mlx5_core_max_ec_vfs(peer_dev)) | in esw_add_fdb_peer_miss_rules() |
| 1272 | pfindex = mlx5_get_dev_index(peer_dev); | in esw_add_fdb_peer_miss_rules() |
| 1314 | struct mlx5_core_dev *peer_dev) | argument of esw_del_fdb_peer_miss_rules() |
| [all …] |
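peer_miss_rules_setup() builds the destination for the FDB peer-miss rules: traffic matched against the peer's vhca_id (line 1145) is steered to the peer eswitch's manager vport. A sketch of that destination setup, using only fields visible in the hits plus the standard mlx5 flow-destination enums; the match-criteria side and the rest of the real function are not shown.

/* Sketch of the peer-miss destination setup; spec/match handling omitted. */
static void peer_miss_dest_sketch(struct mlx5_core_dev *peer_dev,
                                  struct mlx5_flow_destination *dest)
{
        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = peer_dev->priv.eswitch->manager_vport;
        dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
        /* The vhca_id is only meaningful across functions, so flag it valid. */
        dest->vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

esw_add_fdb_peer_miss_rules() then installs one such rule per peer vport, bounding the EC-VF portion with mlx5_core_max_ec_vfs(peer_dev) (line 1258) and indexing the stored rules by mlx5_get_dev_index(peer_dev) (line 1272).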
|
| mlx5_core.h |
| 453 | bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); | prototype |
|
| /linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| lag.c |
| 1681 | struct mlx5_core_dev *peer_dev = NULL; | local in mlx5_lag_get_next_peer_mdev() |
| 1703 | peer_dev = ldev->pf[idx].dev; | in mlx5_lag_get_next_peer_mdev() |
| 1707 | return peer_dev; | in mlx5_lag_get_next_peer_mdev() |
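mlx5_lag_get_next_peer_mdev() is the backend behind the mlx5_lag_for_each_peer_mdev() iterator used in the entries above: it scans the LAG's PF slots from the caller's cursor and returns the next populated peer. The sketch below is reconstructed from the three hits only; the ldev lookup and locking are omitted, and both the skip of the calling device and MLX5_MAX_PORTS as the array bound are assumptions.

/* Sketch only; struct mlx5_lag layout beyond pf[].dev is not shown in the hits. */
static struct mlx5_core_dev *next_peer_sketch(struct mlx5_lag *ldev,
                                              struct mlx5_core_dev *dev, int *i)
{
        struct mlx5_core_dev *peer_dev = NULL;
        int idx;

        for (idx = *i; idx < MLX5_MAX_PORTS; idx++) {
                /* Skip empty slots and the device doing the iteration. */
                if (!ldev->pf[idx].dev || ldev->pf[idx].dev == dev)
                        continue;
                peer_dev = ldev->pf[idx].dev;
                *i = idx + 1;
                break;
        }
        return peer_dev;
}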
|