| /f-stack/dpdk/drivers/net/mlx5/ |
| mlx5_rss.c |
    159  struct rte_eth_rss_reta_entry64 *reta_conf,  in mlx5_dev_rss_reta_query() argument
    173  reta_conf[idx].reta[i % RTE_RETA_GROUP_SIZE] =  in mlx5_dev_rss_reta_query()
    194  struct rte_eth_rss_reta_entry64 *reta_conf,  in mlx5_dev_rss_reta_update() argument
    213  if (((reta_conf[idx].mask >> i) & 0x1) == 0)  in mlx5_dev_rss_reta_update()
    215  MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);  in mlx5_dev_rss_reta_update()
    216  (*priv->reta_idx)[i] = reta_conf[idx].reta[pos];  in mlx5_dev_rss_reta_update()
|
| /f-stack/dpdk/examples/ip_pipeline/ |
| link.c |
    72  struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];  in rss_setup() local
    77  memset(reta_conf, 0, sizeof(reta_conf));  in rss_setup()
    80  reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;  in rss_setup()
    87  reta_conf[reta_id].reta[reta_pos] =  in rss_setup()
    93  reta_conf,  in rss_setup()
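The rss_setup() hits above show the application-side pattern: mark every RETA entry valid, spread the Rx queues over the table, and program it in one call. Below is a minimal sketch of that pattern, not the link.c code itself; rss_reta_spread() is a hypothetical helper, and port_id, nb_queues, and reta_size (a multiple of RTE_RETA_GROUP_SIZE, as reported by rte_eth_dev_info_get()) are assumed inputs.

```c
#include <stdint.h>
#include <string.h>

#include <rte_ethdev.h>

/* Enough 64-entry groups to cover the largest common RETA size. */
#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)

static int
rss_reta_spread(uint16_t port_id, uint32_t nb_queues, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
	uint32_t i;

	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < reta_size; i++) {
		uint32_t id = i / RTE_RETA_GROUP_SIZE;   /* which 64-entry group */
		uint32_t pos = i % RTE_RETA_GROUP_SIZE;  /* entry within the group */

		reta_conf[id].mask = UINT64_MAX;         /* update all 64 entries */
		reta_conf[id].reta[pos] = i % nb_queues; /* round-robin queue index */
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
```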
|
| /f-stack/dpdk/drivers/net/null/ |
| rte_eth_null.c |
    64  struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /  member
    373  struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)  in eth_rss_reta_update() argument
    385  internal->reta_conf[i].mask = reta_conf[i].mask;  in eth_rss_reta_update()
    387  if ((reta_conf[i].mask >> j) & 0x01)  in eth_rss_reta_update()
    388  internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];  in eth_rss_reta_update()
    398  struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)  in eth_rss_reta_query() argument
    411  if ((reta_conf[i].mask >> j) & 0x01)  in eth_rss_reta_query()
    412  reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];  in eth_rss_reta_query()
    542  internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;  in eth_dev_null_create()
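On the PMD side, the null driver above is the simplest instance of the recurring callback pattern: only entries whose bit is set in their group's 64-bit mask are copied into the driver's private indirection table. A sketch of that loop follows; ind_tbl stands in for the driver's private table and is an assumption, not a real DPDK symbol.

```c
#include <stdint.h>

#include <rte_ethdev.h>

static void
reta_apply_masked(uint16_t *ind_tbl,
		  const struct rte_eth_rss_reta_entry64 *reta_conf,
		  uint16_t reta_size)
{
	uint16_t i, j;

	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			/* Skip entries the caller did not select via the mask. */
			if (((reta_conf[i].mask >> j) & 0x01) == 0)
				continue;
			ind_tbl[i * RTE_RETA_GROUP_SIZE + j] =
				reta_conf[i].reta[j];
		}
	}
}
```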
|
| /f-stack/dpdk/app/test/ |
| test_link_bonding_rssconf.c |
    55  struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];  member
    212  struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];  in reta_set() local
    217  reta_conf[i].mask = ~0LL;  in reta_set()
    219  reta_conf[i].reta[j] = value;  in reta_set()
    222  return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);  in reta_set()
    240  if (port->reta_conf[index].reta[shift] !=  in reta_check_synced()
    274  port->reta_conf[j].mask = ~0LL;  in slave_reta_fetch()
    277  port->reta_conf, port->dev_info.reta_size),  in slave_reta_fetch()
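The reta_set()/slave_reta_fetch() hits also show the query-side usage: set every mask bit so the driver fills all entries, then read the table back with rte_eth_dev_rss_reta_query(). A hedged sketch of that pattern is below; rss_reta_dump() is a hypothetical helper, and reta_size is assumed to come from rte_eth_dev_info_get().

```c
#include <stdio.h>
#include <string.h>

#include <rte_ethdev.h>

static int
rss_reta_dump(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						  RTE_RETA_GROUP_SIZE];
	uint16_t i;
	int ret;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
		reta_conf[i].mask = ~0ULL;  /* request every entry in the group */

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size);
	if (ret != 0)
		return ret;

	for (i = 0; i < reta_size; i++)
		printf("reta[%u] -> rxq %u\n", i,
		       reta_conf[i / RTE_RETA_GROUP_SIZE]
				.reta[i % RTE_RETA_GROUP_SIZE]);

	return 0;
}
```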
|
| /f-stack/dpdk/examples/pipeline/ |
| obj.c |
    137  struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];  in rss_setup() local
    142  memset(reta_conf, 0, sizeof(reta_conf));  in rss_setup()
    145  reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;  in rss_setup()
    152  reta_conf[reta_id].reta[reta_pos] =  in rss_setup()
    158  reta_conf,  in rss_setup()
|
| /f-stack/dpdk/drivers/net/octeontx2/ |
| otx2_rss.c |
    71  struct rte_eth_rss_reta_entry64 *reta_conf,  in otx2_nix_dev_reta_update() argument
    90  if ((reta_conf[i].mask >> j) & 0x01)  in otx2_nix_dev_reta_update()
    91  rss->ind_tbl[idx] = reta_conf[i].reta[j];  in otx2_nix_dev_reta_update()
    104  struct rte_eth_rss_reta_entry64 *reta_conf,  in otx2_nix_dev_reta_query() argument
    123  if ((reta_conf[i].mask >> j) & 0x01)  in otx2_nix_dev_reta_query()
    124  reta_conf[i].reta[j] = rss->ind_tbl[j];  in otx2_nix_dev_reta_query()
|
| /f-stack/dpdk/drivers/net/hns3/ |
| hns3_rss.c |
    502  struct rte_eth_rss_reta_entry64 *reta_conf,  in hns3_dev_rss_reta_update() argument
    525  if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) {  in hns3_dev_rss_reta_update()
    529  reta_conf[idx].reta[shift],  in hns3_dev_rss_reta_update()
    534  if (reta_conf[idx].mask & (1ULL << shift))  in hns3_dev_rss_reta_update()
    535  indirection_tbl[i] = reta_conf[idx].reta[shift];  in hns3_dev_rss_reta_update()
    558  struct rte_eth_rss_reta_entry64 *reta_conf,  in hns3_dev_rss_reta_query() argument
    577  if (reta_conf[idx].mask & (1ULL << shift))  in hns3_dev_rss_reta_query()
    578  reta_conf[idx].reta[shift] =  in hns3_dev_rss_reta_query()
|
| hns3_rss.h |
    100  struct rte_eth_rss_reta_entry64 *reta_conf,
    103  struct rte_eth_rss_reta_entry64 *reta_conf,
|
| /f-stack/dpdk/drivers/net/ionic/ |
| ionic_ethdev.c |
    39  struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size);
    41  struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size);
    556  struct rte_eth_rss_reta_entry64 *reta_conf,  in ionic_dev_rss_reta_update() argument
    584  if (reta_conf[i].mask & ((uint64_t)1 << j)) {  in ionic_dev_rss_reta_update()
    586  lif->rss_ind_tbl[index] = reta_conf[i].reta[j];  in ionic_dev_rss_reta_update()
    596  struct rte_eth_rss_reta_entry64 *reta_conf,  in ionic_dev_rss_reta_query() argument
    622  memcpy(reta_conf->reta,  in ionic_dev_rss_reta_query()
    625  reta_conf++;  in ionic_dev_rss_reta_query()
|
| /f-stack/dpdk/drivers/net/netvsc/ |
| hn_ethdev.c |
    285  struct rte_eth_rss_reta_entry64 *reta_conf,  in hn_rss_reta_update() argument
    304  if (reta_conf[idx].mask & mask)  in hn_rss_reta_update()
    305  hv->rss_ind[i] = reta_conf[idx].reta[shift];  in hn_rss_reta_update()
    322  return hn_vf_reta_hash_update(dev, reta_conf, reta_size);  in hn_rss_reta_update()
    326  struct rte_eth_rss_reta_entry64 *reta_conf,  in hn_rss_reta_query() argument
    344  if (reta_conf[idx].mask & mask)  in hn_rss_reta_query()
    345  reta_conf[idx].reta[shift] = hv->rss_ind[i];  in hn_rss_reta_query()
|
| hn_vf.c |
    555  struct rte_eth_rss_reta_entry64 *reta_conf,  in hn_vf_reta_hash_update() argument
    566  reta_conf, reta_size);  in hn_vf_reta_hash_update()
|
| /f-stack/dpdk/drivers/net/liquidio/ |
| lio_ethdev.c |
    499  struct rte_eth_rss_reta_entry64 *reta_conf,  in lio_dev_rss_reta_update() argument
    545  if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {  in lio_dev_rss_reta_update()
    547  rss_state->itable[index] = reta_conf[i].reta[j];  in lio_dev_rss_reta_update()
    572  struct rte_eth_rss_reta_entry64 *reta_conf,  in lio_dev_rss_reta_query() argument
    589  memcpy(reta_conf->reta,  in lio_dev_rss_reta_query()
    592  reta_conf++;  in lio_dev_rss_reta_query()
    1085  struct rte_eth_rss_reta_entry64 reta_conf[8];  in lio_dev_rss_configure() local
    1104  memset(reta_conf, 0, sizeof(reta_conf));  in lio_dev_rss_configure()
    1112  reta_conf[conf_idx].reta[reta_idx] = q_idx;  in lio_dev_rss_configure()
    1113  reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);  in lio_dev_rss_configure()
    [all …]
|
| /f-stack/dpdk/drivers/net/bonding/ |
| rte_eth_bond_pmd.c |
    1833  &internals->reta_conf[0],  in slave_configure()
    2969  internals->reta_conf[i].mask = reta_conf[i].mask;  in bond_ethdev_rss_reta_update()
    2971  if ((reta_conf[i].mask >> j) & 0x01)  in bond_ethdev_rss_reta_update()
    2972  internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];  in bond_ethdev_rss_reta_update()
    2977  memcpy(&internals->reta_conf[i], &internals->reta_conf[0],  in bond_ethdev_rss_reta_update()
    2978  sizeof(internals->reta_conf[0]) * reta_count);  in bond_ethdev_rss_reta_update()
    2984  &internals->reta_conf[0], slave_reta_size);  in bond_ethdev_rss_reta_update()
    3005  if ((reta_conf[i].mask >> j) & 0x01)  in bond_ethdev_rss_reta_query()
    3006  reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];  in bond_ethdev_rss_reta_query()
    3507  internals->reta_conf[i].mask = ~0LL;  in bond_ethdev_configure()
    [all …]
|
| eth_bond_private.h |
    170  struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /  member
|
| /f-stack/dpdk/drivers/net/enic/ |
| enic_ethdev.c |
    779  *reta_conf,  in enicpmd_dev_rss_reta_query()
    795  if (reta_conf[idx].mask & (1ULL << shift))  in enicpmd_dev_rss_reta_query()
    796  reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(  in enicpmd_dev_rss_reta_query()
    805  *reta_conf,  in enicpmd_dev_rss_reta_update()
    827  if (reta_conf[idx].mask & (1ULL << shift))  in enicpmd_dev_rss_reta_update()
    830  reta_conf[idx].reta[shift]);  in enicpmd_dev_rss_reta_update()
|
| /f-stack/dpdk/drivers/net/cxgbe/ |
| cxgbe_ethdev.c |
    934  struct rte_eth_rss_reta_entry64 *reta_conf,  in cxgbe_dev_rss_reta_update() argument
    956  if (!(reta_conf[idx].mask & (1ULL << shift)))  in cxgbe_dev_rss_reta_update()
    959  rss[i] = reta_conf[idx].reta[shift];  in cxgbe_dev_rss_reta_update()
    971  struct rte_eth_rss_reta_entry64 *reta_conf,  in cxgbe_dev_rss_reta_query() argument
    987  if (!(reta_conf[idx].mask & (1ULL << shift)))  in cxgbe_dev_rss_reta_query()
    990  reta_conf[idx].reta[shift] = pi->rss[i];  in cxgbe_dev_rss_reta_query()
|
| /f-stack/dpdk/drivers/net/txgbe/ |
| txgbe_ethdev.h |
    394  struct rte_eth_rss_reta_entry64 *reta_conf,
    397  struct rte_eth_rss_reta_entry64 *reta_conf,
|
| /f-stack/dpdk/drivers/net/qede/ |
| qede_ethdev.c |
    1056  struct rte_eth_rss_reta_entry64 reta_conf[2];  in qede_config_rss() local
    1073  memset(reta_conf, 0, sizeof(reta_conf));  in qede_config_rss()
    1075  reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;  in qede_config_rss()
    1081  reta_conf[id].reta[pos] = q;  in qede_config_rss()
    1083  if (qede_rss_reta_update(eth_dev, &reta_conf[0],  in qede_config_rss()
    2218  struct rte_eth_rss_reta_entry64 *reta_conf,  in qede_rss_reta_update() argument
    2256  if (reta_conf[idx].mask & (1ULL << shift)) {  in qede_rss_reta_update()
    2257  entry = reta_conf[idx].reta[shift];  in qede_rss_reta_update()
    2283  struct rte_eth_rss_reta_entry64 *reta_conf,  in qede_rss_reta_query() argument
    2300  if (reta_conf[idx].mask & (1ULL << shift)) {  in qede_rss_reta_query()
    [all …]
|
| /f-stack/dpdk/drivers/net/ena/ |
| ena_ethdev.c |
    224  struct rte_eth_rss_reta_entry64 *reta_conf,
    227  struct rte_eth_rss_reta_entry64 *reta_conf,
    553  struct rte_eth_rss_reta_entry64 *reta_conf,  in ena_rss_reta_update() argument
    563  if ((reta_size == 0) || (reta_conf == NULL))  in ena_rss_reta_update()
    579  if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {  in ena_rss_reta_update()
    581  ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);  in ena_rss_reta_update()
    610  struct rte_eth_rss_reta_entry64 *reta_conf,  in ena_rss_reta_query() argument
    621  if (reta_size == 0 || reta_conf == NULL ||  in ena_rss_reta_query()
    622  (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))  in ena_rss_reta_query()
    636  if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))  in ena_rss_reta_query()
    [all …]
|
| /f-stack/dpdk/drivers/net/iavf/ |
| iavf_ethdev.c |
    104  struct rte_eth_rss_reta_entry64 *reta_conf,
    107  struct rte_eth_rss_reta_entry64 *reta_conf,
    1029  struct rte_eth_rss_reta_entry64 *reta_conf,  in iavf_dev_rss_reta_update() argument
    1060  if (reta_conf[idx].mask & (1ULL << shift))  in iavf_dev_rss_reta_update()
    1061  lut[i] = reta_conf[idx].reta[shift];  in iavf_dev_rss_reta_update()
    1076  struct rte_eth_rss_reta_entry64 *reta_conf,  in iavf_dev_rss_reta_query() argument
    1097  if (reta_conf[idx].mask & (1ULL << shift))  in iavf_dev_rss_reta_query()
    1098  reta_conf[idx].reta[shift] = vf->rss_lut[i];  in iavf_dev_rss_reta_query()
|
| /f-stack/dpdk/drivers/net/atlantic/ |
| atl_ethdev.c |
    103  struct rte_eth_rss_reta_entry64 *reta_conf,
    106  struct rte_eth_rss_reta_entry64 *reta_conf,
    1822  struct rte_eth_rss_reta_entry64 *reta_conf,  in atl_reta_update() argument
    1830  cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],  in atl_reta_update()
    1839  struct rte_eth_rss_reta_entry64 *reta_conf,  in atl_reta_query() argument
    1846  reta_conf->reta[i] = cf->aq_rss.indirection_table[i];  in atl_reta_query()
    1847  reta_conf->mask = ~0U;  in atl_reta_query()
|
| /f-stack/dpdk/drivers/net/axgbe/ |
| axgbe_ethdev.c |
    64  struct rte_eth_rss_reta_entry64 *reta_conf,
    67  struct rte_eth_rss_reta_entry64 *reta_conf,
    493  struct rte_eth_rss_reta_entry64 *reta_conf,  in axgbe_dev_rss_reta_update() argument
    513  if ((reta_conf[idx].mask & (1ULL << shift)) == 0)  in axgbe_dev_rss_reta_update()
    515  pdata->rss_table[i] = reta_conf[idx].reta[shift];  in axgbe_dev_rss_reta_update()
    525  struct rte_eth_rss_reta_entry64 *reta_conf,  in axgbe_dev_rss_reta_query() argument
    544  if ((reta_conf[idx].mask & (1ULL << shift)) == 0)  in axgbe_dev_rss_reta_query()
    546  reta_conf[idx].reta[shift] = pdata->rss_table[i];  in axgbe_dev_rss_reta_query()
|
| /f-stack/dpdk/drivers/net/igc/ |
| igc_ethdev.c |
    234  struct rte_eth_rss_reta_entry64 *reta_conf,
    237  struct rte_eth_rss_reta_entry64 *reta_conf,
    2255  struct rte_eth_rss_reta_entry64 *reta_conf,  in eth_igc_rss_reta_update() argument
    2278  mask = (uint8_t)((reta_conf[idx].mask >> shift) &  in eth_igc_rss_reta_update()
    2298  (uint8_t)reta_conf[idx].reta[shift + j];  in eth_igc_rss_reta_update()
    2311  struct rte_eth_rss_reta_entry64 *reta_conf,  in eth_igc_rss_reta_query() argument
    2334  mask = (uint8_t)((reta_conf[idx].mask >> shift) &  in eth_igc_rss_reta_query()
    2348  reta_conf[idx].reta[shift + j] = reta.bytes[j];  in eth_igc_rss_reta_query()
|
| /f-stack/dpdk/lib/librte_ethdev/ |
| rte_ethdev_driver.h |
    342  struct rte_eth_rss_reta_entry64 *reta_conf,
    347  struct rte_eth_rss_reta_entry64 *reta_conf,
|
| rte_ethdev.c |
    3728  if (!reta_conf)  in eth_check_reta_mask()
    3733  if (reta_conf[i].mask)  in eth_check_reta_mask()
    3747  if (!reta_conf)  in eth_check_reta_entry()
    3758  if ((reta_conf[idx].mask & (1ULL << shift)) &&  in eth_check_reta_entry()
    3759  (reta_conf[idx].reta[shift] >= max_rxq)) {  in eth_check_reta_entry()
    3763  reta_conf[idx].reta[shift], max_rxq);  in eth_check_reta_entry()
    3773  struct rte_eth_rss_reta_entry64 *reta_conf,  in rte_eth_dev_rss_reta_update() argument
    3781  ret = eth_check_reta_mask(reta_conf, reta_size);  in rte_eth_dev_rss_reta_update()
    3788  ret = eth_check_reta_entry(reta_conf, reta_size,  in rte_eth_dev_rss_reta_update()
    3800  struct rte_eth_rss_reta_entry64 *reta_conf,  in rte_eth_dev_rss_reta_query() argument
    [all …]
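The rte_ethdev.c hits show the generic layer validating reta_conf before dispatching to the PMD callback. Below is a simplified sketch of the bounds check done by eth_check_reta_entry(), under the assumption that every masked-in entry must reference an existing Rx queue; the real function also logs the offending index, and check_reta_entries() here is only an illustrative name.

```c
#include <errno.h>
#include <stdint.h>

#include <rte_ethdev.h>

static int
check_reta_entries(const struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size, uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		/* Reject any selected entry that points past the last Rx queue. */
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
		    reta_conf[idx].reta[shift] >= max_rxq)
			return -EINVAL;
	}

	return 0;
}
```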
|