Lines matching refs:pf (references to the per-port struct i40e_pf *pf in the DPDK i40e driver, i40e_ethdev.c)

281 static int i40e_pf_setup(struct i40e_pf *pf);
282 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
305 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
321 static void i40e_filter_input_set_init(struct i40e_pf *pf);
350 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
385 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
391 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
393 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
395 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
396 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
397 static void i40e_filter_restore(struct i40e_pf *pf);
399 static int i40e_pf_config_rss(struct i40e_pf *pf);
749 static inline void i40e_config_automask(struct i40e_pf *pf) in i40e_config_automask() argument
751 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_config_automask()
760 if (!pf->support_multi_driver) in i40e_config_automask()
773 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) in i40e_add_tx_flow_control_drop_filter() argument
775 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_add_tx_flow_control_drop_filter()
783 pf->main_vsi_seid, 0, in i40e_add_tx_flow_control_drop_filter()
931 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in config_floating_veb() local
934 memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list)); in config_floating_veb()
937 pf->floating_veb = in config_floating_veb()
940 pf->floating_veb, in config_floating_veb()
941 pf->floating_veb_list); in config_floating_veb()
943 pf->floating_veb = false; in config_floating_veb()
953 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_init_ethtype_filter_list() local
954 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; in i40e_init_ethtype_filter_list()
998 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_init_tunnel_filter_list() local
999 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; in i40e_init_tunnel_filter_list()
1043 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_init_fdir_filter_list() local
1044 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_init_fdir_filter_list()
1045 struct i40e_fdir_info *fdir_info = &pf->fdir; in i40e_init_fdir_filter_list()
1159 i40e_init_customized_info(struct i40e_pf *pf) in i40e_init_customized_info() argument
1165 pf->customized_pctype[i].index = i; in i40e_init_customized_info()
1166 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID; in i40e_init_customized_info()
1167 pf->customized_pctype[i].valid = false; in i40e_init_customized_info()
1170 pf->gtp_support = false; in i40e_init_customized_info()
1171 pf->esp_support = false; in i40e_init_customized_info()
1175 i40e_init_filter_invalidation(struct i40e_pf *pf) in i40e_init_filter_invalidation() argument
1177 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_init_filter_invalidation()
1178 struct i40e_fdir_info *fdir_info = &pf->fdir; in i40e_init_filter_invalidation()
1182 if (!pf->support_multi_driver) { in i40e_init_filter_invalidation()
1202 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_init_queue_region_conf() local
1203 struct i40e_queue_regions *info = &pf->queue_region; in i40e_init_queue_region_conf()
1217 struct i40e_pf *pf; in i40e_parse_multi_drv_handler() local
1221 pf = (struct i40e_pf *)opaque; in i40e_parse_multi_drv_handler()
1231 pf->support_multi_driver = (bool)support_multi_driver; in i40e_parse_multi_drv_handler()
1242 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_support_multi_driver() local
1247 pf->support_multi_driver = false; in i40e_support_multi_driver()
1268 i40e_parse_multi_drv_handler, pf) < 0) { in i40e_support_multi_driver()
1434 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in eth_i40e_dev_init() local
1467 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); in eth_i40e_dev_init()
1468 pf->adapter->eth_dev = dev; in eth_i40e_dev_init()
1469 pf->dev_data = dev->data; in eth_i40e_dev_init()
1471 hw->back = I40E_PF_TO_ADAPTER(pf); in eth_i40e_dev_init()
1507 i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg); in eth_i40e_dev_init()
1551 i40e_config_automask(pf); in eth_i40e_dev_init()
1560 if (!pf->support_multi_driver) in eth_i40e_dev_init()
1564 i40e_filter_input_set_init(pf); in eth_i40e_dev_init()
1567 if (!pf->support_multi_driver) { in eth_i40e_dev_init()
1610 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp); in eth_i40e_dev_init()
1615 ret = i40e_res_pool_init(&pf->msix_pool, 1, in eth_i40e_dev_init()
1653 if (!pf->support_multi_driver) { in eth_i40e_dev_init()
1665 ret = i40e_pf_setup(pf); in eth_i40e_dev_init()
1671 vsi = pf->main_vsi; in eth_i40e_dev_init()
1677 if (!pf->floating_veb) { in eth_i40e_dev_init()
1704 pf->flags &= ~I40E_FLAG_DCB; in eth_i40e_dev_init()
1724 if (!pf->support_multi_driver) in eth_i40e_dev_init()
1732 i40e_add_tx_flow_control_drop_filter(pf); in eth_i40e_dev_init()
1740 TAILQ_INIT(&pf->mirror_list); in eth_i40e_dev_init()
1743 TAILQ_INIT(&pf->rss_config_list); in eth_i40e_dev_init()
1749 i40e_init_customized_info(pf); in eth_i40e_dev_init()
1752 i40e_init_filter_invalidation(pf); in eth_i40e_dev_init()
1768 memset(&pf->rss_info, 0, in eth_i40e_dev_init()
1777 rte_free(pf->tunnel.hash_table); in eth_i40e_dev_init()
1778 rte_free(pf->tunnel.hash_map); in eth_i40e_dev_init()
1780 rte_free(pf->ethertype.hash_table); in eth_i40e_dev_init()
1781 rte_free(pf->ethertype.hash_map); in eth_i40e_dev_init()
1786 i40e_vsi_release(pf->main_vsi); in eth_i40e_dev_init()
1792 i40e_res_pool_destroy(&pf->msix_pool); in eth_i40e_dev_init()
1794 i40e_res_pool_destroy(&pf->qp_pool); in eth_i40e_dev_init()
1804 i40e_rm_ethtype_filter_list(struct i40e_pf *pf) in i40e_rm_ethtype_filter_list() argument
1809 ethertype_rule = &pf->ethertype; in i40e_rm_ethtype_filter_list()
1824 i40e_rm_tunnel_filter_list(struct i40e_pf *pf) in i40e_rm_tunnel_filter_list() argument
1829 tunnel_rule = &pf->tunnel; in i40e_rm_tunnel_filter_list()
1843 i40e_rm_fdir_filter_list(struct i40e_pf *pf) in i40e_rm_fdir_filter_list() argument
1848 fdir_info = &pf->fdir; in i40e_rm_fdir_filter_list()
1856 i40e_fdir_memory_cleanup(struct i40e_pf *pf) in i40e_fdir_memory_cleanup() argument
1860 fdir_info = &pf->fdir; in i40e_fdir_memory_cleanup()
1909 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_configure() local
1934 ret = i40e_fdir_setup(pf); in i40e_dev_configure()
1945 i40e_fdir_teardown(pf); in i40e_dev_configure()
1975 TAILQ_INIT(&pf->flow_list); in i40e_dev_configure()
1981 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { in i40e_dev_configure()
1982 i40e_vsi_release(pf->vmdq[i].vsi); in i40e_dev_configure()
1983 pf->vmdq[i].vsi = NULL; in i40e_dev_configure()
1985 rte_free(pf->vmdq); in i40e_dev_configure()
1986 pf->vmdq = NULL; in i40e_dev_configure()
1993 i40e_fdir_teardown(pf); in i40e_dev_configure()
2046 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in __vsi_queues_bind_intr() local
2065 i40e_calc_itr_interval(1, pf->support_multi_driver); in __vsi_queues_bind_intr()
2196 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_vsi_enable_queues_intr() local
2199 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) in i40e_vsi_enable_queues_intr()
2223 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_vsi_disable_queues_intr() local
2226 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) in i40e_vsi_disable_queues_intr()
2381 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_start() local
2383 struct i40e_vsi *main_vsi = pf->main_vsi; in i40e_dev_start()
2418 ret = i40e_dev_rxtx_init(pf); in i40e_dev_start()
2426 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; in i40e_dev_start()
2433 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { in i40e_dev_start()
2434 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; in i40e_dev_start()
2435 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi, in i40e_dev_start()
2439 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi); in i40e_dev_start()
2460 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { in i40e_dev_start()
2461 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid, in i40e_dev_start()
2468 if (pf->vfs) { in i40e_dev_start()
2469 for (i = 0; i < pf->vf_num; i++) { in i40e_dev_start()
2470 vsi = pf->vfs[i].vsi; in i40e_dev_start()
2524 i40e_filter_restore(pf); in i40e_dev_start()
2526 if (pf->tm_conf.root && !pf->tm_conf.committed) in i40e_dev_start()
2546 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_stop() local
2548 struct i40e_vsi *main_vsi = pf->main_vsi; in i40e_dev_stop()
2572 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { in i40e_dev_stop()
2573 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi); in i40e_dev_stop()
2574 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi); in i40e_dev_stop()
2597 pf->tm_conf.committed = false; in i40e_dev_stop()
2602 pf->adapter->rss_reta_updated = 0; in i40e_dev_stop()
2610 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_close() local
2627 ret = rte_eth_switch_domain_free(pf->switch_domain_id); in i40e_dev_close()
2635 while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { in i40e_dev_close()
2637 pf->main_vsi->veb->seid, in i40e_dev_close()
2648 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); in i40e_dev_close()
2650 pf->nb_mirror_rule--; in i40e_dev_close()
2664 i40e_fdir_teardown(pf); in i40e_dev_close()
2669 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { in i40e_dev_close()
2670 i40e_vsi_release(pf->vmdq[i].vsi); in i40e_dev_close()
2671 pf->vmdq[i].vsi = NULL; in i40e_dev_close()
2673 rte_free(pf->vmdq); in i40e_dev_close()
2674 pf->vmdq = NULL; in i40e_dev_close()
2677 i40e_vsi_release(pf->main_vsi); in i40e_dev_close()
2683 i40e_res_pool_destroy(&pf->qp_pool); in i40e_dev_close()
2684 i40e_res_pool_destroy(&pf->msix_pool); in i40e_dev_close()
2687 if (!pf->support_multi_driver) in i40e_dev_close()
2726 i40e_rm_ethtype_filter_list(pf); in i40e_dev_close()
2727 i40e_rm_tunnel_filter_list(pf); in i40e_dev_close()
2728 i40e_rm_fdir_filter_list(pf); in i40e_dev_close()
2731 while ((p_flow = TAILQ_FIRST(&pf->flow_list))) { in i40e_dev_close()
2732 TAILQ_REMOVE(&pf->flow_list, p_flow, node); in i40e_dev_close()
2739 i40e_fdir_memory_cleanup(pf); in i40e_dev_close()
2777 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_promiscuous_enable() local
2779 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_promiscuous_enable()
2805 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_promiscuous_disable() local
2807 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_promiscuous_disable()
2837 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_allmulticast_enable() local
2839 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_allmulticast_enable()
2854 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_allmulticast_disable() local
2856 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_allmulticast_disable()
3134 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) in i40e_read_stats_registers() argument
3137 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ in i40e_read_stats_registers()
3138 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */ in i40e_read_stats_registers()
3143 pf->offset_loaded, in i40e_read_stats_registers()
3144 &pf->internal_stats_offset.rx_bytes, in i40e_read_stats_registers()
3145 &pf->internal_stats.rx_bytes, in i40e_read_stats_registers()
3146 &pf->internal_prev_rx_bytes); in i40e_read_stats_registers()
3149 pf->offset_loaded, in i40e_read_stats_registers()
3150 &pf->internal_stats_offset.tx_bytes, in i40e_read_stats_registers()
3151 &pf->internal_stats.tx_bytes, in i40e_read_stats_registers()
3152 &pf->internal_prev_tx_bytes); in i40e_read_stats_registers()
3156 pf->offset_loaded, in i40e_read_stats_registers()
3157 &pf->internal_stats_offset.rx_unicast, in i40e_read_stats_registers()
3158 &pf->internal_stats.rx_unicast); in i40e_read_stats_registers()
3161 pf->offset_loaded, in i40e_read_stats_registers()
3162 &pf->internal_stats_offset.rx_multicast, in i40e_read_stats_registers()
3163 &pf->internal_stats.rx_multicast); in i40e_read_stats_registers()
3166 pf->offset_loaded, in i40e_read_stats_registers()
3167 &pf->internal_stats_offset.rx_broadcast, in i40e_read_stats_registers()
3168 &pf->internal_stats.rx_broadcast); in i40e_read_stats_registers()
3172 pf->offset_loaded, in i40e_read_stats_registers()
3173 &pf->internal_stats_offset.tx_unicast, in i40e_read_stats_registers()
3174 &pf->internal_stats.tx_unicast); in i40e_read_stats_registers()
3177 pf->offset_loaded, in i40e_read_stats_registers()
3178 &pf->internal_stats_offset.tx_multicast, in i40e_read_stats_registers()
3179 &pf->internal_stats.tx_multicast); in i40e_read_stats_registers()
3182 pf->offset_loaded, in i40e_read_stats_registers()
3183 &pf->internal_stats_offset.tx_broadcast, in i40e_read_stats_registers()
3184 &pf->internal_stats.tx_broadcast); in i40e_read_stats_registers()
3187 pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + in i40e_read_stats_registers()
3188 pf->internal_stats.rx_multicast + in i40e_read_stats_registers()
3189 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN; in i40e_read_stats_registers()
3194 pf->offset_loaded, &os->eth.rx_bytes, in i40e_read_stats_registers()
3195 &ns->eth.rx_bytes, &pf->prev_rx_bytes); in i40e_read_stats_registers()
3198 pf->offset_loaded, &os->eth.rx_unicast, in i40e_read_stats_registers()
3202 pf->offset_loaded, &os->eth.rx_multicast, in i40e_read_stats_registers()
3206 pf->offset_loaded, &os->eth.rx_broadcast, in i40e_read_stats_registers()
3221 if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes) in i40e_read_stats_registers()
3224 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes; in i40e_read_stats_registers()
3226 if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast) in i40e_read_stats_registers()
3229 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast; in i40e_read_stats_registers()
3231 if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast) in i40e_read_stats_registers()
3234 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast; in i40e_read_stats_registers()
3236 if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast) in i40e_read_stats_registers()
3239 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast; in i40e_read_stats_registers()
3242 pf->offset_loaded, &os->eth.rx_discards, in i40e_read_stats_registers()
3247 pf->offset_loaded, in i40e_read_stats_registers()
3252 pf->offset_loaded, &os->eth.tx_bytes, in i40e_read_stats_registers()
3253 &ns->eth.tx_bytes, &pf->prev_tx_bytes); in i40e_read_stats_registers()
3256 pf->offset_loaded, &os->eth.tx_unicast, in i40e_read_stats_registers()
3260 pf->offset_loaded, &os->eth.tx_multicast, in i40e_read_stats_registers()
3264 pf->offset_loaded, &os->eth.tx_broadcast, in i40e_read_stats_registers()
3275 if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes) in i40e_read_stats_registers()
3278 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes; in i40e_read_stats_registers()
3280 if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast) in i40e_read_stats_registers()
3283 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast; in i40e_read_stats_registers()
3285 if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast) in i40e_read_stats_registers()
3288 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast; in i40e_read_stats_registers()
3290 if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast) in i40e_read_stats_registers()
3293 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast; in i40e_read_stats_registers()
3299 pf->offset_loaded, &os->tx_dropped_link_down, in i40e_read_stats_registers()
3302 pf->offset_loaded, &os->crc_errors, in i40e_read_stats_registers()
3305 pf->offset_loaded, &os->illegal_bytes, in i40e_read_stats_registers()
3309 pf->offset_loaded, &os->mac_local_faults, in i40e_read_stats_registers()
3312 pf->offset_loaded, &os->mac_remote_faults, in i40e_read_stats_registers()
3315 pf->offset_loaded, &os->rx_length_errors, in i40e_read_stats_registers()
3318 pf->offset_loaded, &os->link_xon_rx, in i40e_read_stats_registers()
3321 pf->offset_loaded, &os->link_xoff_rx, in i40e_read_stats_registers()
3325 pf->offset_loaded, in i40e_read_stats_registers()
3329 pf->offset_loaded, in i40e_read_stats_registers()
3334 pf->offset_loaded, &os->link_xon_tx, in i40e_read_stats_registers()
3337 pf->offset_loaded, &os->link_xoff_tx, in i40e_read_stats_registers()
3341 pf->offset_loaded, in i40e_read_stats_registers()
3345 pf->offset_loaded, in i40e_read_stats_registers()
3349 pf->offset_loaded, in i40e_read_stats_registers()
3355 pf->offset_loaded, &os->rx_size_64, in i40e_read_stats_registers()
3359 pf->offset_loaded, &os->rx_size_127, in i40e_read_stats_registers()
3363 pf->offset_loaded, &os->rx_size_255, in i40e_read_stats_registers()
3367 pf->offset_loaded, &os->rx_size_511, in i40e_read_stats_registers()
3371 pf->offset_loaded, &os->rx_size_1023, in i40e_read_stats_registers()
3375 pf->offset_loaded, &os->rx_size_1522, in i40e_read_stats_registers()
3379 pf->offset_loaded, &os->rx_size_big, in i40e_read_stats_registers()
3382 pf->offset_loaded, &os->rx_undersize, in i40e_read_stats_registers()
3385 pf->offset_loaded, &os->rx_fragments, in i40e_read_stats_registers()
3388 pf->offset_loaded, &os->rx_oversize, in i40e_read_stats_registers()
3391 pf->offset_loaded, &os->rx_jabber, in i40e_read_stats_registers()
3395 pf->offset_loaded, &os->tx_size_64, in i40e_read_stats_registers()
3399 pf->offset_loaded, &os->tx_size_127, in i40e_read_stats_registers()
3403 pf->offset_loaded, &os->tx_size_255, in i40e_read_stats_registers()
3407 pf->offset_loaded, &os->tx_size_511, in i40e_read_stats_registers()
3411 pf->offset_loaded, &os->tx_size_1023, in i40e_read_stats_registers()
3415 pf->offset_loaded, &os->tx_size_1522, in i40e_read_stats_registers()
3419 pf->offset_loaded, &os->tx_size_big, in i40e_read_stats_registers()
3421 i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index), in i40e_read_stats_registers()
3422 pf->offset_loaded, in i40e_read_stats_registers()
3427 pf->offset_loaded = true; in i40e_read_stats_registers()
3429 if (pf->main_vsi) in i40e_read_stats_registers()
3430 i40e_update_vsi_stats(pf->main_vsi); in i40e_read_stats_registers()
3437 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_stats_get() local
3439 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */ in i40e_dev_stats_get()
3444 i40e_read_stats_registers(pf, hw); in i40e_dev_stats_get()
3446 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + in i40e_dev_stats_get()
3447 pf->main_vsi->eth_stats.rx_multicast + in i40e_dev_stats_get()
3448 pf->main_vsi->eth_stats.rx_broadcast - in i40e_dev_stats_get()
3449 pf->main_vsi->eth_stats.rx_discards; in i40e_dev_stats_get()
3453 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; in i40e_dev_stats_get()
3456 pf->main_vsi->eth_stats.tx_errors; in i40e_dev_stats_get()
3460 pf->main_vsi->eth_stats.rx_discards; in i40e_dev_stats_get()
3465 if (pf->vfs) { in i40e_dev_stats_get()
3466 for (i = 0; i < pf->vf_num; i++) { in i40e_dev_stats_get()
3467 vsi = pf->vfs[i].vsi; in i40e_dev_stats_get()
3556 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_stats_reset() local
3560 pf->offset_loaded = false; in i40e_dev_stats_reset()
3561 if (pf->main_vsi) in i40e_dev_stats_reset()
3562 pf->main_vsi->offset_loaded = false; in i40e_dev_stats_reset()
3565 i40e_read_stats_registers(pf, hw); in i40e_dev_stats_reset()
3632 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_xstats_get() local
3635 struct i40e_hw_port_stats *hw_stats = &pf->stats; in i40e_dev_xstats_get()
3641 i40e_read_stats_registers(pf, hw); in i40e_dev_xstats_get()
3745 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_info_get() local
3747 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_info_get()
3795 dev_info->reta_size = pf->hash_lut_size; in i40e_dev_info_get()
3796 dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask; in i40e_dev_info_get()
3834 if (pf->flags & I40E_FLAG_VMDQ) { in i40e_dev_info_get()
3835 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi; in i40e_dev_info_get()
3837 dev_info->vmdq_queue_num = pf->vmdq_nb_qps * in i40e_dev_info_get()
3838 pf->max_nb_vmdq_vsi; in i40e_dev_info_get()
3887 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_vlan_filter_set() local
3888 struct i40e_vsi *vsi = pf->main_vsi; in i40e_vlan_filter_set()
3954 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_vlan_tpid_set() local
3967 if (pf->support_multi_driver) { in i40e_vlan_tpid_set()
4034 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_vlan_offload_set() local
4035 struct i40e_vsi *vsi = pf->main_vsi; in i40e_vlan_offload_set()
4089 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_vlan_pvid_set() local
4090 struct i40e_vsi *vsi = pf->main_vsi; in i40e_vlan_pvid_set()
4136 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_flow_ctrl_get() local
4138 fc_conf->pause_time = pf->fc_conf.pause_time; in i40e_flow_ctrl_get()
4141 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = in i40e_flow_ctrl_get()
4143 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = in i40e_flow_ctrl_get()
4146 fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; in i40e_flow_ctrl_get()
4147 fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; in i40e_flow_ctrl_get()
4176 struct i40e_pf *pf; in i40e_flow_ctrl_set() local
4196 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_flow_ctrl_set()
4199 pf->fc_conf.pause_time = fc_conf->pause_time; in i40e_flow_ctrl_set()
4200 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water; in i40e_flow_ctrl_set()
4201 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water; in i40e_flow_ctrl_set()
4220 pf->fc_conf.pause_time); in i40e_flow_ctrl_set()
4228 pf->fc_conf.pause_time); in i40e_flow_ctrl_set()
4242 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001; in i40e_flow_ctrl_set()
4248 pf->fc_conf.pause_time / 2); in i40e_flow_ctrl_set()
4266 if (!pf->support_multi_driver) { in i40e_flow_ctrl_set()
4269 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] in i40e_flow_ctrl_set()
4272 (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] in i40e_flow_ctrl_set()
4275 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] in i40e_flow_ctrl_set()
4278 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] in i40e_flow_ctrl_set()
4306 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_macaddr_add() local
4313 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || in i40e_macaddr_add()
4314 !pf->nb_cfg_vmdq_vsi)) { in i40e_macaddr_add()
4316 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled", in i40e_macaddr_add()
4321 if (pool > pf->nb_cfg_vmdq_vsi) { in i40e_macaddr_add()
4323 pool, pf->nb_cfg_vmdq_vsi); in i40e_macaddr_add()
4334 vsi = pf->main_vsi; in i40e_macaddr_add()
4336 vsi = pf->vmdq[pool - 1].vsi; in i40e_macaddr_add()
4350 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_macaddr_remove() local
4365 vsi = pf->main_vsi; in i40e_macaddr_remove()
4368 if (!(pf->flags & I40E_FLAG_VMDQ) || in i40e_macaddr_remove()
4369 (i > pf->nb_cfg_vmdq_vsi)) { in i40e_macaddr_remove()
4374 vsi = pf->vmdq[i - 1].vsi; in i40e_macaddr_remove()
4389 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_get_rss_lut() local
4397 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { in i40e_get_rss_lut()
4427 struct i40e_pf *pf; in i40e_set_rss_lut() local
4434 pf = I40E_VSI_TO_PF(vsi); in i40e_set_rss_lut()
4437 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { in i40e_set_rss_lut()
4471 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_rss_reta_update() local
4472 uint16_t i, lut_size = pf->hash_lut_size; in i40e_dev_rss_reta_update()
4490 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); in i40e_dev_rss_reta_update()
4499 ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size); in i40e_dev_rss_reta_update()
4501 pf->adapter->rss_reta_updated = 1; in i40e_dev_rss_reta_update()
4514 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_rss_reta_query() local
4515 uint16_t i, lut_size = pf->hash_lut_size; in i40e_dev_rss_reta_query()
4534 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size); in i40e_dev_rss_reta_query()
4712 struct i40e_pf *pf; in i40e_pf_parse_vf_queue_number_handler() local
4716 pf = (struct i40e_pf *)opaque; in i40e_pf_parse_vf_queue_number_handler()
4723 "kept the value = %hu", value, pf->vf_nb_qp_max); in i40e_pf_parse_vf_queue_number_handler()
4728 pf->vf_nb_qp_max = (uint16_t)num; in i40e_pf_parse_vf_queue_number_handler()
4733 "kept the value = %hu", num, pf->vf_nb_qp_max); in i40e_pf_parse_vf_queue_number_handler()
4740 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_pf_config_vf_rxq_number() local
4745 pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; in i40e_pf_config_vf_rxq_number()
4766 i40e_pf_parse_vf_queue_number_handler, pf); in i40e_pf_config_vf_rxq_number()
4776 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_pf_parameter_init() local
4777 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_pf_parameter_init()
4789 pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME; in i40e_pf_parameter_init()
4790 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; in i40e_pf_parameter_init()
4791 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER; in i40e_pf_parameter_init()
4793 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED; in i40e_pf_parameter_init()
4794 pf->max_num_vsi = hw->func_caps.num_vsis; in i40e_pf_parameter_init()
4795 pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF; in i40e_pf_parameter_init()
4796 pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; in i40e_pf_parameter_init()
4799 pf->fdir_qp_offset = 0; in i40e_pf_parameter_init()
4801 pf->flags |= I40E_FLAG_FDIR; in i40e_pf_parameter_init()
4802 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR; in i40e_pf_parameter_init()
4804 pf->fdir_nb_qps = 0; in i40e_pf_parameter_init()
4806 qp_count += pf->fdir_nb_qps; in i40e_pf_parameter_init()
4810 pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps; in i40e_pf_parameter_init()
4812 pf->lan_nb_qps = 1; in i40e_pf_parameter_init()
4814 pf->flags |= I40E_FLAG_RSS; in i40e_pf_parameter_init()
4816 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE; in i40e_pf_parameter_init()
4817 pf->lan_nb_qps = pf->lan_nb_qp_max; in i40e_pf_parameter_init()
4819 qp_count += pf->lan_nb_qps; in i40e_pf_parameter_init()
4823 pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; in i40e_pf_parameter_init()
4825 pf->flags |= I40E_FLAG_SRIOV; in i40e_pf_parameter_init()
4826 pf->vf_nb_qps = pf->vf_nb_qp_max; in i40e_pf_parameter_init()
4827 pf->vf_num = pci_dev->max_vfs; in i40e_pf_parameter_init()
4830 pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num); in i40e_pf_parameter_init()
4832 pf->vf_nb_qps = 0; in i40e_pf_parameter_init()
4833 pf->vf_num = 0; in i40e_pf_parameter_init()
4835 qp_count += pf->vf_nb_qps * pf->vf_num; in i40e_pf_parameter_init()
4836 vsi_count += pf->vf_num; in i40e_pf_parameter_init()
4839 pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num; in i40e_pf_parameter_init()
4840 pf->vmdq_nb_qps = 0; in i40e_pf_parameter_init()
4841 pf->max_nb_vmdq_vsi = 0; in i40e_pf_parameter_init()
4845 pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp - in i40e_pf_parameter_init()
4846 qp_count) / pf->vmdq_nb_qp_max; in i40e_pf_parameter_init()
4851 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, in i40e_pf_parameter_init()
4853 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi, in i40e_pf_parameter_init()
4855 if (pf->max_nb_vmdq_vsi) { in i40e_pf_parameter_init()
4856 pf->flags |= I40E_FLAG_VMDQ; in i40e_pf_parameter_init()
4857 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max; in i40e_pf_parameter_init()
4860 pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps, in i40e_pf_parameter_init()
4861 pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi); in i40e_pf_parameter_init()
4870 qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi; in i40e_pf_parameter_init()
4871 vsi_count += pf->max_nb_vmdq_vsi; in i40e_pf_parameter_init()
4874 pf->flags |= I40E_FLAG_DCB; in i40e_pf_parameter_init()
4893 i40e_pf_get_switch_config(struct i40e_pf *pf) in i40e_pf_get_switch_config() argument
4895 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_pf_get_switch_config()
4924 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid); in i40e_pf_get_switch_config()
4925 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid); in i40e_pf_get_switch_config()
5359 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) in i40e_veb_setup() argument
5365 if (pf == NULL) { in i40e_veb_setup()
5370 hw = I40E_PF_TO_HW(pf); in i40e_veb_setup()
5379 veb->associate_pf = pf; in i40e_veb_setup()
5422 struct i40e_pf *pf; in i40e_vsi_release() local
5438 pf = I40E_VSI_TO_PF(vsi); in i40e_vsi_release()
5464 !pf->floating_veb_list[user_param])) { in i40e_vsi_release()
5480 pf->floating_veb_list[user_param]) { in i40e_vsi_release()
5496 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue); in i40e_vsi_release()
5499 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr); in i40e_vsi_release()
5617 i40e_enable_pf_lb(struct i40e_pf *pf) in i40e_enable_pf_lb() argument
5619 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_enable_pf_lb()
5630 ctxt.seid = pf->main_vsi_seid; in i40e_enable_pf_lb()
5652 i40e_vsi_setup(struct i40e_pf *pf, in i40e_vsi_setup() argument
5657 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_vsi_setup()
5687 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi); in i40e_vsi_setup()
5694 i40e_enable_pf_lb(pf); in i40e_vsi_setup()
5698 pf->main_vsi->floating_veb == NULL) { in i40e_vsi_setup()
5699 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi); in i40e_vsi_setup()
5701 if (pf->main_vsi->floating_veb == NULL) { in i40e_vsi_setup()
5714 vsi->adapter = I40E_PF_TO_ADAPTER(pf); in i40e_vsi_setup()
5716 vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; in i40e_vsi_setup()
5723 vsi->nb_qps = pf->lan_nb_qps; in i40e_vsi_setup()
5726 vsi->nb_qps = pf->vf_nb_qps; in i40e_vsi_setup()
5729 vsi->nb_qps = pf->vmdq_nb_qps; in i40e_vsi_setup()
5732 vsi->nb_qps = pf->fdir_nb_qps; in i40e_vsi_setup()
5746 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps); in i40e_vsi_setup()
5758 if (pf->support_multi_driver) { in i40e_vsi_setup()
5767 ret = i40e_res_pool_alloc(&pf->msix_pool, in i40e_vsi_setup()
5781 ret = i40e_res_pool_alloc(&pf->msix_pool, 1); in i40e_vsi_setup()
5800 vsi->uplink_seid = pf->mac_seid; in i40e_vsi_setup()
5801 vsi->seid = pf->main_vsi_seid; in i40e_vsi_setup()
5867 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr, in i40e_vsi_setup()
5889 vsi->uplink_seid = pf->main_vsi->floating_veb->seid; in i40e_vsi_setup()
6001 TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head, in i40e_vsi_setup()
6023 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr); in i40e_vsi_setup()
6025 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue); in i40e_vsi_setup()
6224 i40e_pf_setup(struct i40e_pf *pf) in i40e_pf_setup() argument
6226 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_pf_setup()
6232 pf->offset_loaded = FALSE; in i40e_pf_setup()
6233 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats)); in i40e_pf_setup()
6234 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats)); in i40e_pf_setup()
6235 memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats)); in i40e_pf_setup()
6236 memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats)); in i40e_pf_setup()
6238 ret = i40e_pf_get_switch_config(pf); in i40e_pf_setup()
6244 ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id); in i40e_pf_setup()
6249 if (pf->flags & I40E_FLAG_FDIR) { in i40e_pf_setup()
6251 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); in i40e_pf_setup()
6256 pf->flags &= ~I40E_FLAG_FDIR; in i40e_pf_setup()
6260 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0); in i40e_pf_setup()
6265 pf->main_vsi = vsi; in i40e_pf_setup()
6280 pf->hash_lut_size = hw->func_caps.rss_table_size; in i40e_pf_setup()
6410 i40e_dev_tx_init(struct i40e_pf *pf) in i40e_dev_tx_init() argument
6412 struct rte_eth_dev_data *data = pf->dev_data; in i40e_dev_tx_init()
6426 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf) in i40e_dev_tx_init()
6434 i40e_dev_rx_init(struct i40e_pf *pf) in i40e_dev_rx_init() argument
6436 struct rte_eth_dev_data *data = pf->dev_data; in i40e_dev_rx_init()
6441 i40e_pf_config_rss(pf); in i40e_dev_rx_init()
6455 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf) in i40e_dev_rx_init()
6462 i40e_dev_rxtx_init(struct i40e_pf *pf) in i40e_dev_rxtx_init() argument
6466 err = i40e_dev_tx_init(pf); in i40e_dev_rxtx_init()
6471 err = i40e_dev_rx_init(pf); in i40e_dev_rxtx_init()
6484 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_vmdq_setup() local
6489 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_vmdq_setup()
6497 if ((pf->flags & I40E_FLAG_VMDQ) == 0) { in i40e_vmdq_setup()
6503 if (conf_vsis > pf->max_nb_vmdq_vsi) { in i40e_vmdq_setup()
6506 pf->max_nb_vmdq_vsi); in i40e_vmdq_setup()
6510 if (pf->vmdq != NULL) { in i40e_vmdq_setup()
6515 pf->vmdq = rte_zmalloc("vmdq_info_struct", in i40e_vmdq_setup()
6518 if (pf->vmdq == NULL) { in i40e_vmdq_setup()
6527 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, in i40e_vmdq_setup()
6534 vmdq_info = &pf->vmdq[i]; in i40e_vmdq_setup()
6535 vmdq_info->pf = pf; in i40e_vmdq_setup()
6538 pf->nb_cfg_vmdq_vsi = conf_vsis; in i40e_vmdq_setup()
6543 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) { in i40e_vmdq_setup()
6548 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi, in i40e_vmdq_setup()
6565 if (pf->vmdq[i].vsi == NULL) in i40e_vmdq_setup()
6568 i40e_vsi_release(pf->vmdq[i].vsi); in i40e_vmdq_setup()
6570 rte_free(pf->vmdq); in i40e_vmdq_setup()
6571 pf->vmdq = NULL; in i40e_vmdq_setup()
6662 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_handle_vfr_event() local
6667 if (!pf->vfs) in i40e_dev_handle_vfr_event()
6673 for (i = 0; i < pf->vf_num; i++) { in i40e_dev_handle_vfr_event()
6690 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0); in i40e_dev_handle_vfr_event()
6700 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_notify_all_vfs_link_status() local
6703 for (i = 0; i < pf->vf_num; i++) in i40e_notify_all_vfs_link_status()
6704 i40e_notify_vf_link_status(dev, &pf->vfs[i]); in i40e_notify_all_vfs_link_status()
6765 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_handle_mdd_event() local
6821 for (i = 0; i < pf->vf_num && mdd_detected; i++) { in i40e_handle_mdd_event()
6822 vf = &pf->vfs[i]; in i40e_handle_mdd_event()
7577 i40e_pf_disable_rss(struct i40e_pf *pf) in i40e_pf_disable_rss() argument
7579 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_pf_disable_rss()
7589 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_set_rss_key() local
7605 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { in i40e_set_rss_key()
7637 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_get_rss_key() local
7645 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { in i40e_get_rss_key()
7676 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) in i40e_hw_rss_hash_set() argument
7678 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_hw_rss_hash_set()
7682 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key, in i40e_hw_rss_hash_set()
7687 hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf); in i40e_hw_rss_hash_set()
7699 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_rss_hash_update() local
7701 uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask; in i40e_dev_rss_hash_update()
7707 if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */ in i40e_dev_rss_hash_update()
7716 return i40e_hw_rss_hash_set(pf, rss_conf); in i40e_dev_rss_hash_update()
7723 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_rss_hash_conf_get() local
7731 ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, in i40e_dev_rss_hash_conf_get()
7738 rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena); in i40e_dev_rss_hash_conf_get()
7821 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf, in i40e_sw_tunnel_filter_insert() argument
7824 struct i40e_tunnel_rule *rule = &pf->tunnel; in i40e_sw_tunnel_filter_insert()
7843 i40e_sw_tunnel_filter_del(struct i40e_pf *pf, in i40e_sw_tunnel_filter_del() argument
7846 struct i40e_tunnel_rule *rule = &pf->tunnel; in i40e_sw_tunnel_filter_del()
7882 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) in i40e_replace_mpls_l1_filter() argument
7886 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_mpls_l1_filter()
7890 if (pf->support_multi_driver) { in i40e_replace_mpls_l1_filter()
7943 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) in i40e_replace_mpls_cloud_filter() argument
7947 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_mpls_cloud_filter()
7951 if (pf->support_multi_driver) { in i40e_replace_mpls_cloud_filter()
8018 i40e_replace_gtp_l1_filter(struct i40e_pf *pf) in i40e_replace_gtp_l1_filter() argument
8022 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_gtp_l1_filter()
8026 if (pf->support_multi_driver) { in i40e_replace_gtp_l1_filter()
8106 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) in i40e_replace_gtp_cloud_filter() argument
8110 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_gtp_cloud_filter()
8114 if (pf->support_multi_driver) { in i40e_replace_gtp_cloud_filter()
8179 i40e_replace_port_l1_filter(struct i40e_pf *pf, in i40e_replace_port_l1_filter() argument
8185 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_port_l1_filter()
8188 if (pf->support_multi_driver) { in i40e_replace_port_l1_filter()
8251 i40e_replace_port_cloud_filter(struct i40e_pf *pf, in i40e_replace_port_cloud_filter() argument
8257 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_replace_port_cloud_filter()
8260 if (pf->support_multi_driver) { in i40e_replace_port_cloud_filter()
8305 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, in i40e_dev_consistent_tunnel_filter_set() argument
8316 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_dev_consistent_tunnel_filter_set()
8320 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel; in i40e_dev_consistent_tunnel_filter_set()
8374 if (!pf->mpls_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8375 i40e_replace_mpls_l1_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8376 i40e_replace_mpls_cloud_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8377 pf->mpls_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8390 if (!pf->mpls_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8391 i40e_replace_mpls_l1_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8392 i40e_replace_mpls_cloud_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8393 pf->mpls_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8406 if (!pf->gtp_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8407 i40e_replace_gtp_l1_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8408 i40e_replace_gtp_cloud_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8409 pf->gtp_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8421 if (!pf->gtp_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8422 i40e_replace_gtp_l1_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8423 i40e_replace_gtp_cloud_filter(pf); in i40e_dev_consistent_tunnel_filter_set()
8424 pf->gtp_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8436 if (!pf->qinq_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8437 ret = i40e_cloud_filter_qinq_create(pf); in i40e_dev_consistent_tunnel_filter_set()
8441 pf->qinq_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8456 if (!pf->sport_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8457 i40e_replace_port_l1_filter(pf, in i40e_dev_consistent_tunnel_filter_set()
8459 i40e_replace_port_cloud_filter(pf, in i40e_dev_consistent_tunnel_filter_set()
8461 pf->sport_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8481 if (!pf->dport_replace_flag) { in i40e_dev_consistent_tunnel_filter_set()
8482 i40e_replace_port_l1_filter(pf, in i40e_dev_consistent_tunnel_filter_set()
8484 i40e_replace_port_cloud_filter(pf, in i40e_dev_consistent_tunnel_filter_set()
8486 pf->dport_replace_flag = 1; in i40e_dev_consistent_tunnel_filter_set()
8556 vsi = pf->main_vsi; in i40e_dev_consistent_tunnel_filter_set()
8558 if (tunnel_filter->vf_id >= pf->vf_num) { in i40e_dev_consistent_tunnel_filter_set()
8563 vf = &pf->vfs[tunnel_filter->vf_id]; in i40e_dev_consistent_tunnel_filter_set()
8605 ret = i40e_sw_tunnel_filter_insert(pf, tunnel); in i40e_dev_consistent_tunnel_filter_set()
8620 ret = i40e_sw_tunnel_filter_del(pf, &node->input); in i40e_dev_consistent_tunnel_filter_set()
8628 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port) in i40e_get_vxlan_port_idx() argument
8633 if (pf->vxlan_ports[i] == port) in i40e_get_vxlan_port_idx()
8641 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type) in i40e_add_vxlan_port() argument
8645 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_add_vxlan_port()
8647 idx = i40e_get_vxlan_port_idx(pf, port); in i40e_add_vxlan_port()
8656 idx = i40e_get_vxlan_port_idx(pf, 0); in i40e_add_vxlan_port()
8675 pf->vxlan_ports[idx] = port; in i40e_add_vxlan_port()
8676 pf->vxlan_bitmap |= (1 << idx); in i40e_add_vxlan_port()
8678 if (!(pf->flags & I40E_FLAG_VXLAN)) in i40e_add_vxlan_port()
8679 pf->flags |= I40E_FLAG_VXLAN; in i40e_add_vxlan_port()
8685 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port) in i40e_del_vxlan_port() argument
8688 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_del_vxlan_port()
8690 if (!(pf->flags & I40E_FLAG_VXLAN)) { in i40e_del_vxlan_port()
8695 idx = i40e_get_vxlan_port_idx(pf, port); in i40e_del_vxlan_port()
8710 pf->vxlan_ports[idx] = 0; in i40e_del_vxlan_port()
8711 pf->vxlan_bitmap &= ~(1 << idx); in i40e_del_vxlan_port()
8713 if (!pf->vxlan_bitmap) in i40e_del_vxlan_port()
8714 pf->flags &= ~I40E_FLAG_VXLAN; in i40e_del_vxlan_port()
8725 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_udp_tunnel_port_add() local
8732 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port, in i40e_dev_udp_tunnel_port_add()
8736 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port, in i40e_dev_udp_tunnel_port_add()
8760 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_udp_tunnel_port_del() local
8768 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port); in i40e_dev_udp_tunnel_port_del()
8786 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf) in i40e_pf_calc_configured_queues_num() argument
8788 struct rte_eth_dev_data *data = pf->dev_data; in i40e_pf_calc_configured_queues_num()
8793 for (i = 0; i < pf->lan_nb_qps; i++) { in i40e_pf_calc_configured_queues_num()
8806 i40e_pf_config_rss(struct i40e_pf *pf) in i40e_pf_config_rss() argument
8808 enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode; in i40e_pf_config_rss()
8809 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_pf_config_rss()
8818 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) in i40e_pf_config_rss()
8819 num = i40e_pf_calc_configured_queues_num(pf); in i40e_pf_config_rss()
8821 num = pf->dev_data->nb_rx_queues; in i40e_pf_config_rss()
8830 pf->dev_data->port_id); in i40e_pf_config_rss()
8834 if (pf->adapter->rss_reta_updated == 0) { in i40e_pf_config_rss()
8846 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf; in i40e_pf_config_rss()
8847 if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 || in i40e_pf_config_rss()
8849 i40e_pf_disable_rss(pf); in i40e_pf_config_rss()
8865 return i40e_hw_rss_hash_set(pf, &rss_conf); in i40e_pf_config_rss()
8873 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; in i40e_dev_set_gre_key_len() local
8877 if (pf->support_multi_driver) { in i40e_dev_set_gre_key_len()
9539 i40e_filter_input_set_init(struct i40e_pf *pf) in i40e_filter_input_set_init() argument
9541 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_filter_input_set_init()
9550 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype); in i40e_filter_input_set_init()
9561 if (pf->support_multi_driver && num > 0) { in i40e_filter_input_set_init()
9573 if (!pf->support_multi_driver) { in i40e_filter_input_set_init()
9605 if (!pf->support_multi_driver) in i40e_filter_input_set_init()
9606 pf->hash_input_set[pctype] = input_set; in i40e_filter_input_set_init()
9607 pf->fdir.input_set[pctype] = input_set; in i40e_filter_input_set_init()
9615 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; in i40e_hash_filter_inset_select() local
9631 if (pf->support_multi_driver) { in i40e_hash_filter_inset_select()
9636 pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); in i40e_hash_filter_inset_select()
9660 input_set |= pf->hash_input_set[pctype]; in i40e_hash_filter_inset_select()
9684 pf->hash_input_set[pctype] = input_set; in i40e_hash_filter_inset_select()
9718 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf, in i40e_sw_ethertype_filter_insert() argument
9721 struct i40e_ethertype_rule *rule = &pf->ethertype; in i40e_sw_ethertype_filter_insert()
9741 i40e_sw_ethertype_filter_del(struct i40e_pf *pf, in i40e_sw_ethertype_filter_del() argument
9744 struct i40e_ethertype_rule *rule = &pf->ethertype; in i40e_sw_ethertype_filter_del()
9770 i40e_ethertype_filter_set(struct i40e_pf *pf, in i40e_ethertype_filter_set() argument
9774 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_ethertype_filter_set()
9775 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype; in i40e_ethertype_filter_set()
9782 if (filter->queue >= pf->dev_data->nb_rx_queues) { in i40e_ethertype_filter_set()
9822 pf->main_vsi->seid, in i40e_ethertype_filter_set()
9843 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter); in i40e_ethertype_filter_set()
9847 ret = i40e_sw_ethertype_filter_del(pf, &node->input); in i40e_ethertype_filter_set()
10327 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_mirror_rule_set() local
10337 if (pf->main_vsi->veb == NULL || pf->vfs == NULL) { in i40e_mirror_rule_set()
10342 if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) { in i40e_mirror_rule_set()
10346 if (mirror_conf->dst_pool > pf->vf_num) { in i40e_mirror_rule_set()
10352 seid = pf->main_vsi->veb->seid; in i40e_mirror_rule_set()
10354 TAILQ_FOREACH(it, &pf->mirror_list, rules) { in i40e_mirror_rule_set()
10376 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); in i40e_mirror_rule_set()
10378 pf->nb_mirror_rule--; in i40e_mirror_rule_set()
10411 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) { in i40e_mirror_rule_set()
10416 for (i = 0, j = 0; i < pf->vf_num; i++) { in i40e_mirror_rule_set()
10418 mirr_rule->entries[j] = pf->vfs[i].vsi->seid; in i40e_mirror_rule_set()
10422 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) { in i40e_mirror_rule_set()
10424 mirr_rule->entries[j] = pf->main_vsi_seid; in i40e_mirror_rule_set()
10453 if (mirror_conf->dst_pool == pf->vf_num) in i40e_mirror_rule_set()
10454 dst_seid = pf->main_vsi_seid; in i40e_mirror_rule_set()
10456 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid; in i40e_mirror_rule_set()
10475 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules); in i40e_mirror_rule_set()
10477 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules); in i40e_mirror_rule_set()
10479 pf->nb_mirror_rule++; in i40e_mirror_rule_set()
10494 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_mirror_rule_reset() local
10502 seid = pf->main_vsi->veb->seid; in i40e_mirror_rule_reset()
10504 TAILQ_FOREACH(it, &pf->mirror_list, rules) { in i40e_mirror_rule_reset()
10521 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules); in i40e_mirror_rule_reset()
10523 pf->nb_mirror_rule--; in i40e_mirror_rule_reset()
10839 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); in i40e_vsi_update_queue_mapping() local
10857 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; in i40e_vsi_update_queue_mapping()
11088 i40e_dcb_hw_configure(struct i40e_pf *pf, in i40e_dcb_hw_configure() argument
11092 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_dcb_hw_configure()
11094 struct i40e_vsi *main_vsi = pf->main_vsi; in i40e_dcb_hw_configure()
11187 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dcb_init_configure() local
11191 if ((pf->flags & I40E_FLAG_DCB) == 0) { in i40e_dcb_init_configure()
11291 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dcb_setup() local
11296 if ((pf->flags & I40E_FLAG_DCB) == 0) { in i40e_dcb_setup()
11301 if (pf->vf_num != 0) in i40e_dcb_setup()
11309 ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map); in i40e_dcb_setup()
11322 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_get_dcb_info() local
11324 struct i40e_vsi *vsi = pf->main_vsi; in i40e_dev_get_dcb_info()
11339 if (!pf->nb_cfg_vmdq_vsi) { in i40e_dev_get_dcb_info()
11360 vsi = pf->vmdq[j].vsi; in i40e_dev_get_dcb_info()
11377 } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); in i40e_dev_get_dcb_info()
11691 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_set_default_mac_addr() local
11692 struct i40e_vsi *vsi = pf->main_vsi; in i40e_set_default_mac_addr()
11703 if (rte_is_same_ether_addr(&pf->dev_addr, in i40e_set_default_mac_addr()
11725 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); in i40e_set_default_mac_addr()
11740 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_dev_mtu_set() local
11741 struct rte_eth_dev_data *dev_data = pf->dev_data; in i40e_dev_mtu_set()
11770 i40e_ethertype_filter_restore(struct i40e_pf *pf) in i40e_ethertype_filter_restore() argument
11772 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_ethertype_filter_restore()
11774 *ethertype_list = &pf->ethertype.ethertype_list; in i40e_ethertype_filter_restore()
11791 flags, pf->main_vsi->seid, in i40e_ethertype_filter_restore()
11803 i40e_tunnel_filter_restore(struct i40e_pf *pf) in i40e_tunnel_filter_restore() argument
11805 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_tunnel_filter_restore()
11809 *tunnel_list = &pf->tunnel.tunnel_list; in i40e_tunnel_filter_restore()
11816 vsi = pf->main_vsi; in i40e_tunnel_filter_restore()
11818 vf = &pf->vfs[f->vf_id]; in i40e_tunnel_filter_restore()
11858 i40e_rss_filter_restore(struct i40e_pf *pf) in i40e_rss_filter_restore() argument
11860 struct i40e_rss_conf_list *list = &pf->rss_config_list; in i40e_rss_filter_restore()
11864 i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE); in i40e_rss_filter_restore()
11869 i40e_filter_restore(struct i40e_pf *pf) in i40e_filter_restore() argument
11871 i40e_ethertype_filter_restore(pf); in i40e_filter_restore()
11872 i40e_tunnel_filter_restore(pf); in i40e_filter_restore()
11873 i40e_fdir_filter_restore(pf); in i40e_filter_restore()
11874 i40e_rss_filter_restore(pf); in i40e_filter_restore()
11893 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) in i40e_find_customized_pctype() argument
11898 if (pf->customized_pctype[i].index == index) in i40e_find_customized_pctype()
11899 return &pf->customized_pctype[i]; in i40e_find_customized_pctype()
11910 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_update_customized_pctype() local
11975 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11979 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11983 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11987 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11991 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11995 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
11999 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12003 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12007 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12011 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12015 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12019 i40e_find_customized_pctype(pf, in i40e_update_customized_pctype()
12245 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); in i40e_update_customized_info() local
12292 pf->gtp_support = true; in i40e_update_customized_info()
12294 pf->gtp_support = false; in i40e_update_customized_info()
12303 pf->esp_support = true; in i40e_update_customized_info()
12305 pf->esp_support = false; in i40e_update_customized_info()
12369 i40e_cloud_filter_qinq_create(struct i40e_pf *pf) in i40e_cloud_filter_qinq_create() argument
12374 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_cloud_filter_qinq_create()
12377 if (pf->support_multi_driver) { in i40e_cloud_filter_qinq_create()
12477 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf) in i40e_rss_hash_set() argument
12479 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_hash_set()
12484 ret = i40e_set_rss_key(pf->main_vsi, key, in i40e_rss_hash_set()
12489 hena = i40e_config_hena(pf->adapter, rss_conf->conf.types); in i40e_rss_hash_set()
12499 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types) in i40e_rss_conf_hash_inset() argument
12501 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_conf_hash_inset()
12602 mask0 = types & pf->adapter->flow_types_mask; in i40e_rss_conf_hash_inset()
12636 i40e_rss_mark_invalid_rule(struct i40e_pf *pf, in i40e_rss_mark_invalid_rule() argument
12647 TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) { in i40e_rss_mark_invalid_rule()
12671 i40e_rss_config_hash_function(struct i40e_pf *pf, in i40e_rss_config_hash_function() argument
12674 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_config_hash_function()
12684 i40e_rss_mark_invalid_rule(pf, conf); in i40e_rss_config_hash_function()
12692 i40e_rss_mark_invalid_rule(pf, conf); in i40e_rss_config_hash_function()
12695 mask0 = conf->conf.types & pf->adapter->flow_types_mask; in i40e_rss_config_hash_function()
12708 if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) in i40e_rss_config_hash_function()
12720 i40e_rss_enable_hash(struct i40e_pf *pf, in i40e_rss_enable_hash() argument
12723 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; in i40e_rss_enable_hash()
12726 if (!(conf->conf.types & pf->adapter->flow_types_mask)) in i40e_rss_enable_hash()
12733 if (i40e_rss_conf_hash_inset(pf, conf->conf.types)) in i40e_rss_enable_hash()
12752 i40e_rss_hash_set(pf, &rss_conf); in i40e_rss_enable_hash()
12755 i40e_rss_config_hash_function(pf, conf); in i40e_rss_enable_hash()
12757 i40e_rss_mark_invalid_rule(pf, conf); in i40e_rss_enable_hash()
12764 i40e_rss_config_queue_region(struct i40e_pf *pf, in i40e_rss_config_queue_region() argument
12767 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_config_queue_region()
12775 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) in i40e_rss_config_queue_region()
12776 num = i40e_pf_calc_configured_queues_num(pf); in i40e_rss_config_queue_region()
12778 num = pf->dev_data->nb_rx_queues; in i40e_rss_config_queue_region()
12787 pf->dev_data->port_id); in i40e_rss_config_queue_region()
12801 i40e_rss_mark_invalid_rule(pf, conf); in i40e_rss_config_queue_region()
12808 i40e_rss_clear_hash_function(struct i40e_pf *pf, in i40e_rss_clear_hash_function() argument
12811 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_clear_hash_function()
12831 mask0 = conf->conf.types & pf->adapter->flow_types_mask; in i40e_rss_clear_hash_function()
12843 if (pf->adapter->pctypes_tbl[i] & (1ULL << j)) in i40e_rss_clear_hash_function()
12855 i40e_rss_disable_hash(struct i40e_pf *pf, in i40e_rss_disable_hash() argument
12858 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; in i40e_rss_disable_hash()
12859 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_disable_hash()
12868 i40e_rss_hash_set(pf, &rss_conf); in i40e_rss_disable_hash()
12871 if (!(pf->adapter->flow_types_mask & (1ULL << i)) || in i40e_rss_disable_hash()
12887 i40e_rss_clear_hash_function(pf, conf); in i40e_rss_disable_hash()
12894 i40e_rss_clear_queue_region(struct i40e_pf *pf) in i40e_rss_clear_queue_region() argument
12896 struct i40e_hw *hw = I40E_PF_TO_HW(pf); in i40e_rss_clear_queue_region()
12897 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; in i40e_rss_clear_queue_region()
12903 num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC); in i40e_rss_clear_queue_region()
12911 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) in i40e_rss_clear_queue_region()
12912 num = i40e_pf_calc_configured_queues_num(pf); in i40e_rss_clear_queue_region()
12914 num = pf->dev_data->nb_rx_queues; in i40e_rss_clear_queue_region()
12923 pf->dev_data->port_id); in i40e_rss_clear_queue_region()
12944 i40e_config_rss_filter(struct i40e_pf *pf, in i40e_config_rss_filter() argument
12947 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; in i40e_config_rss_filter()
12954 ret = i40e_rss_config_queue_region(pf, conf); in i40e_config_rss_filter()
12963 ret = i40e_rss_config_hash_function(pf, conf); in i40e_config_rss_filter()
12970 ret = i40e_rss_enable_hash(pf, conf); in i40e_config_rss_filter()
12987 i40e_rss_clear_queue_region(pf); in i40e_config_rss_filter()
12989 i40e_rss_clear_hash_function(pf, conf); in i40e_config_rss_filter()
12991 i40e_rss_disable_hash(pf, conf); in i40e_config_rss_filter()
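
Nearly every hit above follows the same access chain: an ethdev callback recovers the i40e_pf from dev->data->dev_private via I40E_DEV_PRIVATE_TO_PF, derives the shared struct i40e_hw with I40E_PF_TO_HW (or walks back from a VSI with I40E_VSI_TO_PF), and then works on pf->main_vsi or other pf fields. The sketch below is a hypothetical callback written only to illustrate that pattern; example_i40e_op is not a function from the listing, and it assumes the usual driver headers (rte_ethdev.h, i40e_ethdev.h) are in scope.

/* Hypothetical illustration only -- not part of i40e_ethdev.c. */
static int
example_i40e_op(struct rte_eth_dev *dev)
{
	/* pf is the per-port physical-function state stored in the ethdev
	 * private data; hw is the shared register/admin-queue handle. */
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (vsi == NULL)
		return -ENODEV;

	/* A real op would program hw registers or update pf/vsi state here,
	 * as the functions listed above do. */
	RTE_SET_USED(hw);
	return 0;
}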