/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa.c
    74: return priv;  in mlx5_vdpa_find_priv_resource_by_vdev()
    275: priv->vid = 0;  in mlx5_vdpa_dev_close()
    298: priv->vid = vid;  in mlx5_vdpa_dev_config()
    523: priv);  in mlx5_vdpa_config_get()
    581: priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);  in mlx5_vdpa_create_dev_resources()
    629: if (!priv) {  in mlx5_vdpa_dev_probe()
    656: if (priv)  in mlx5_vdpa_dev_probe()
    705: if (priv->td)  in mlx5_vdpa_release_dev_resources()
    708: claim_zero(munmap(priv->virtq_db_addr, priv->var->length));  in mlx5_vdpa_release_dev_resources()
    709: if (priv->var)  in mlx5_vdpa_release_dev_resources()
    [all …]
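
The first hit above names a lookup helper, mlx5_vdpa_find_priv_resource_by_vdev(), which suggests each device's private state ("priv") lives on a global list and is found by its device handle. A minimal, self-contained sketch of that lookup pattern follows; struct dev_priv, priv_list and find_priv are illustrative assumptions, not the DPDK structures.

/* Sketch: per-device private state on a global list, looked up by the
 * device handle. Uses the BSD sys/queue.h list macros (Linux/BSD). */
#include <stdio.h>
#include <sys/queue.h>

struct dev_priv {
	const void *dev;            /* device handle used as lookup key */
	int vid;                    /* example per-device state */
	TAILQ_ENTRY(dev_priv) next; /* linkage on the global list */
};

static TAILQ_HEAD(, dev_priv) priv_list = TAILQ_HEAD_INITIALIZER(priv_list);

static struct dev_priv *
find_priv(const void *dev)
{
	struct dev_priv *priv;

	TAILQ_FOREACH(priv, &priv_list, next)
		if (priv->dev == dev)
			return priv;
	return NULL;
}

int
main(void)
{
	static int dev_a, dev_b; /* stand-ins for two device handles */
	struct dev_priv a = { .dev = &dev_a, .vid = 1 };
	struct dev_priv b = { .dev = &dev_b, .vid = 2 };
	struct dev_priv *found;

	TAILQ_INSERT_TAIL(&priv_list, &a, next);
	TAILQ_INSERT_TAIL(&priv_list, &b, next);
	found = find_priv(&dev_b);
	printf("vid of dev_b: %d\n", found ? found->vid : -1);
	return 0;
}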
|
mlx5_vdpa_event.c
    35: if (priv->eventc) {  in mlx5_vdpa_event_qp_global_release()
    46: priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,  in mlx5_vdpa_event_qp_global_prepare()
    48: if (!priv->eventc) {  in mlx5_vdpa_event_qp_global_prepare()
    54: if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {  in mlx5_vdpa_event_qp_global_prepare()
    185: priv->timer_delay_us += priv->event_us;  in mlx5_vdpa_timer_sleep()
    265: priv->timer_delay_us = priv->event_us;  in mlx5_vdpa_event_handle()
    284: priv->timer_delay_us = priv->event_us;  in mlx5_vdpa_event_handle()
    408: priv);  in mlx5_vdpa_err_event_setup()
    412: priv->vid);  in mlx5_vdpa_err_event_setup()
    417: priv->vid);  in mlx5_vdpa_err_event_setup()
    [all …]
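
The timer_delay_us / event_us hits hint at an adaptive polling delay: the sleep grows while the queues stay idle and snaps back to the base interval once events are seen. A minimal sketch of that idea, with an assumed upper cap and made-up numbers:

/* Sketch of adaptive polling backoff: stretch the delay while idle,
 * reset it on activity. max_delay_us and the values are assumptions. */
#include <stdint.h>
#include <stdio.h>

struct poller {
	uint32_t event_us;       /* base polling interval */
	uint32_t timer_delay_us; /* current, adaptive delay */
	uint32_t max_delay_us;   /* upper bound for the backoff */
};

static void
poller_idle(struct poller *p)
{
	p->timer_delay_us += p->event_us;        /* nothing seen: stretch */
	if (p->timer_delay_us > p->max_delay_us)
		p->timer_delay_us = p->max_delay_us;
}

static void
poller_busy(struct poller *p)
{
	p->timer_delay_us = p->event_us;         /* traffic seen: snap back */
}

int
main(void)
{
	struct poller p = { 100, 100, 1000 };
	int i;

	for (i = 0; i < 5; i++)
		poller_idle(&p);
	printf("after idling: %u us\n", (unsigned)p.timer_delay_us);
	poller_busy(&p);
	printf("after traffic: %u us\n", (unsigned)p.timer_delay_us);
	return 0;
}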
|
mlx5_vdpa_virtq.c
    22: struct mlx5_vdpa_priv *priv = virtq->priv;  in mlx5_vdpa_virtq_kick_handler() local
    133: priv->features = 0;  in mlx5_vdpa_virtqs_release()
    134: priv->nr_virtqs = 0;  in mlx5_vdpa_virtqs_release()
    278: size = priv->caps.umems[i].a * vq.size + priv->caps.umems[i].b;  in mlx5_vdpa_virtq_setup()
    352: attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;  in mlx5_vdpa_virtq_setup()
    359: virtq->priv = priv;  in mlx5_vdpa_virtq_setup()
    365: virtq->priv = priv;  in mlx5_vdpa_virtq_setup()
    438: priv->vid);  in mlx5_vdpa_features_validate()
    446: priv->vid);  in mlx5_vdpa_features_validate()
    454: priv->vid);  in mlx5_vdpa_features_validate()
    [all …]
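
Line 278 above shows a linear UMEM sizing rule: for each memory region the device capabilities report a coefficient pair (a, b) and the buffer size is a * queue_size + b. A small sketch of that computation; the coefficient values below are invented purely for illustration:

/* Sketch: per-queue buffer size from device-reported (a, b) coefficients. */
#include <stdint.h>
#include <stdio.h>

struct umem_coeff {
	uint32_t a; /* per-descriptor contribution */
	uint32_t b; /* fixed overhead */
};

static uint64_t
umem_size(const struct umem_coeff *c, uint16_t queue_size)
{
	return (uint64_t)c->a * queue_size + c->b;
}

int
main(void)
{
	const struct umem_coeff coeffs[3] = { { 16, 4096 }, { 8, 64 }, { 4, 0 } };
	uint16_t q_size = 256;
	int i;

	for (i = 0; i < 3; i++)
		printf("umem[%d]: %llu bytes\n", i,
		       (unsigned long long)umem_size(&coeffs[i], q_size));
	return 0;
}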
|
mlx5_vdpa_steer.c
    21: if (priv->steer.rss[i].flow) {  in mlx5_vdpa_rss_flows_destroy()
    24: priv->steer.rss[i].flow = NULL;  in mlx5_vdpa_rss_flows_destroy()
    31: if (priv->steer.rss[i].tir) {  in mlx5_vdpa_rss_flows_destroy()
    48: if (priv->steer.rqt) {  in mlx5_vdpa_steer_unset()
    50: priv->steer.rqt = NULL;  in mlx5_vdpa_steer_unset()
    78: priv->virtqs[i].enable && priv->virtqs[i].virtq) {  in mlx5_vdpa_rqt_prepare()
    92: if (!priv->steer.rqt) {  in mlx5_vdpa_rqt_prepare()
    93: priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->cdev->ctx,  in mlx5_vdpa_rqt_prepare()
    95: if (!priv->steer.rqt) {  in mlx5_vdpa_rqt_prepare()
    200: (priv->cdev->ctx, &dv_attr, priv->steer.tbl);  in mlx5_vdpa_rss_flows_create()
    [all …]
|
mlx5_vdpa_lm.c
    20: for (i = 0; i < priv->nr_virtqs; ++i) {  in mlx5_vdpa_logging_enable()
    22: if (!priv->virtqs[i].virtq) {  in mlx5_vdpa_logging_enable()
    45: int ret = mlx5_os_wrapped_mkey_create(priv->cdev->ctx, priv->cdev->pd,  in mlx5_vdpa_dirty_bitmap_set()
    46: priv->cdev->pdn,  in mlx5_vdpa_dirty_bitmap_set()
    48: log_size, &priv->lm_mr);  in mlx5_vdpa_dirty_bitmap_set()
    54: attr.dirty_bitmap_mkey = priv->lm_mr.lkey;  in mlx5_vdpa_dirty_bitmap_set()
    55: for (i = 0; i < priv->nr_virtqs; ++i) {  in mlx5_vdpa_dirty_bitmap_set()
    57: if (!priv->virtqs[i].virtq) {  in mlx5_vdpa_dirty_bitmap_set()
    87: for (i = 0; i < priv->nr_virtqs; ++i) {  in mlx5_vdpa_lm_log()
    88: if (!priv->virtqs[i].virtq) {  in mlx5_vdpa_lm_log()
    [all …]
|
mlx5_vdpa.h
    83: struct mlx5_vdpa_priv *priv;  member
    197: void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);
    209: int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
    264: int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);
    283: int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);
    299: void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);
    318: int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
    341: void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
    352: int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
    364: int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);
    [all …]
|
/dpdk/drivers/net/failsafe/
failsafe_intr.c
    59: struct fs_priv *priv;  in fs_rx_event_proxy_routine() local
    67: priv = data;  in fs_rx_event_proxy_routine()
    68: events = priv->rxp.evec;  in fs_rx_event_proxy_routine()
    96: rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0);  in fs_rx_event_proxy_service_uninstall()
    135: priv->data->name);  in fs_rx_event_proxy_service_install()
    228: priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));  in fs_rx_event_proxy_install()
    241: close(priv->rxp.efd);  in fs_rx_event_proxy_install()
    242: priv->rxp.efd = -1;  in fs_rx_event_proxy_install()
    245: free(priv->rxp.evec);  in fs_rx_event_proxy_install()
    398: priv->rxp.efd = -1;  in fs_rx_event_proxy_uninstall()
    [all …]
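
The install/uninstall hits show the usual lifetime bookkeeping for the Rx event proxy: an event file descriptor plus a calloc'd event vector are created on install and released (close + free, fd reset to -1) on teardown. A rough Linux-only sketch of that pairing, assuming an eventfd-based descriptor; NUM_RX_PROXIES and struct rx_proxy are stand-ins, not the failsafe structures:

/* Sketch: paired install/uninstall of an event fd and its event vector. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

#define NUM_RX_PROXIES 4

struct rx_proxy {
	int efd;                  /* event fd the proxy service waits on */
	struct epoll_event *evec; /* scratch vector for polled events */
};

static int
rx_proxy_install(struct rx_proxy *rxp)
{
	rxp->efd = eventfd(0, EFD_NONBLOCK);
	if (rxp->efd < 0)
		return -1;
	rxp->evec = calloc(NUM_RX_PROXIES, sizeof(*rxp->evec));
	if (rxp->evec == NULL) {
		close(rxp->efd);
		rxp->efd = -1;
		return -1;
	}
	return 0;
}

static void
rx_proxy_uninstall(struct rx_proxy *rxp)
{
	free(rxp->evec);
	rxp->evec = NULL;
	close(rxp->efd);
	rxp->efd = -1;
}

int
main(void)
{
	struct rx_proxy rxp = { .efd = -1, .evec = NULL };

	if (rx_proxy_install(&rxp) == 0) {
		printf("proxy installed, efd = %d\n", rxp.efd);
		rx_proxy_uninstall(&rxp);
	}
	return 0;
}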
|
/dpdk/drivers/regex/mlx5/
mlx5_regex.c
    51: rte_free(priv->qps);  in mlx5_regex_stop()
    52: priv->qps = NULL;  in mlx5_regex_stop()
    85: priv = rte_zmalloc("mlx5 regex device private", sizeof(*priv),  in mlx5_regex_dev_probe()
    87: if (!priv) {  in mlx5_regex_dev_probe()
    94: priv->cdev = cdev;  in mlx5_regex_dev_probe()
    97: priv->is_bf2 = 1;  in mlx5_regex_dev_probe()
    116: if (priv->has_umr)  in mlx5_regex_dev_probe()
    121: priv->regexdev->data->dev_private = priv;  in mlx5_regex_dev_probe()
    131: rte_free(priv);  in mlx5_regex_dev_probe()
    147: if (priv) {  in mlx5_regex_dev_remove()
    [all …]
|
/dpdk/drivers/net/mlx4/
mlx4_intr.c
    72: mlx4_rx_intr_vec_disable(priv);  in mlx4_rx_intr_vec_enable()
    131: priv->intr_alarm = 0;  in mlx4_link_status_alarm()
    160: if (!priv->intr_alarm) {  in mlx4_link_status_check()
    165: priv);  in mlx4_link_status_check()
    168: priv->intr_alarm = 1;  in mlx4_link_status_check()
    267: priv);  in mlx4_intr_uninstall()
    272: priv->intr_alarm = 0;  in mlx4_intr_uninstall()
    294: mlx4_intr_uninstall(priv);  in mlx4_intr_install()
    296: if (rte_intr_fd_set(priv->intr_handle, priv->ctx->async_fd))  in mlx4_intr_install()
    302: priv);  in mlx4_intr_install()
    [all …]
|
mlx4_ethdev.c
    232: priv->mtu = mtu;  in mlx4_mtu_set()
    455: if (index >= RTE_DIM(priv->mac) - priv->mac_mc) {  in mlx4_mac_addr_remove()
    459: memset(&priv->mac[index], 0, sizeof(priv->mac[index]));  in mlx4_mac_addr_remove()
    493: if (index >= RTE_DIM(priv->mac) - priv->mac_mc) {  in mlx4_mac_addr_add()
    497: memcpy(&priv->mac[index], mac_addr, sizeof(priv->mac[index]));  in mlx4_mac_addr_add()
    542: i != RTE_DIM(priv->mac) - priv->mac_mc;  in mlx4_set_mc_addr_list()
    550: memset(priv->mac + RTE_DIM(priv->mac) - priv->mac_mc,  in mlx4_set_mc_addr_list()
    552: sizeof(priv->mac[0]) * (priv->mac_mc - num));  in mlx4_set_mc_addr_list()
    555: priv->mac_mc = num;  in mlx4_set_mc_addr_list()
    646: max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?  in mlx4_dev_infos_get()
    [all …]
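
The mac_addr_add/remove hits show a fixed-size MAC table whose tail is reserved for multicast entries, so unicast indices must stay below "table size - mac_mc". A self-contained sketch of that bounds-checked add/remove; the struct layout, table size and return codes here are assumptions, not the mlx4 definitions:

/* Sketch: fixed MAC table with a multicast region reserved at the tail. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAC_TABLE_SIZE 8

struct mac_addr { uint8_t bytes[6]; };

struct port_priv {
	struct mac_addr mac[MAC_TABLE_SIZE]; /* unicast head, multicast tail */
	unsigned int mac_mc;                 /* number of multicast entries */
};

static int
mac_addr_add(struct port_priv *priv, unsigned int index,
	     const struct mac_addr *addr)
{
	if (index >= MAC_TABLE_SIZE - priv->mac_mc)
		return -1; /* would overwrite the multicast region */
	memcpy(&priv->mac[index], addr, sizeof(priv->mac[index]));
	return 0;
}

static int
mac_addr_remove(struct port_priv *priv, unsigned int index)
{
	if (index >= MAC_TABLE_SIZE - priv->mac_mc)
		return -1;
	memset(&priv->mac[index], 0, sizeof(priv->mac[index]));
	return 0;
}

int
main(void)
{
	struct port_priv priv = { .mac_mc = 2 };
	struct mac_addr a = { { 0x02, 0, 0, 0, 0, 1 } };

	printf("add at 0: %d\n", mac_addr_add(&priv, 0, &a));
	printf("add at 6 (reserved): %d\n", mac_addr_add(&priv, 6, &a));
	printf("remove at 0: %d\n", mac_addr_remove(&priv, 0));
	return 0;
}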
|
mlx4.c
    297: if (priv->started)  in mlx4_dev_start()
    300: priv->started = 1;  in mlx4_dev_start()
    385: ((priv->ctx != NULL) ? priv->ctx->device->name : ""));  in mlx4_dev_close()
    406: memset(priv, 0, sizeof(*priv));  in mlx4_dev_close()
    975: priv->pd = pd;  in mlx4_pci_probe()
    977: priv->vf = vf;  in mlx4_pci_probe()
    989: priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,  in mlx4_pci_probe()
    1000: priv->tso =  in mlx4_pci_probe()
    1004: if (priv->tso)  in mlx4_pci_probe()
    1031: mlx4_mtu_get(priv, &priv->mtu);  in mlx4_pci_probe()
    [all …]
|
/dpdk/drivers/net/mlx5/windows/
mlx5_os.c
    82: if (!priv->q_counters) {  in mlx5_queue_counter_id_prepare()
    88: priv->counter_set_id = priv->q_counters->id;  in mlx5_queue_counter_id_prepare()
    343: sizeof(*priv),  in mlx5_dev_spawn()
    345: if (priv == NULL) {  in mlx5_dev_spawn()
    350: priv->sh = sh;  in mlx5_dev_spawn()
    362: priv->vport_id = -1;  in mlx5_dev_spawn()
    392: err = mlx5_port_args_config(priv, mkvlist, &priv->config);  in mlx5_dev_spawn()
    465: priv->mtu);  in mlx5_dev_spawn()
    554: if (priv) {  in mlx5_dev_spawn()
    559: mlx5_free(priv);  in mlx5_dev_spawn()
    [all …]
|
/dpdk/drivers/net/mlx5/
mlx5_vlan.c
    44: MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));  in mlx5_vlan_filter_set()
    53: if (i < priv->vlan_filter_n) {  in mlx5_vlan_filter_set()
    59: --priv->vlan_filter_n;  in mlx5_vlan_filter_set()
    60: memmove(&priv->vlan_filter[i],  in mlx5_vlan_filter_set()
    61: &priv->vlan_filter[i + 1],  in mlx5_vlan_filter_set()
    62: sizeof(priv->vlan_filter[i]) *  in mlx5_vlan_filter_set()
    63: (priv->vlan_filter_n - i));  in mlx5_vlan_filter_set()
    64: priv->vlan_filter[priv->vlan_filter_n] = 0;  in mlx5_vlan_filter_set()
    71: priv->vlan_filter[priv->vlan_filter_n] = vlan_id;  in mlx5_vlan_filter_set()
    72: ++priv->vlan_filter_n;  in mlx5_vlan_filter_set()
    [all …]
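
These hits show a compact-array VLAN filter: removing an id shifts the tail down with memmove and clears the freed slot, adding appends at the end. A minimal sketch of that pattern, with an assumed capacity and simplified error handling:

/* Sketch: VLAN filter kept as a packed array of ids. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VLAN_FILTER_MAX 64

struct port_priv {
	uint16_t vlan_filter[VLAN_FILTER_MAX];
	unsigned int vlan_filter_n;
};

static void
vlan_filter_set(struct port_priv *priv, uint16_t vlan_id, int on)
{
	unsigned int i;

	for (i = 0; i < priv->vlan_filter_n; ++i)
		if (priv->vlan_filter[i] == vlan_id)
			break;
	if (!on && i < priv->vlan_filter_n) {
		/* Found and asked to remove: compact the array. */
		--priv->vlan_filter_n;
		memmove(&priv->vlan_filter[i], &priv->vlan_filter[i + 1],
			sizeof(priv->vlan_filter[i]) *
			(priv->vlan_filter_n - i));
		priv->vlan_filter[priv->vlan_filter_n] = 0;
	} else if (on && i == priv->vlan_filter_n &&
		   priv->vlan_filter_n < VLAN_FILTER_MAX) {
		/* Not present and asked to add: append. */
		priv->vlan_filter[priv->vlan_filter_n++] = vlan_id;
	}
}

int
main(void)
{
	struct port_priv priv = { .vlan_filter_n = 0 };

	vlan_filter_set(&priv, 100, 1);
	vlan_filter_set(&priv, 200, 1);
	vlan_filter_set(&priv, 100, 0);
	printf("entries: %u, first: %u\n", priv.vlan_filter_n,
	       (unsigned)priv.vlan_filter[0]);
	return 0;
}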
|
mlx5_rss.c
    53: priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,  in mlx5_rss_hash_update()
    57: if (!priv->rss_conf.rss_key) {  in mlx5_rss_hash_update()
    65: priv->rss_conf.rss_hf = rss_conf->rss_hf;  in mlx5_rss_hash_update()
    103: priv->rss_conf.rss_key_len);  in mlx5_rss_hash_conf_get()
    128: if (priv->reta_idx_n == reta_size)  in mlx5_rss_reta_index_resize()
    138: priv->reta_idx = mem;  in mlx5_rss_reta_index_resize()
    139: priv->reta_idx_n = reta_size;  in mlx5_rss_reta_index_resize()
    141: memset(&(*priv->reta_idx)[old_size], 0,  in mlx5_rss_reta_index_resize()
    143: sizeof((*priv->reta_idx)[0]));  in mlx5_rss_reta_index_resize()
    177: (*priv->reta_idx)[i];  in mlx5_dev_rss_reta_query()
    [all …]
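
The reta_index_resize hits show the "grow the indirection table and zero the newly added tail" pattern: skip the work if the size is unchanged, reallocate, then memset from the old size upward. A sketch under assumed types (a flat uint16_t array instead of the driver's array pointer):

/* Sketch: resize an RSS indirection table, zeroing new entries. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rss_state {
	uint16_t *reta_idx;      /* indirection table: queue index per entry */
	unsigned int reta_idx_n; /* current number of entries */
};

static int
reta_resize(struct rss_state *rss, unsigned int reta_size)
{
	unsigned int old_size = rss->reta_idx_n;
	uint16_t *mem;

	if (rss->reta_idx_n == reta_size)
		return 0;
	mem = realloc(rss->reta_idx, reta_size * sizeof(*mem));
	if (mem == NULL)
		return -1;
	rss->reta_idx = mem;
	rss->reta_idx_n = reta_size;
	if (reta_size > old_size)
		/* Newly added entries start out pointing at queue 0. */
		memset(&rss->reta_idx[old_size], 0,
		       (reta_size - old_size) * sizeof(rss->reta_idx[0]));
	return 0;
}

int
main(void)
{
	struct rss_state rss = { NULL, 0 };

	if (reta_resize(&rss, 16) == 0)
		printf("reta entries: %u, last = %u\n", rss.reta_idx_n,
		       (unsigned)rss.reta_idx[15]);
	free(rss.reta_idx);
	return 0;
}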
|
mlx5_ethdev.c
    47: MLX5_ASSERT(priv);  in mlx5_ifindex()
    49: if (priv->master && priv->sh->bond.ifindex > 0)  in mlx5_ifindex()
    85: priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,  in mlx5_dev_configure()
    110: priv->rxq_privs = mlx5_realloc(priv->rxq_privs,  in mlx5_dev_configure()
    199: priv->rxqs_n = rxqs_n;  in mlx5_dev_configure_rss_reta()
    325: max = RTE_MIN(priv->sh->dev_cap.max_cq, priv->sh->dev_cap.max_qp);  in mlx5_dev_infos_get()
    342: priv->reta_idx_n : priv->sh->dev_cap.ind_table_max_size;  in mlx5_dev_infos_get()
    446: info->pf = priv->pf_bond >= 0 ? priv->pf_bond : 0;  in mlx5_representor_info_get()
    604: priv->mtu = mtu;  in mlx5_dev_set_mtu()
    687: return priv;  in mlx5_port_to_eswitch_info()
    [all …]
|
mlx5_flow_meter.c
    77: (void)priv;  in mlx5_flow_meter_action_create()
    436: if (!priv->mtr_en)  in mlx5_flow_mtr_cap_get()
    495: if (!priv->mtr_en)  in mlx5_flow_meter_profile_add()
    553: if (!priv->mtr_en)  in mlx5_flow_meter_profile_delete()
    671: if (!priv->mtr_en || !priv->sh->meter_aso_en)  in mlx5_flow_meter_policy_validate()
    768: if (!priv->mtr_en)  in mlx5_flow_meter_policy_add()
    1121: (void)priv;  in mlx5_flow_meter_action_modify()
    1199: if (!priv->mtr_en)  in mlx5_flow_meter_create()
    1419: if (!priv->mtr_en)  in mlx5_flow_meter_destroy()
    1972: (void)priv;  in mlx5_flow_meter_detach()
    [all …]
|
mlx5_txq.c
    516: struct mlx5_priv *priv = txq_ctrl->priv;  in txq_uar_init_secondary() local
    716: struct mlx5_priv *priv = txq_ctrl->priv;  in txq_calc_inline_max() local
    744: struct mlx5_priv *priv = txq_ctrl->priv;  in txq_set_params() local
    767: (priv->pci_dev && priv->pci_dev->id.device_id ==  in txq_set_params()
    963: struct mlx5_priv *priv = txq_ctrl->priv;  in txq_adjust_params() local
    988: priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);  in txq_adjust_params()
    999: priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);  in txq_adjust_params()
    1010: priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);  in txq_adjust_params()
    1089: tmpl->priv = priv;  in mlx5_txq_new()
    1147: tmpl->priv = priv;  in mlx5_txq_hairpin_new()
    [all …]
|
/dpdk/drivers/net/dpaa2/
dpaa2_ethdev.c
    371: num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);  in dpaa2_alloc_rx_tx_queues()
    373: tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;  in dpaa2_alloc_rx_tx_queues()
    375: tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;  in dpaa2_alloc_rx_tx_queues()
    740: if (!priv->bp_list || priv->bp_list->mp != mb_pool) {  in dpaa2_dev_rx_queue_setup()
    1372: priv->hw = NULL;  in dpaa2_dev_close()
    1654: (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {  in dpaa2_dev_stats_get()
    2511: priv->token,  in populate_mac_addr()
    2525: priv->token,  in populate_mac_addr()
    2672: priv->num_rx_tc, priv->nb_rx_queues,  in dpaa2_dev_init()
    2673: priv->nb_tx_queues, priv->max_cgs);  in dpaa2_dev_init()
    [all …]
|
dpaa2_flow.c
    3022: if (priv->num_rx_tc > 1) {  in dpaa2_flow_entry_update()
    3129: if (priv->num_rx_tc > 1) {  in dpaa2_flow_entry_update()
    3306: dest_dev = priv->eth_dev;  in dpaa2_flow_redirect_dev()
    3557: tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;  in dpaa2_generic_flow_set()
    3563: priv->token, &tc_cfg);  in dpaa2_generic_flow_set()
    3645: priv->qos_entries);  in dpaa2_generic_flow_set()
    3665: priv->fs_entries);  in dpaa2_generic_flow_set()
    3787: priv->qos_entries);  in dpaa2_generic_flow_set()
    4002: priv->dist_queues - 1);  in dpaa2_flow_create()
    4093: if (priv->num_rx_tc > 1) {  in dpaa2_flow_destroy()
    [all …]
|
/dpdk/drivers/net/mvpp2/
mrvl_ethdev.c
    695: if (!priv)  in mrvl_tx_queue_start()
    808: priv->pp_id, priv->ppio_id);  in mrvl_dev_start()
    854: ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);  in mrvl_dev_start()
    1144: used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);  in mrvl_dev_close()
    2025: rxq->priv = priv;  in mrvl_rx_queue_setup()
    2128: txq->priv = priv;  in mrvl_tx_queue_setup()
    3063: if (!priv)  in mrvl_priv_create()
    3067: &priv->pp_id, &priv->ppio_id);  in mrvl_priv_create()
    3097: return priv;  in mrvl_priv_create()
    3099: used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);  in mrvl_priv_create()
    [all …]
|
mrvl_tm.c
    77: LIST_INIT(&priv->nodes);  in mrvl_tm_init()
    79: if (priv->rate_max)  in mrvl_tm_init()
    149: if (!priv->configured)  in mrvl_node_type_get()
    185: if (!priv->configured)  in mrvl_capabilities_get()
    241: if (!priv->configured)  in mrvl_level_capabilities_get()
    312: if (!priv->configured)  in mrvl_node_capabilities_get()
    389: if (!priv->configured)  in mrvl_shaper_profile_add()
    618: if (priv->ppio)  in mrvl_node_add()
    708: if (priv->ppio) {  in mrvl_node_delete()
    864: if (priv->ppio) {  in mrvl_hierarchy_commit()
    [all …]
|
mrvl_mtr.c
    162: LIST_FOREACH(mtr, &priv->mtrs, next)  in mrvl_mtr_from_id()
    218: if (!priv->ppio)  in mrvl_meter_enable()
    261: LIST_FOREACH(flow, &priv->flows, next) {  in mrvl_meter_enable()
    344: mtr = mrvl_mtr_from_id(priv, mtr_id);  in mrvl_create()
    384: if (!priv->ppio)  in mrvl_destroy()
    389: mtr = mrvl_mtr_from_id(priv, mtr_id);  in mrvl_destroy()
    432: if (!priv->ppio)  in mrvl_meter_profile_update()
    437: mtr = mrvl_mtr_from_id(priv, mtr_id);  in mrvl_meter_profile_update()
    492: LIST_INIT(&priv->profiles);  in mrvl_mtr_init()
    493: LIST_INIT(&priv->mtrs);  in mrvl_mtr_init()
    [all …]
|
/dpdk/drivers/net/pfe/
pfe_ethdev.c
    145: struct pfe_eth_priv_s *priv = queue->priv;  in pfe_recv_pkts_on_intr() local
    174: struct pfe_eth_priv_s *priv = queue->priv;  in pfe_recv_pkts() local
    189: struct pfe_eth_priv_s *priv = queue->priv;  in pfe_xmit_pkts() local
    261: client->priv = priv;  in pfe_eth_open()
    297: client->priv = priv;  in pfe_eth_open()
    331: if (priv == NULL)  in pfe_eth_open_cdev()
    351: if (priv == NULL)  in pfe_eth_close_cdev()
    450: pfe = priv->pfe;  in pfe_rx_queue_setup()
    604: priv->promisc = 1;  in pfe_promiscuous_enable()
    775: priv->pfe = pfe;  in pfe_eth_init()
    [all …]
|
/dpdk/drivers/net/mvneta/
mvneta_ethdev.c
    260: if (!priv->ppio)  in mvneta_mtu_set()
    294: if (!priv->ppio)  in mvneta_dev_set_link_up()
    314: if (!priv->ppio)  in mvneta_dev_set_link_down()
    336: if (priv->ppio)  in mvneta_dev_start()
    343: ret = neta_ppio_init(&priv->ppio_params, &priv->ppio);  in mvneta_dev_start()
    348: priv->ppio_id = priv->ppio->port_id;  in mvneta_dev_start()
    411: if (!priv->ppio)  in mvneta_dev_stop()
    438: if (priv->ppio)  in mvneta_dev_close()
    485: if (!priv->ppio)  in mvneta_link_update()
    547: if (!priv->ppio)  in mvneta_promiscuous_enable()
    [all …]
|
/dpdk/drivers/net/nfp/nfpcore/
nfp_cpp_pcie_ops.c
    424: priv->width.write > 0 && priv->width.read != priv->width.write)  in nfp6000_area_init()
    428: priv->width.bar = priv->width.read;  in nfp6000_area_init()
    430: priv->width.bar = priv->width.write;  in nfp6000_area_init()
    442: ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action,  in nfp6000_area_init()
    443: priv->token, priv->offset, priv->size,  in nfp6000_area_init()
    457: priv->bar_offset = priv->offset &  in nfp6000_area_acquire()
    463: NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token);  in nfp6000_area_acquire()
    465: priv->bar_offset = priv->offset & priv->bar->mask;  in nfp6000_area_acquire()
    472: priv->iomem = priv->bar->iomem + priv->bar_offset;  in nfp6000_area_acquire()
    553: if (!priv->bar)  in nfp6000_area_read()
    [all …]
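
Lines 465 and 472 show how an area's usable pointer is derived: the low bits of the target offset, selected by the BAR's mask, index into the mapped window, and the pointer is the window base plus that offset. A small sketch of the arithmetic only, using a heap buffer as a stand-in for the mapped BAR; the 4 KiB mask and names are assumptions:

/* Sketch: translate a target offset into a pointer inside a BAR window. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bar_window {
	uint8_t *iomem; /* base of the mapped window */
	uint64_t mask;  /* selects the offset bits kept inside the window */
};

static uint8_t *
area_ptr(const struct bar_window *bar, uint64_t target_offset)
{
	uint64_t bar_offset = target_offset & bar->mask;

	return bar->iomem + bar_offset;
}

int
main(void)
{
	struct bar_window bar;

	bar.mask = 0xfff; /* pretend 4 KiB window */
	bar.iomem = calloc(1, bar.mask + 1);
	if (bar.iomem == NULL)
		return 1;
	/* Offset 0x12345 lands at 0x345 inside the window. */
	printf("window offset: 0x%llx\n",
	       (unsigned long long)(area_ptr(&bar, 0x12345) - bar.iomem));
	free(bar.iomem);
	return 0;
}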
|