/dpdk/drivers/net/bonding/

rte_eth_bond_api.c
      99  internals->active_slaves[internals->active_slave_count] = port_id;   in activate_slave()
     120  bond_tlb_disable(internals);   in deactivate_slave()
     183  internals->kvlist = NULL;   in rte_eth_bond_create()
     499  internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;   in __eth_bond_slave_add_lock_free()
     547  if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {   in __eth_bond_slave_add_lock_free()
     560  internals->slave_count++;   in __eth_bond_slave_add_lock_free()
     715  internals->current_primary_port = internals->active_slaves[0];   in __eth_bond_slave_remove_lock_free()
     717  internals->current_primary_port = internals->slaves[0].port_id;   in __eth_bond_slave_remove_lock_free()
     727  if (internals->slave_count < 1 && !internals->user_defined_mac)   in __eth_bond_slave_remove_lock_free()
     793  return internals->mode;   in rte_eth_bond_mode_get()
     [all …]
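
The activate_slave() and __eth_bond_slave_remove_lock_free() hits above revolve around the active-slave array kept in the bonding device's private data. A minimal stand-alone sketch of that bookkeeping follows; the struct and function names are simplified stand-ins, not the real bond_dev_private layout, and the bounds check is added here for safety.

    /* Stand-in sketch of the active-slave bookkeeping suggested by the
     * activate_slave() / __eth_bond_slave_remove_lock_free() hits. */
    #include <stdint.h>

    #define MAX_SLAVES 32

    struct bond_private_sketch {
        uint16_t active_slaves[MAX_SLAVES];
        uint16_t active_slave_count;
        uint16_t current_primary_port;
    };

    /* Append a port to the active list, mirroring the line-99 hit. */
    static void
    activate_slave_sketch(struct bond_private_sketch *internals, uint16_t port_id)
    {
        if (internals->active_slave_count >= MAX_SLAVES)
            return; /* guard added for the sketch */
        internals->active_slaves[internals->active_slave_count] = port_id;
        internals->active_slave_count++;
    }

    /* After a removal, fall back to the first remaining active slave as
     * primary, mirroring the line-715 hit. */
    static void
    pick_new_primary_sketch(struct bond_private_sketch *internals)
    {
        if (internals->active_slave_count > 0)
            internals->current_primary_port = internals->active_slaves[0];
    }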
|
rte_eth_bond_pmd.c
    1054  internals);   in bond_ethdev_tx_burst_alb()
    1893  memmove(&internals->slaves[i], &internals->slaves[i + 1],   in slave_remove()
    1918  &internals->slaves[internals->slave_count];   in slave_add()
    1982  if (internals->slaves[i].port_id == internals->primary_port)   in bond_ethdev_start()
    2043  bond_ethdev_primary_set(internals, internals->primary_port);   in bond_ethdev_start()
    3012  internals->current_primary_port = internals->primary_port;   in bond_ethdev_lsc_event_callback()
    3086  memcpy(&internals->reta_conf[i], &internals->reta_conf[0],   in bond_ethdev_rss_reta_update()
    3382  memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));   in bond_alloc()
    3383  memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));   in bond_alloc()
    3386  memset(internals->slaves, 0, sizeof(internals->slaves));   in bond_alloc()
    [all …]
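
The slave_remove() hit at line 1893 hints at how a slave is dropped from the packed slaves[] array: the tail is shifted down with memmove() and the count decremented. A simplified sketch with stand-in types:

    #include <stdint.h>
    #include <string.h>

    struct slave_sketch {
        uint16_t port_id;
        uint16_t reta_size;
    };

    static void
    slave_remove_sketch(struct slave_sketch *slaves, uint16_t *slave_count,
            uint16_t i)
    {
        if (i >= *slave_count)
            return;
        /* Shift the entries after i one slot to the left. */
        memmove(&slaves[i], &slaves[i + 1],
            sizeof(slaves[0]) * (*slave_count - i - 1));
        (*slave_count)--;
        /* Clear the now-unused last slot. */
        memset(&slaves[*slave_count], 0, sizeof(slaves[0]));
    }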
|
rte_eth_bond_alb.c
      26  idx = (internals->mode6.last_slave + 1) % internals->active_slave_count;   in calculate_slave()
      27  internals->mode6.last_slave = idx;   in calculate_slave()
      28  return internals->active_slaves[idx];   in calculate_slave()
      43  rte_spinlock_init(&internals->mode6.lock);   in bond_mode_alb_enable()
      45  internals->mode6.ntt = 0;   in bond_mode_alb_enable()
      48  if (internals->mode6.mempool == NULL) {   in bond_mode_alb_enable()
      62  if (internals->mode6.mempool == NULL) {   in bond_mode_alb_enable()
      76  struct bond_dev_private *internals)   in bond_mode_alb_arp_recv() argument
     123  internals->mode6.ntt = 1;   in bond_mode_alb_arp_recv()
     129  struct bond_dev_private *internals)   in bond_mode_alb_arp_xmit() argument
     [all …]
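
Lines 26-28 (calculate_slave()) are a straightforward round-robin pick over the active-slave list. A stand-alone sketch of the same logic; the real code keeps last_slave inside internals->mode6, and the empty-list guard is added here for safety:

    #include <stdint.h>

    struct alb_rr_sketch {
        uint16_t active_slaves[32];
        uint16_t active_slave_count;
        uint16_t last_slave;
    };

    /* Return the next active slave in round-robin order. */
    static uint16_t
    calculate_slave_sketch(struct alb_rr_sketch *internals)
    {
        uint16_t idx;

        if (internals->active_slave_count == 0)
            return UINT16_MAX; /* no active slave */

        idx = (internals->last_slave + 1) % internals->active_slave_count;
        internals->last_slave = idx;
        return internals->active_slaves[idx];
    }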
|
rte_eth_bond_flow.c
      72  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_validate()
     100  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_create()
     113  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_create()
     115  rte_flow_destroy(internals->slaves[i].port_id,   in bond_flow_create()
     130  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_destroy()
     143  TAILQ_REMOVE(&internals->flow_list, flow, next);   in bond_flow_destroy()
     184  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_query_count()
     224  for (i = 0; i < internals->slave_count; i++) {   in bond_flow_isolate()
     229  internals->flow_isolated_valid = 0;   in bond_flow_isolate()
     233  internals->flow_isolated = set;   in bond_flow_isolate()
     [all …]
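
The bond_flow_create() hits show the fan-out pattern of this file: every rte_flow operation is replayed on each slave port, and a failure on one slave rolls back the rules already created on the others (the rte_flow_destroy() call at line 115). A hedged sketch of that shape, using the public rte_flow API but with simplified argument and bookkeeping handling (the real driver also records the per-slave handles in its own flow list):

    #include <stdint.h>
    #include <rte_flow.h>

    static int
    flow_create_on_slaves_sketch(const uint16_t *slave_ports, uint16_t slave_count,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_flow **handles, /* slave_count entries */
            struct rte_flow_error *error)
    {
        uint16_t i, j;

        for (i = 0; i < slave_count; i++) {
            handles[i] = rte_flow_create(slave_ports[i], attr, pattern,
                    actions, error);
            if (handles[i] == NULL)
                goto err;
        }
        return 0;
    err:
        /* Roll back the rules already created on previous slaves. */
        for (j = 0; j < i; j++)
            rte_flow_destroy(slave_ports[j], handles[j], error);
        return -1;
    }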
|
rte_eth_bond_8023ad.c
     379  internals->mode4.short_timeout);   in rx_machine()
     532  internals->port_id, slave_id);   in mux_machine()
     543  internals->port_id, slave_id);   in mux_machine()
     956  tx_machine(internals, slave_id);   in bond_mode_8023ad_periodic_cb()
    1056  internals->active_slave_count, slave_id) == internals->active_slave_count);   in bond_mode_8023ad_activate_slave()
    1291  internals->active_slaves[i]);   in bond_mode_8023ad_enable()
    1436  if (internals->mode != 4)   in rte_eth_bond_8023ad_agg_selection_set()
    1439  mode4 = &internals->mode4;   in rte_eth_bond_8023ad_agg_selection_set()
    1458  if (internals->mode != 4)   in rte_eth_bond_8023ad_agg_selection_get()
    1460  mode4 = &internals->mode4;   in rte_eth_bond_8023ad_agg_selection_get()
    [all …]
|
eth_bond_private.h
     215  valid_slave_port_id(struct bond_dev_private *internals, uint16_t port_id);
     254  slave_remove(struct bond_dev_private *internals,
     258  slave_add(struct bond_dev_private *internals,
     275  bond_ethdev_primary_set(struct bond_dev_private *internals,
     315  bond_tlb_disable(struct bond_dev_private *internals);
     318  bond_tlb_enable(struct bond_dev_private *internals);
     321  bond_tlb_activate_slave(struct bond_dev_private *internals);
|
rte_eth_bond_alb.h
      72  struct bond_dev_private *internals);
      89  struct bond_dev_private *internals);
     103  struct rte_mbuf *pkt, struct bond_dev_private *internals);
|
/dpdk/drivers/net/nfb/

nfb_rxmode.c
      13  struct pmd_internals *internals = (struct pmd_internals *)   in nfb_eth_promiscuous_enable() local
      17  for (i = 0; i < internals->max_rxmac; ++i) {   in nfb_eth_promiscuous_enable()
      18  nc_rxmac_mac_filter_enable(internals->rxmac[i],   in nfb_eth_promiscuous_enable()
      36  for (i = 0; i < internals->max_rxmac; ++i) {   in nfb_eth_promiscuous_disable()
      52  if (internals->max_rxmac > 0)   in nfb_eth_promiscuous_get()
      53  nc_rxmac_read_status(internals->rxmac[0], &status);   in nfb_eth_promiscuous_get()
      67  for (i = 0; i < internals->max_rxmac; ++i) {   in nfb_eth_allmulticast_enable()
      68  nc_rxmac_mac_filter_enable(internals->rxmac[i],   in nfb_eth_allmulticast_enable()
      86  for (i = 0; i < internals->max_rxmac; ++i) {   in nfb_eth_allmulticast_disable()
      87  nc_rxmac_mac_filter_enable(internals->rxmac[i],   in nfb_eth_allmulticast_disable()
      [all …]
|
nfb_ethdev.c
     197  nfb_close(internals->nfb);   in nfb_eth_dev_configure()
     275  nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac);   in nfb_eth_dev_close()
     276  nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);   in nfb_eth_dev_close()
     552  internals->nfb = nfb_open(internals->nfb_dev);   in nfb_eth_dev_init()
     553  if (internals->nfb == NULL) {   in nfb_eth_dev_init()
     555  internals->nfb_dev);   in nfb_eth_dev_init()
     565  internals->rxmac,   in nfb_eth_dev_init()
     566  &internals->max_rxmac);   in nfb_eth_dev_init()
     568  internals->txmac,   in nfb_eth_dev_init()
     569  &internals->max_txmac);   in nfb_eth_dev_init()
     [all …]
|
/dpdk/drivers/net/kni/

rte_eth_kni.c
      41  struct pmd_internals *internals;   member
     113  while (!internals->stop_thread) {   in kni_handle_request()
     139  if (internals->kni == NULL) {   in eth_kni_start()
     159  internals->is_kni_started = 1;   in eth_kni_dev_start()
     163  internals->stop_thread = 0;   in eth_kni_dev_start()
     167  kni_handle_request, internals);   in eth_kni_dev_start()
     186  if (internals->no_request_thread == 0 && internals->stop_thread == 0) {   in eth_kni_dev_stop()
     187  internals->stop_thread = 1;   in eth_kni_dev_stop()
     207  struct pmd_internals *internals;   in eth_kni_close() local
     261  q->internals = internals;   in eth_kni_rx_queue_setup()
     [all …]
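
eth_kni_dev_start()/eth_kni_dev_stop() toggle a stop_thread flag that the request-handling loop polls (lines 113, 163, 187). A portable sketch of that start/stop handshake, using pthreads and an atomic flag in place of the driver's control-thread helpers and KNI calls:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    struct kni_thread_sketch {
        atomic_bool stop_thread;
        pthread_t tid;
    };

    static void *
    request_loop_sketch(void *arg)
    {
        struct kni_thread_sketch *internals = arg;

        while (!atomic_load(&internals->stop_thread)) {
            /* real driver: rte_kni_handle_request(internals->kni); */
            usleep(500);
        }
        return NULL;
    }

    static int
    start_sketch(struct kni_thread_sketch *internals)
    {
        atomic_store(&internals->stop_thread, false);
        return pthread_create(&internals->tid, NULL, request_loop_sketch,
                internals);
    }

    static void
    stop_sketch(struct kni_thread_sketch *internals)
    {
        atomic_store(&internals->stop_thread, true);
        pthread_join(internals->tid, NULL);
    }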
|
/dpdk/drivers/net/af_packet/

rte_eth_af_packet.c
     447  req = &internals->req;   in eth_dev_close()
     454  free(internals->if_name);   in eth_dev_close()
     455  rte_free(internals->rx_queue);   in eth_dev_close()
     456  rte_free(internals->tx_queue);   in eth_dev_close()
     698  *internals = rte_zmalloc_socket(name, sizeof(**internals),   in rte_pmd_init_internals()
     700  if (*internals == NULL)   in rte_pmd_init_internals()
     712  if (!(*internals)->rx_queue || !(*internals)->tx_queue) {   in rte_pmd_init_internals()
     723  req = &((*internals)->req);   in rte_pmd_init_internals()
     933  free((*internals)->if_name);   in rte_pmd_init_internals()
     934  rte_free(*internals);   in rte_pmd_init_internals()
     [all …]
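
rte_pmd_init_internals() returns the private struct through a double pointer and unwinds every allocation on failure, which is why both free((*internals)->if_name) and rte_free(*internals) appear on the error path (lines 698-934). A cut-down sketch of that shape; the struct, field names and sizes below are placeholders:

    #include <stdlib.h>
    #include <string.h>
    #include <rte_malloc.h>

    struct af_packet_internals_sketch {
        char *if_name;   /* from strdup(), released with free() */
        void *rx_queue;  /* from rte_zmalloc_socket(), released with rte_free() */
        void *tx_queue;
    };

    static int
    init_internals_sketch(const char *name, const char *if_name, int socket_id,
            struct af_packet_internals_sketch **internals)
    {
        *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, socket_id);
        if (*internals == NULL)
            return -1;

        (*internals)->if_name = strdup(if_name);
        (*internals)->rx_queue = rte_zmalloc_socket(name, 1024, 0, socket_id);
        (*internals)->tx_queue = rte_zmalloc_socket(name, 1024, 0, socket_id);
        if ((*internals)->if_name == NULL ||
            (*internals)->rx_queue == NULL || (*internals)->tx_queue == NULL)
            goto error;
        return 0;

    error:
        /* Mirror the error path's cleanup: plain free() for the strdup'd
         * name, rte_free() for the DPDK allocations. */
        free((*internals)->if_name);
        rte_free((*internals)->rx_queue);
        rte_free((*internals)->tx_queue);
        rte_free(*internals);
        *internals = NULL;
        return -1;
    }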
|
/dpdk/drivers/net/null/

rte_eth_null.c
      33  struct pmd_internals *internals;   member
     215  struct pmd_internals *internals;   in eth_rx_queue_setup() local
     236  internals->rx_null_queues[rx_queue_id].internals = internals;   in eth_rx_queue_setup()
     249  struct pmd_internals *internals;   in eth_tx_queue_setup() local
     269  internals->tx_null_queues[tx_queue_id].internals = internals;   in eth_tx_queue_setup()
     285  struct pmd_internals *internals;   in eth_dev_info() local
     547  internals->no_rx = args->no_rx;   in eth_dev_null_create()
     552  internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_ETH_RETA_GROUP_SIZE;   in eth_dev_null_create()
     568  if (internals->packet_copy) {   in eth_dev_null_create()
     571  } else if (internals->no_rx) {   in eth_dev_null_create()
     [all …]
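
Lines 236 and 269 show each queue storing a back-pointer to the device's private data, so the rx/tx burst callbacks, which only receive the queue pointer, can still reach device-wide state. A minimal sketch with stand-in types:

    #include <stdint.h>

    #define NB_QUEUES_SKETCH 8

    struct pmd_internals_sketch;

    struct null_queue_sketch {
        struct pmd_internals_sketch *internals; /* back-pointer, cf. line 33 */
        uint64_t rx_pkts;
    };

    struct pmd_internals_sketch {
        struct null_queue_sketch rx_null_queues[NB_QUEUES_SKETCH];
        int packet_copy;
    };

    static void
    rx_queue_setup_sketch(struct pmd_internals_sketch *internals,
            uint16_t rx_queue_id)
    {
        if (rx_queue_id >= NB_QUEUES_SKETCH)
            return; /* guard added for the sketch */
        internals->rx_null_queues[rx_queue_id].internals = internals;
    }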
|
/dpdk/drivers/net/af_xdp/

rte_eth_af_xdp.c
     934  if (internals->shared_umem) {   in eth_dev_close()
    1015  internals->if_name)) {   in get_shared_umem()
    1318  internals->if_index,   in xsk_configure()
    1319  &internals->map);   in xsk_configure()
    1322  internals->prog_path);   in xsk_configure()
    1330  if (internals->shared_umem)   in xsk_configure()
    1742  internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);   in init_internals()
    1743  if (internals == NULL)   in init_internals()
    1790  internals->tx_queues[i].pair = &internals->rx_queues[i];   in init_internals()
    1791  internals->rx_queues[i].pair = &internals->tx_queues[i];   in init_internals()
    [all …]
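
Lines 1790-1791 cross-link each RX queue with its TX counterpart, since a single AF_XDP socket serves both directions of a queue pair. A simplified sketch with stand-in queue types:

    #include <stdint.h>

    struct xdp_tx_queue_sketch;

    struct xdp_rx_queue_sketch {
        struct xdp_tx_queue_sketch *pair;
        uint16_t qid;
    };

    struct xdp_tx_queue_sketch {
        struct xdp_rx_queue_sketch *pair;
        uint16_t qid;
    };

    /* Link queue i of each direction to its counterpart. */
    static void
    pair_queues_sketch(struct xdp_rx_queue_sketch *rxq,
            struct xdp_tx_queue_sketch *txq, uint16_t n)
    {
        uint16_t i;

        for (i = 0; i < n; i++) {
            txq[i].pair = &rxq[i];
            rxq[i].pair = &txq[i];
        }
    }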
|
/dpdk/drivers/crypto/qat/

qat_sym.c
      77  struct qat_cryptodev_private *internals;   in qat_sym_build_request() local
      80  internals = cdev->data->dev_private;   in qat_sym_build_request()
     109  struct qat_cryptodev_private *internals;   in qat_sym_build_request() local
     133  internals = cdev->data->dev_private;   in qat_sym_build_request()
     199  struct qat_cryptodev_private *internals;   in qat_sym_dev_create() local
     280  internals->qat_dev = qat_pci_dev;   in qat_sym_dev_create()
     289  if (internals->capa_mz == NULL) {   in qat_sym_dev_create()
     292  if (internals->capa_mz == NULL) {   in qat_sym_dev_create()
     302  internals->qat_dev_capabilities = internals->capa_mz->addr;   in qat_sym_dev_create()
     308  internals->min_enq_burst_threshold =   in qat_sym_dev_create()
     [all …]
|
/dpdk/drivers/compress/isal/

isal_compress_pmd_ops.c
      52  internals->priv_xform_mp = rte_mempool_lookup(mp_name);   in isal_comp_pmd_config()
      54  if (internals->priv_xform_mp != NULL) {   in isal_comp_pmd_config()
      55  if (((internals->priv_xform_mp)->elt_size != elt_size) ||   in isal_comp_pmd_config()
      56  ((internals->priv_xform_mp)->size <   in isal_comp_pmd_config()
      61  internals->priv_xform_mp = NULL;   in isal_comp_pmd_config()
      65  internals->priv_xform_mp = rte_mempool_create(   in isal_comp_pmd_config()
      80  if (internals->priv_xform_mp == NULL) {   in isal_comp_pmd_config()
      85  dev->data->dev_private = internals;   in isal_comp_pmd_config()
     110  rte_mempool_free(internals->priv_xform_mp);   in isal_comp_pmd_close()
     337  rte_mempool_put(internals->priv_xform_mp, priv_xform);   in isal_comp_pmd_priv_xform_create()
     [all …]
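
isal_comp_pmd_config() first tries rte_mempool_lookup() and reuses the existing transform pool only if its element size and depth still fit, otherwise it creates a new one (lines 52-80). A hedged sketch of that lookup-or-create step using the public mempool API, with error handling simplified:

    #include <rte_mempool.h>

    static struct rte_mempool *
    xform_pool_get_sketch(const char *mp_name, unsigned int nb_elts,
            unsigned int elt_size, int socket_id)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_lookup(mp_name);
        if (mp != NULL) {
            /* Reuse only if the existing pool still fits the new config. */
            if (mp->elt_size != elt_size || mp->size < nb_elts)
                return NULL;
            return mp;
        }

        return rte_mempool_create(mp_name, nb_elts, elt_size,
                0 /* cache_size */, 0 /* private_data_size */,
                NULL, NULL, NULL, NULL,
                socket_id, 0 /* flags */);
    }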
|
/dpdk/drivers/net/ring/

rte_eth_ring.c
     259  struct pmd_internals *internals = NULL;   in eth_dev_close() local
     269  internals = dev->data->dev_private;   in eth_dev_close()
     270  if (internals->action == DEV_CREATE) {   in eth_dev_close()
     319  struct pmd_internals *internals = NULL;   in do_eth_dev_ring_create() local
     342  internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);   in do_eth_dev_ring_create()
     343  if (internals == NULL) {   in do_eth_dev_ring_create()
     369  internals->action = action;   in do_eth_dev_ring_create()
     370  internals->max_rx_queues = nb_rx_queues;   in do_eth_dev_ring_create()
     381  data->dev_private = internals;   in do_eth_dev_ring_create()
     385  data->mac_addrs = &internals->address;   in do_eth_dev_ring_create()
     [all …]
|
/dpdk/drivers/net/pcap/

pcap_ethdev.c
     609  if (internals->single_iface) {   in eth_dev_start()
     610  tx = &internals->tx_queue[0];   in eth_dev_start()
     611  rx = &internals->rx_queue[0];   in eth_dev_start()
     625  tx = &internals->tx_queue[i];   in eth_dev_start()
     641  rx = &internals->rx_queue[i];   in eth_dev_start()
     838  if (internals->infinite_rx) {   in eth_dev_close()
     853  if (internals->phy_mac == 0)   in eth_dev_close()
     883  if (internals->infinite_rx) {   in eth_rx_queue_setup()
    1235  (*internals)->phy_mac = 0;   in pmd_init_internals()
    1336  internals->if_index =   in eth_from_pcaps()
    [all …]
|
/dpdk/drivers/compress/zlib/

zlib_pmd_ops.c
      36  struct zlib_private *internals = dev->data->dev_private;   in zlib_pmd_config() local
      40  mp = internals->mp;   in zlib_pmd_config()
      54  internals->mp = mp;   in zlib_pmd_config()
      76  struct zlib_private *internals = dev->data->dev_private;   in zlib_pmd_close() local
      77  rte_mempool_free(internals->mp);   in zlib_pmd_close()
      78  internals->mp = NULL;   in zlib_pmd_close()
     224  struct zlib_private *internals = dev->data->dev_private;   in zlib_pmd_stream_create() local
     231  if (rte_mempool_get(internals->mp, zstream)) {   in zlib_pmd_stream_create()
     244  rte_mempool_put(internals->mp, stream);   in zlib_pmd_stream_create()
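
The zlib PMD keeps its per-stream state objects in a mempool stored in the private data: rte_mempool_get() on stream create, rte_mempool_put() back on failure, rte_mempool_free() on close (lines 77, 231, 244). A minimal sketch of that lifecycle with a stand-in private struct:

    #include <rte_mempool.h>

    struct zlib_private_sketch {
        struct rte_mempool *mp; /* pool of per-stream state objects */
    };

    static int
    stream_create_sketch(struct zlib_private_sketch *internals, void **stream)
    {
        if (rte_mempool_get(internals->mp, stream) < 0)
            return -1;

        /* ... initialise the stream here; if that fails, hand the object
         * back with rte_mempool_put(internals->mp, *stream); */
        return 0;
    }

    static void
    pmd_close_sketch(struct zlib_private_sketch *internals)
    {
        rte_mempool_free(internals->mp);
        internals->mp = NULL;
    }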
|
/dpdk/drivers/crypto/ipsec_mb/

ipsec_mb_ops.c
      82  struct ipsec_mb_dev_private *internals = dev->data->dev_private;   in ipsec_mb_info_get() local
      84  &ipsec_mb_pmds[internals->pmd_type];   in ipsec_mb_info_get()
      90  dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;   in ipsec_mb_info_get()
     210  struct ipsec_mb_dev_private *internals = dev->data->dev_private;   in ipsec_mb_qp_setup() local
     212  &ipsec_mb_pmds[internals->pmd_type];   in ipsec_mb_qp_setup()
     265  qp->pmd_type = internals->pmd_type;   in ipsec_mb_qp_setup()
     304  struct ipsec_mb_dev_private *internals = dev->data->dev_private;   in ipsec_mb_sym_session_get_size() local
     306  &ipsec_mb_pmds[internals->pmd_type];   in ipsec_mb_sym_session_get_size()
     318  struct ipsec_mb_dev_private *internals = dev->data->dev_private;   in ipsec_mb_sym_session_configure() local
     320  &ipsec_mb_pmds[internals->pmd_type];   in ipsec_mb_sym_session_configure()
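
Every op in ipsec_mb_ops.c resolves a per-PMD-type descriptor by indexing a global table with internals->pmd_type (lines 84, 212, 306, 320). A stand-alone sketch of that dispatch pattern; the enum, table and field names below are stand-ins for the real ipsec_mb definitions:

    #include <stdint.h>

    enum mb_pmd_type_sketch { MB_PMD_A = 0, MB_PMD_B, MB_PMD_NB };

    struct mb_pmd_data_sketch {
        uint32_t session_priv_size;
        const void *caps;
    };

    /* One descriptor per PMD flavour, filled in at registration time. */
    static struct mb_pmd_data_sketch mb_pmds_sketch[MB_PMD_NB];

    struct mb_dev_private_sketch {
        enum mb_pmd_type_sketch pmd_type;
        uint16_t max_nb_queue_pairs;
    };

    /* Each op only needs the pmd_type to find its per-type data. */
    static uint32_t
    session_size_sketch(const struct mb_dev_private_sketch *internals)
    {
        const struct mb_pmd_data_sketch *pmd_data =
            &mb_pmds_sketch[internals->pmd_type];

        return pmd_data->session_priv_size;
    }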
|
ipsec_mb_private.c
      50  struct ipsec_mb_dev_private *internals;   in ipsec_mb_create() local
      95  internals = dev->data->dev_private;   in ipsec_mb_create()
      96  internals->pmd_type = pmd_type;   in ipsec_mb_create()
      97  internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;   in ipsec_mb_create()
|
/dpdk/drivers/crypto/ccp/

rte_ccp_pmd.c
      66  struct ccp_private *internals;   in get_ccp_session() local
      75  internals = (struct ccp_private *)qp->dev->data->dev_private;   in get_ccp_session()
      77  internals) != 0)) {   in get_ccp_session()
     232  struct ccp_private *internals;   in cryptodev_ccp_create() local
     268  internals = dev->data->dev_private;   in cryptodev_ccp_create()
     270  internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;   in cryptodev_ccp_create()
     271  internals->auth_opt = init_params->auth_opt;   in cryptodev_ccp_create()
     272  internals->crypto_num_dev = cryptodev_cnt;   in cryptodev_ccp_create()
|
ccp_pmd_ops.c
     618  struct ccp_private *internals = dev->data->dev_private;   in ccp_pmd_info_get() local
     624  if (internals->auth_opt == 1)   in ccp_pmd_info_get()
     626  dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;   in ccp_pmd_info_get()
     690  struct ccp_private *internals = dev->data->dev_private;   in ccp_pmd_qp_setup() local
     694  if (qp_id >= internals->max_nb_qpairs) {   in ccp_pmd_qp_setup()
     696  qp_id, internals->max_nb_qpairs);   in ccp_pmd_qp_setup()
     765  struct ccp_private *internals;   in ccp_pmd_sym_session_configure() local
     776  internals = (struct ccp_private *)dev->data->dev_private;   in ccp_pmd_sym_session_configure()
     777  ret = ccp_set_session_parameters(sess_private_data, xform, internals);   in ccp_pmd_sym_session_configure()
|
/dpdk/drivers/crypto/bcmfs/

bcmfs_sym_pmd.c
      61  struct bcmfs_sym_dev_private *internals = dev->data->dev_private;   in bcmfs_sym_dev_info_get() local
      62  struct bcmfs_device *fsdev = internals->fsdev;   in bcmfs_sym_dev_info_get()
     363  struct bcmfs_sym_dev_private *internals;   in bcmfs_sym_dev_create() local
     390  internals = cryptodev->data->dev_private;   in bcmfs_sym_dev_create()
     391  internals->fsdev = fsdev;   in bcmfs_sym_dev_create()
     392  fsdev->sym_dev = internals;   in bcmfs_sym_dev_create()
     394  internals->sym_dev_id = cryptodev->data->dev_id;   in bcmfs_sym_dev_create()
     395  internals->fsdev_capabilities = bcmfs_sym_get_capabilities();   in bcmfs_sym_dev_create()
     400  cryptodev->data->name, internals->sym_dev_id);   in bcmfs_sym_dev_create()
|
/dpdk/drivers/crypto/null/

null_crypto_pmd_ops.c
     119  struct null_crypto_private *internals = dev->data->dev_private;   in null_crypto_pmd_info_get() local
     123  dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;   in null_crypto_pmd_info_get()
     193  struct null_crypto_private *internals = dev->data->dev_private;   in null_crypto_pmd_qp_setup() local
     197  if (qp_id >= internals->max_nb_qpairs) {   in null_crypto_pmd_qp_setup()
     200  qp_id, internals->max_nb_qpairs);   in null_crypto_pmd_qp_setup()
|
/dpdk/drivers/net/tap/

rte_eth_tap.c
    1169  if (internals->nlsk_fd != -1) {   in tap_dev_close()
    1173  internals->nlsk_fd = -1;   in tap_dev_close()
    1178  rxq = &internals->rxq[i];   in tap_dev_close()
    1202  internals->gso_ctx_mp = NULL;   in tap_dev_close()
    1204  if (internals->ka_fd != -1) {   in tap_dev_close()
    1205  close(internals->ka_fd);   in tap_dev_close()
    1206  internals->ka_fd = -1;   in tap_dev_close()
    1219  close(internals->ioctl_sock);   in tap_dev_close()
    1220  internals->ioctl_sock = -1;   in tap_dev_close()
    1655  internals->name, rx_queue_id,   in tap_rx_queue_setup()
    [all …]
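
tap_dev_close() closes each descriptor only when it is still valid and then resets it to -1 (lines 1169-1220), so a repeated close becomes a no-op. A small helper expressing the same idea, with a stand-in struct for the fds involved:

    #include <unistd.h>

    struct tap_fds_sketch {
        int nlsk_fd;
        int ka_fd;
        int ioctl_sock;
    };

    static void
    close_fd_once_sketch(int *fd)
    {
        if (*fd != -1) {
            close(*fd);
            *fd = -1; /* a second close attempt becomes a no-op */
        }
    }

    static void
    tap_close_fds_sketch(struct tap_fds_sketch *internals)
    {
        close_fd_once_sketch(&internals->nlsk_fd);
        close_fd_once_sketch(&internals->ka_fd);
        close_fd_once_sketch(&internals->ioctl_sock);
    }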
|