/dpdk/app/test/

test_eventdev.c
    104  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
    106  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    108  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    110  dev_conf->nb_events_limit =  in devconf_set_default_sane_values()
    121  fn(dev_conf, info);  in test_ethdev_config_run()
    164  dev_conf->nb_event_port_dequeue_depth =  in max_event_port_dequeue_depth()
    172  dev_conf->nb_event_port_enqueue_depth =  in max_event_port_enqueue_depth()
    181  struct rte_event_dev_config dev_conf;  in test_eventdev_configure() [local]
    208  test_ethdev_config_run(&dev_conf, &info,  in test_eventdev_configure()
    212  test_ethdev_config_run(&dev_conf, &info,  in test_eventdev_configure()
    [all …]
|
test_event_eth_tx_adapter.c
    477  struct rte_event_dev_config dev_conf;  in tx_adapter_service() [local]
    484  memset(&dev_conf, 0, sizeof(dev_conf));  in tx_adapter_service()
    513  dev_conf.nb_event_port_dequeue_depth =  in tx_adapter_service()
    515  dev_conf.nb_event_port_enqueue_depth =  in tx_adapter_service()
    517  dev_conf.nb_events_limit =  in tx_adapter_service()
    519  dev_conf.nb_event_queues = qcnt + 1;  in tx_adapter_service()
    520  dev_conf.nb_event_ports = pcnt;  in tx_adapter_service()
    521  err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);  in tx_adapter_service()
    646  struct rte_eth_conf dev_conf;  in tx_adapter_dynamic_device() [local]
    650  memset(&dev_conf, 0, sizeof(dev_conf));  in tx_adapter_dynamic_device()
    [all …]
|
test_dmadev_api.c
    243  struct rte_dma_conf dev_conf = { 0 };  in test_dma_vchan_setup() [local]
    258  dev_conf.nb_vchans = dev_info.max_vchans;  in test_dma_vchan_setup()
    259  ret = rte_dma_configure(test_dev_id, &dev_conf);  in test_dma_vchan_setup()
    263  ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);  in test_dma_vchan_setup()
    299  struct rte_dma_conf dev_conf = { 0 };  in setup_one_vchan() [local]
    304  dev_conf.nb_vchans = dev_info.max_vchans;  in setup_one_vchan()
    305  ret = rte_dma_configure(test_dev_id, &dev_conf);  in setup_one_vchan()
    319  struct rte_dma_conf dev_conf = { 0 };  in test_dma_start_stop() [local]
    336  ret = rte_dma_configure(test_dev_id, &dev_conf);  in test_dma_start_stop()
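
The dmadev test flow above is the standard bring-up order: query the device, size `rte_dma_conf`, configure, then set up each virtual channel. A minimal sketch of that sequence, assuming a valid `dev_id` and a mem-to-mem capable device (the single-vchan choice and the use of `min_desc` are illustrative, not taken from the test):

```c
#include <rte_dmadev.h>

/* Sketch: configure a DMA device with one mem-to-mem virtual channel
 * and start it. Returns 0 on success or the failing call's error code. */
static int
dma_dev_setup(int16_t dev_id)
{
	struct rte_dma_info dev_info;
	struct rte_dma_conf dev_conf = { 0 };
	struct rte_dma_vchan_conf vchan_conf = { 0 };
	int ret;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0)
		return ret;

	dev_conf.nb_vchans = 1;               /* the selftests use dev_info.max_vchans */
	ret = rte_dma_configure(dev_id, &dev_conf);
	if (ret != 0)
		return ret;

	vchan_conf.direction = RTE_DMA_DIRECTION_MEM_TO_MEM;
	vchan_conf.nb_desc = dev_info.min_desc;   /* smallest ring the device accepts */
	ret = rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
	if (ret != 0)
		return ret;

	return rte_dma_start(dev_id);
}
```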
|
test_event_crypto_adapter.c
    603  evdev_set_conf_values(struct rte_event_dev_config *dev_conf,  in evdev_set_conf_values() [argument]
    606  memset(dev_conf, 0, sizeof(struct rte_event_dev_config));  in evdev_set_conf_values()
    607  dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;  in evdev_set_conf_values()
    608  dev_conf->nb_event_ports = NB_TEST_PORTS;  in evdev_set_conf_values()
    609  dev_conf->nb_event_queues = NB_TEST_QUEUES;  in evdev_set_conf_values()
    610  dev_conf->nb_event_queue_flows = info->max_event_queue_flows;  in evdev_set_conf_values()
    611  dev_conf->nb_event_port_dequeue_depth =  in evdev_set_conf_values()
    613  dev_conf->nb_event_port_enqueue_depth =  in evdev_set_conf_values()
    615  dev_conf->nb_event_port_enqueue_depth =  in evdev_set_conf_values()
    617  dev_conf->nb_events_limit =  in evdev_set_conf_values()
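
`evdev_set_conf_values()` above, like the various `devconf_set_default_sane_values()` helpers in this directory and in the driver selftests, derives a legal `rte_event_dev_config` from the limits reported by `rte_event_dev_info_get()`. A condensed sketch of that pattern, assuming `dev_id` is a valid event device (the single queue/port counts are illustrative):

```c
#include <string.h>
#include <rte_eventdev.h>

/* Sketch: build an event device configuration inside the advertised
 * limits and apply it with rte_event_dev_configure(). */
static int
eventdev_configure_defaults(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config dev_conf;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret != 0)
		return ret;

	memset(&dev_conf, 0, sizeof(dev_conf));
	dev_conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	dev_conf.nb_event_queues = 1;
	dev_conf.nb_event_ports = 1;
	dev_conf.nb_event_queue_flows = info.max_event_queue_flows;
	dev_conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	dev_conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	dev_conf.nb_events_limit = info.max_num_events;

	return rte_event_dev_configure(dev_id, &dev_conf);
}
```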
|
test_event_timer_adapter.c
     77  dev_conf->nb_event_ports = 1;  in devconf_set_default_sane_values()
     78  dev_conf->nb_event_queues = 1;  in devconf_set_default_sane_values()
     80  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
     82  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
     84  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
     86  dev_conf->nb_events_limit =  in devconf_set_default_sane_values()
     94  struct rte_event_dev_config dev_conf;  in eventdev_setup() [local]
    106  ret = rte_event_dev_configure(evdev, &dev_conf);  in eventdev_setup()
    231  struct rte_event_dev_config dev_conf;  in test_port_conf_cb() [local]
    260  port_id = dev_conf.nb_event_ports;  in test_port_conf_cb()
    [all …]
|
/dpdk/lib/ethdev/

rte_ethdev_trace.h
     25  uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf, int rc),
     29  rte_trace_point_emit_u32(dev_conf->link_speeds);
     30  rte_trace_point_emit_u32(dev_conf->rxmode.mq_mode);
     31  rte_trace_point_emit_u32(dev_conf->rxmode.mtu);
     32  rte_trace_point_emit_u64(dev_conf->rxmode.offloads);
     33  rte_trace_point_emit_u32(dev_conf->txmode.mq_mode);
     34  rte_trace_point_emit_u64(dev_conf->txmode.offloads);
     35  rte_trace_point_emit_u32(dev_conf->lpbk_mode);
|
rte_ethdev.c
   1081  if (dev_conf == NULL) {  in rte_eth_dev_configure()
   1105  memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));  in rte_eth_dev_configure()
   1111  if (dev_conf != &dev->data->dev_conf)  in rte_eth_dev_configure()
   1112  memcpy(&dev->data->dev_conf, dev_conf,  in rte_eth_dev_configure()
   1188  if (dev_conf->rxmode.mtu == 0)  in rte_eth_dev_configure()
   1221  dev_conf->rxmode.offloads) {  in rte_eth_dev_configure()
   1310  dev_conf->rxmode.offloads,  in rte_eth_dev_configure()
   1320  dev_conf->txmode.offloads,  in rte_eth_dev_configure()
   1335  memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));  in rte_eth_dev_configure()
   3161  if (dev_conf == NULL) {  in rte_eth_dev_conf_get()
   [all …]
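
`rte_eth_dev_configure()` above validates the caller's `rte_eth_conf`, snapshots the current `dev->data->dev_conf` into `orig_conf`, copies the new configuration in, and restores the original on failure. From the application side the call is simply the sketch below, assuming one Rx and one Tx queue and default (zeroed) modes:

```c
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: configure a port with one Rx and one Tx queue from a
 * zero-initialised rte_eth_conf (no RSS, no offloads requested). */
static int
port_configure(uint16_t port_id)
{
	struct rte_eth_conf dev_conf;

	memset(&dev_conf, 0, sizeof(dev_conf));
	dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
	dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;

	/* Queues still need rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup()
	 * before rte_eth_dev_start(). */
	return rte_eth_dev_configure(port_id, 1, 1, &dev_conf);
}
```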
|
/dpdk/lib/eventdev/

rte_eventdev.c
    315  if (dev_conf == NULL)  in rte_event_dev_configure()
    324  || dev_conf->dequeue_timeout_ns >  in rte_event_dev_configure()
    343  if (!dev_conf->nb_event_queues) {  in rte_event_dev_configure()
    356  if (dev_conf->nb_event_queues -  in rte_event_dev_configure()
    366  dev_conf->nb_event_queues) {  in rte_event_dev_configure()
    370  dev_conf->nb_event_queues);  in rte_event_dev_configure()
    375  if (!dev_conf->nb_event_ports) {  in rte_event_dev_configure()
    387  if (dev_conf->nb_event_ports -  in rte_event_dev_configure()
    398  dev_conf->nb_event_ports) {  in rte_event_dev_configure()
    403  dev_conf->nb_event_ports);  in rte_event_dev_configure()
    [all …]
|
eventdev_trace.h
     27  const struct rte_event_dev_config *dev_conf, int rc),
     29  rte_trace_point_emit_u32(dev_conf->dequeue_timeout_ns);
     30  rte_trace_point_emit_i32(dev_conf->nb_events_limit);
     31  rte_trace_point_emit_u8(dev_conf->nb_event_queues);
     32  rte_trace_point_emit_u8(dev_conf->nb_event_ports);
     33  rte_trace_point_emit_u32(dev_conf->nb_event_queue_flows);
     34  rte_trace_point_emit_u32(dev_conf->nb_event_port_dequeue_depth);
     35  rte_trace_point_emit_u32(dev_conf->nb_event_port_enqueue_depth);
     36  rte_trace_point_emit_u32(dev_conf->event_dev_cfg);
     37  rte_trace_point_emit_u8(dev_conf->nb_single_link_event_port_queues);
|
/dpdk/drivers/net/bnxt/

bnxt_rxq.c
     68  struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;  in bnxt_mq_rx_configure() [local]
     71  &dev_conf->rx_adv_conf.vmdq_rx_conf;  in bnxt_mq_rx_configure()
     85  switch (dev_conf->rxmode.mq_mode) {  in bnxt_mq_rx_configure()
    108  dev_conf->rxmode.mq_mode);  in bnxt_mq_rx_configure()
    112  } else if (!dev_conf->rxmode.mq_mode) {  in bnxt_mq_rx_configure()
    141  if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {  in bnxt_mq_rx_configure()
    151  if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||  in bnxt_mq_rx_configure()
    152  !(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))  in bnxt_mq_rx_configure()
    465  struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;  in bnxt_rx_queue_start() [local]
    498  if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {  in bnxt_rx_queue_start()
    [all …]
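
`bnxt_mq_rx_configure()` above branches on the `dev_conf->rxmode.mq_mode` that the application selected at configure time. A sketch of requesting RSS distribution from the application side (the hash types are illustrative; a PMD may mask `rss_hf` against its `flow_type_rss_offloads`):

```c
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: ask the PMD to spread Rx traffic across nb_rxq queues by RSS. */
static int
port_configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf dev_conf;

	memset(&dev_conf, 0, sizeof(dev_conf));
	dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;   /* use the PMD default key */
	dev_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &dev_conf);
}
```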
|
/dpdk/examples/ip_pipeline/

cryptodev.c
     52  struct rte_cryptodev_config dev_conf;  in cryptodev_create() [local]
     93  dev_conf.socket_id = socket_id;  in cryptodev_create()
     94  dev_conf.nb_queue_pairs = params->n_queues;  in cryptodev_create()
     95  dev_conf.ff_disable = 0;  in cryptodev_create()
     97  status = rte_cryptodev_configure(dev_id, &dev_conf);  in cryptodev_create()
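
Both this example and the softnic copy further down fill only `socket_id`, `nb_queue_pairs` and `ff_disable` before calling `rte_cryptodev_configure()`. A hedged sketch of the same flow (queue-pair and session-pool setup are omitted; `n_queues` and `socket_id` come from the caller):

```c
#include <string.h>
#include <rte_cryptodev.h>

/* Sketch: configure and start a crypto device. Queue pairs would still be
 * set up with rte_cryptodev_queue_pair_setup() before real use. */
static int
cryptodev_setup(uint8_t dev_id, uint16_t n_queues, int socket_id)
{
	struct rte_cryptodev_config dev_conf;
	int status;

	memset(&dev_conf, 0, sizeof(dev_conf));
	dev_conf.socket_id = socket_id;
	dev_conf.nb_queue_pairs = n_queues;
	dev_conf.ff_disable = 0;        /* keep all device feature flags enabled */

	status = rte_cryptodev_configure(dev_id, &dev_conf);
	if (status < 0)
		return status;

	return rte_cryptodev_start(dev_id);
}
```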
|
/dpdk/drivers/event/dpaa2/

dpaa2_eventdev_selftest.c
     92  memset(dev_conf, 0, sizeof(struct rte_event_dev_config));  in devconf_set_default_sane_values()
     93  dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;  in devconf_set_default_sane_values()
     94  dev_conf->nb_event_ports = info->max_event_ports;  in devconf_set_default_sane_values()
     95  dev_conf->nb_event_queues = info->max_event_queues;  in devconf_set_default_sane_values()
     97  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
     99  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    101  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    103  dev_conf->nb_events_limit =  in devconf_set_default_sane_values()
    117  struct rte_event_dev_config dev_conf;  in _eventdev_setup() [local]
    139  devconf_set_default_sane_values(&dev_conf, &info);  in _eventdev_setup()
    [all …]
|
/dpdk/lib/dmadev/

rte_dmadev.c
    443  if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)  in rte_dma_configure()
    458  if (dev_conf->nb_vchans == 0) {  in rte_dma_configure()
    463  if (dev_conf->nb_vchans > dev_info.max_vchans) {  in rte_dma_configure()
    468  if (dev_conf->enable_silent &&  in rte_dma_configure()
    475  ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,  in rte_dma_configure()
    478  memcpy(&dev->data->dev_conf, dev_conf,  in rte_dma_configure()
    493  if (dev->data->dev_conf.nb_vchans == 0) {  in rte_dma_start()
    589  if (dev->data->dev_conf.nb_vchans == 0) {  in rte_dma_vchan_setup()
    663  if (vchan >= dev->data->dev_conf.nb_vchans &&  in rte_dma_stats_get()
    684  if (vchan >= dev->data->dev_conf.nb_vchans &&  in rte_dma_stats_reset()
    [all …]
|
rte_dmadev_pmd.h
     34  const struct rte_dma_conf *dev_conf,
    101  struct rte_dma_conf dev_conf; /**< DMA device configuration. */  [member]
|
/dpdk/drivers/net/softnic/

rte_eth_softnic_cryptodev.c
     61  struct rte_cryptodev_config dev_conf;  in softnic_cryptodev_create() [local]
    104  dev_conf.socket_id = socket_id;  in softnic_cryptodev_create()
    105  dev_conf.nb_queue_pairs = params->n_queues;  in softnic_cryptodev_create()
    107  status = rte_cryptodev_configure(dev_id, &dev_conf);  in softnic_cryptodev_create()
|
/dpdk/drivers/net/nfp/

nfp_common.c
    142  struct rte_eth_conf *dev_conf;  in nfp_net_configure() [local]
    159  dev_conf = &dev->data->dev_conf;  in nfp_net_configure()
    160  rxmode = &dev_conf->rxmode;  in nfp_net_configure()
    161  txmode = &dev_conf->txmode;  in nfp_net_configure()
    347  struct rte_eth_conf *dev_conf;  in nfp_check_offloads() [local]
    354  dev_conf = &dev->data->dev_conf;  in nfp_check_offloads()
    355  rxmode = &dev_conf->rxmode;  in nfp_check_offloads()
    356  txmode = &dev_conf->txmode;  in nfp_check_offloads()
   1268  struct rte_eth_conf *dev_conf;  in nfp_net_rss_config_default() [local]
   1293  dev_conf = &dev->data->dev_conf;  in nfp_net_rss_config_default()
    [all …]
|
nfp_ethdev_vf.c
     57  struct rte_eth_conf *dev_conf;  in nfp_netvf_start() [local]
     73  if (dev->data->dev_conf.intr_conf.rxq != 0) {  in nfp_netvf_start()
    104  dev_conf = &dev->data->dev_conf;  in nfp_netvf_start()
    105  rxmode = &dev_conf->rxmode;  in nfp_netvf_start()
|
/dpdk/drivers/net/txgbe/

txgbe_fdir.c
    173  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in txgbe_fdir_set_input_mask()
    236  &dev->data->dev_conf.fdir_conf.mask;  in txgbe_fdir_store_input_mask()
    237  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in txgbe_fdir_store_input_mask()
    297  &dev->data->dev_conf.fdir_conf.flex_conf;  in txgbe_set_fdir_flex_conf()
    367  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in txgbe_fdir_configure()
    376  err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf,  in txgbe_fdir_configure()
    795  enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;  in txgbe_fdir_filter_program()
    812  dev->data->dev_conf.fdir_conf.pballoc);  in txgbe_fdir_filter_program()
    816  dev->data->dev_conf.fdir_conf.pballoc);  in txgbe_fdir_filter_program()
    842  queue = dev->data->dev_conf.fdir_conf.drop_queue;  in txgbe_fdir_filter_program()
    [all …]
|
/dpdk/doc/guides/nics/

features.rst
     56  * **[uses] user config**: ``dev_conf.intr_conf.lsc``.
     71  * **[uses] user config**: ``dev_conf.intr_conf.rmv``.
     94  * **[uses] user config**: ``dev_conf.intr_conf.rxq``.
    197  ``dev_conf.rxmode.max_lro_pkt_size``.
    279  * **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
    331  * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
    332  * **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
    333  * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
    334  * **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_tx_conf``.
    355  * **[uses] user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
    [all …]
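
The feature matrix above only names which `dev_conf` fields a feature consumes. As an illustration of one of them, `dev_conf.intr_conf.rxq` is set by the application before `rte_eth_dev_configure()` and the per-queue interrupt is armed after the port is started; a minimal sketch (queue 0 and the 1/1 queue counts are illustrative):

```c
#include <string.h>
#include <rte_ethdev.h>

/* Sketch: request Rx-queue interrupt support at configure time and arm
 * the interrupt on queue 0 once the port has been started. */
static int
port_enable_rxq_intr(uint16_t port_id)
{
	struct rte_eth_conf dev_conf;
	int ret;

	memset(&dev_conf, 0, sizeof(dev_conf));
	dev_conf.intr_conf.rxq = 1;     /* [uses] user config: dev_conf.intr_conf.rxq */

	ret = rte_eth_dev_configure(port_id, 1, 1, &dev_conf);
	if (ret != 0)
		return ret;

	/* ...queue setup and rte_eth_dev_start() omitted... */

	return rte_eth_dev_rx_intr_enable(port_id, 0);
}
```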
|
/dpdk/app/test-pmd/

testpmd.c
    596  dev_conf);  in eth_dev_configure_mp()
   1563  port->dev_conf.txmode = tx_mode;  in init_config_port_offloads()
   1564  port->dev_conf.rxmode = rx_mode;  in init_config_port_offloads()
   2758  struct rte_eth_conf dev_conf;  in start_port() [local]
   2786  &(port->dev_conf));  in start_port()
   2809  if (dev_conf.rxmode.offloads !=  in start_port()
   2812  dev_conf.rxmode.offloads;  in start_port()
   2817  dev_conf.rxmode.offloads;  in start_port()
   2820  if (dev_conf.txmode.offloads !=  in start_port()
   2823  dev_conf.txmode.offloads;  in start_port()
   [all …]
|
/dpdk/drivers/event/octeontx/

ssovf_evdev_selftest.c
    125  memset(dev_conf, 0, sizeof(struct rte_event_dev_config));  in devconf_set_default_sane_values()
    126  dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;  in devconf_set_default_sane_values()
    127  dev_conf->nb_event_ports = info->max_event_ports;  in devconf_set_default_sane_values()
    128  dev_conf->nb_event_queues = info->max_event_queues;  in devconf_set_default_sane_values()
    130  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
    132  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    134  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    136  dev_conf->nb_events_limit =  in devconf_set_default_sane_values()
    150  struct rte_event_dev_config dev_conf;  in _eventdev_setup() [local]
    172  devconf_set_default_sane_values(&dev_conf, &info);  in _eventdev_setup()
    [all …]
|
/dpdk/drivers/net/ixgbe/

ixgbe_fdir.c
    320  if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {  in fdir_set_input_mask_82599()
    349  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in fdir_set_input_mask_x550()
    472  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in ixgbe_fdir_store_input_mask()
    488  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in ixgbe_fdir_set_input_mask()
    642  enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;  in ixgbe_fdir_configure()
    689  &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);  in ixgbe_fdir_configure()
   1124  enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;  in ixgbe_fdir_filter_program()
   1168  dev->data->dev_conf.fdir_conf.pballoc);  in ixgbe_fdir_filter_program()
   1173  dev->data->dev_conf.fdir_conf.pballoc);  in ixgbe_fdir_filter_program()
   1191  queue = dev->data->dev_conf.fdir_conf.drop_queue;  in ixgbe_fdir_filter_program()
   [all …]
|
/dpdk/drivers/event/cnxk/

cnxk_eventdev_selftest.c
    115  memset(dev_conf, 0, sizeof(struct rte_event_dev_config));  in devconf_set_default_sane_values()
    116  dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;  in devconf_set_default_sane_values()
    117  dev_conf->nb_event_ports = info->max_event_ports;  in devconf_set_default_sane_values()
    118  dev_conf->nb_event_queues = info->max_event_queues;  in devconf_set_default_sane_values()
    120  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
    122  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    124  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    126  dev_conf->nb_events_limit = info->max_num_events;  in devconf_set_default_sane_values()
    139  struct rte_event_dev_config dev_conf;  in _eventdev_setup() [local]
    154  devconf_set_default_sane_values(&dev_conf, &info);  in _eventdev_setup()
    [all …]
|
/dpdk/drivers/net/mlx4/

mlx4_intr.c
    128  &ETH_DEV(priv)->data->dev_conf.intr_conf;  in mlx4_link_status_alarm()
    193  &ETH_DEV(priv)->data->dev_conf.intr_conf;  in mlx4_interrupt_handler()
    291  &ETH_DEV(priv)->data->dev_conf.intr_conf;  in mlx4_intr_install()
    399  &ETH_DEV(priv)->data->dev_conf.intr_conf;  in mlx4_rxq_intr_enable()
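
mlx4 reads `dev_conf.intr_conf` to decide whether to install its link-status and removal handlers. On the application side, link-status-change delivery needs `dev_conf.intr_conf.lsc = 1` at configure time plus a registered callback; a sketch under those assumptions (the callback body is illustrative):

```c
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: log link changes reported through the LSC interrupt. The port
 * must have been configured with dev_conf.intr_conf.lsc = 1 and the PMD
 * must advertise LSC support. */
static int
on_link_change(uint16_t port_id, enum rte_eth_event_type type,
	       void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	(void)type;
	(void)cb_arg;
	(void)ret_param;
	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		printf("port %u link %s\n", (unsigned int)port_id,
		       link.link_status ? "up" : "down");
	return 0;
}

static int
register_lsc_callback(uint16_t port_id)
{
	/* Typically called right after rte_eth_dev_configure(). */
	return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
					     on_link_change, NULL);
}
```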
|
/dpdk/drivers/net/octeontx/

octeontx_ethdev.c
    336  memset(dev_conf, 0, sizeof(struct rte_event_dev_config));  in devconf_set_default_sane_values()
    339  dev_conf->nb_event_ports = info->max_event_ports;  in devconf_set_default_sane_values()
    340  dev_conf->nb_event_queues = info->max_event_queues;  in devconf_set_default_sane_values()
    343  dev_conf->nb_event_port_dequeue_depth =  in devconf_set_default_sane_values()
    345  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    347  dev_conf->nb_event_port_enqueue_depth =  in devconf_set_default_sane_values()
    349  dev_conf->nb_events_limit =  in devconf_set_default_sane_values()
    408  struct rte_eth_conf *conf = &data->dev_conf;  in octeontx_dev_configure()
   1484  struct rte_event_dev_config dev_conf;  in octeontx_probe() [local]
   1548  devconf_set_default_sane_values(&dev_conf, &info);  in octeontx_probe()
    [all …]
|