| /f-stack/dpdk/drivers/net/bnxt/ |
| bnxt_rxq.c |
     73  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {  in bnxt_mq_rx_configure()
     76  switch (dev_conf->rxmode.mq_mode) {  in bnxt_mq_rx_configure()
     99  dev_conf->rxmode.mq_mode);  in bnxt_mq_rx_configure()
    103  } else if (!dev_conf->rxmode.mq_mode) {  in bnxt_mq_rx_configure()
    133  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {  in bnxt_mq_rx_configure()
    143  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||  in bnxt_mq_rx_configure()
    144  !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))  in bnxt_mq_rx_configure()
    168  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {  in bnxt_mq_rx_configure()
    305  uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;  in bnxt_rx_queue_setup_op()
    481  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {  in bnxt_rx_queue_start()
    [all …]
|
| /f-stack/dpdk/lib/librte_ethdev/ |
| rte_ethdev_trace.h |
     30  rte_trace_point_emit_u32(dev_conf->rxmode.mq_mode);
     31  rte_trace_point_emit_u32(dev_conf->rxmode.max_rx_pkt_len);
     32  rte_trace_point_emit_u64(dev_conf->rxmode.offloads);
|
| rte_ethdev.c |
   1400  port_id, dev_conf->rxmode.max_rx_pkt_len,  in rte_eth_dev_configure()
   1407  port_id, dev_conf->rxmode.max_rx_pkt_len,  in rte_eth_dev_configure()
   1416  dev->data->dev_conf.rxmode.max_rx_pkt_len =  in rte_eth_dev_configure()
   1425  if (dev_conf->rxmode.max_lro_pkt_size == 0)  in rte_eth_dev_configure()
   1427  dev->data->dev_conf.rxmode.max_rx_pkt_len;  in rte_eth_dev_configure()
   1430  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in rte_eth_dev_configure()
   1438  dev_conf->rxmode.offloads) {  in rte_eth_dev_configure()
   1442  port_id, dev_conf->rxmode.offloads,  in rte_eth_dev_configure()
   1527  dev_conf->rxmode.offloads,  in rte_eth_dev_configure()
   1528  dev->data->dev_conf.rxmode.offloads, "Rx",  in rte_eth_dev_configure()
   [all …]
|
| /f-stack/dpdk/examples/l2fwd-event/ |
| l2fwd_common.c |
     13  .rxmode = {  in l2fwd_event_init_ports()
     26  port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;  in l2fwd_event_init_ports()
     83  rxq_conf.offloads = local_port_conf.rxmode.offloads;  in l2fwd_event_init_ports()
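This l2fwd-event entry shows the application-side recipe: request RSS distribution via `rxmode.mq_mode`, mask the requested offloads against the device capabilities, and propagate the port-level `rxmode.offloads` into each queue's `rte_eth_rxconf`. A minimal sketch of that bring-up against the DPDK 20.11 API follows; `port_init`, `NB_RXD` and `NB_TXD` are hypothetical names, not taken from l2fwd_common.c.

```c
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define NB_RXD 1024	/* assumed ring sizes, not taken from the example */
#define NB_TXD 1024

/* Minimal l2fwd-style port bring-up centred on rxmode (sketch). */
static int
port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,	/* spread flows over Rx queues */
			.offloads = DEV_RX_OFFLOAD_CHECKSUM,	/* example request */
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Never request offloads or RSS types the device does not report. */
	port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
	port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* Per-queue offloads start from the port-level rxmode.offloads. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD,
				     rte_eth_dev_socket_id(port_id),
				     &rxq_conf, mbuf_pool);
	if (ret < 0)
		return ret;

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = port_conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD,
				     rte_eth_dev_socket_id(port_id),
				     &txq_conf);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
```

On success the port is started with a single Rx/Tx queue pair; real applications repeat the queue setup once per queue, as the other example entries below (flow_filtering, qos_meter, ip_pipeline) do.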
|
| /f-stack/dpdk/drivers/net/thunderx/ |
| nicvf_ethdev.c |
    154  struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;  in nicvf_dev_set_mtu() local
    180  rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;  in nicvf_dev_set_mtu()
    820  dev->data->dev_conf.rxmode.mq_mode,  in nicvf_configure_rss()
   1735  dev->data->dev_conf.rxmode.max_rx_pkt_len  in nicvf_dev_start()
   1910  struct rte_eth_rxmode *rxmode = &conf->rxmode;  in nicvf_dev_configure() local
   1917  if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)  in nicvf_dev_configure()
   1930  if (rxmode->mq_mode != ETH_MQ_RX_NONE &&  in nicvf_dev_configure()
   1931  rxmode->mq_mode != ETH_MQ_RX_RSS) {  in nicvf_dev_configure()
   1936  if (rxmode->split_hdr_size) {  in nicvf_dev_configure()
   2050  struct rte_eth_rxmode *rxmode;  in nicvf_vlan_offload_config() local
   [all …]
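The nicvf_dev_configure() hits illustrate the typical driver-side validation of `rxmode`: RSS requested at the port level implies the RSS-hash offload, only ETH_MQ_RX_NONE and ETH_MQ_RX_RSS are accepted, and header split is refused. A condensed, hypothetical sketch of a configure callback with that shape (not the actual thunderx code):

```c
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>	/* internal driver view of struct rte_eth_dev */
#include <rte_log.h>

/* Hypothetical dev_configure callback with the usual rxmode checks (sketch). */
static int
xyz_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	/* Requesting RSS at the port level implies the RSS-hash offload. */
	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* Only "no multi-queue" or plain RSS distribution is supported. */
	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		RTE_LOG(ERR, PMD, "unsupported mq_mode %d\n", rxmode->mq_mode);
		return -EINVAL;
	}

	/* Rx header split is not implemented by this (hypothetical) device. */
	if (rxmode->split_hdr_size) {
		RTE_LOG(ERR, PMD, "Rx header split not supported\n");
		return -EINVAL;
	}

	return 0;
}
```

The same mq_mode whitelist and implied RSS-hash offload recur in the cxgbe, qede and nfp entries further down.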
|
| /f-stack/dpdk/drivers/net/i40e/ |
| i40e_rxtx_vec_common.h |
    198  struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;  in i40e_rx_vec_dev_conf_condition_check_default() local
    209  if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)  in i40e_rx_vec_dev_conf_condition_check_default()
    213  if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)  in i40e_rx_vec_dev_conf_condition_check_default()
|
| /f-stack/dpdk/drivers/net/octeontx/ |
| octeontx_ethdev_ops.c |
     41  struct rte_eth_rxmode *rxmode;  in octeontx_dev_vlan_offload_set() local
     44  rxmode = &dev->data->dev_conf.rxmode;  in octeontx_dev_vlan_offload_set()
     47  if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {  in octeontx_dev_vlan_offload_set()
|
| /f-stack/dpdk/drivers/net/hns3/ |
| hns3_rxtx_vec.c |
    193  struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;  in hns3_rx_check_vec_support() local
    203  if (rxmode->offloads & offloads_mask)  in hns3_rx_check_vec_support()
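The i40e and hns3 entries use the same idiom for choosing between the vectorized and scalar Rx paths: if any offload the SIMD routine cannot handle is set in `rxmode.offloads`, the vector path is rejected. A hedged sketch of that check follows; the mask contents are illustrative and do not reproduce either driver's actual list.

```c
#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>

/* Illustrative mask of offloads a SIMD Rx routine cannot handle (sketch). */
#define XYZ_VEC_UNSUPPORTED_RX_OFFLOADS \
	(DEV_RX_OFFLOAD_HEADER_SPLIT | DEV_RX_OFFLOAD_VLAN_EXTEND | \
	 DEV_RX_OFFLOAD_TCP_LRO)

/* Return true when the vector Rx burst function may be selected. */
static bool
xyz_rx_vec_supported(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	if (rxmode->offloads & XYZ_VEC_UNSUPPORTED_RX_OFFLOADS)
		return false;	/* fall back to the scalar Rx path */

	return true;
}
```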
|
| /f-stack/dpdk/drivers/net/octeontx2/ |
| otx2_vlan.c |
     53  if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {  in nix_set_rx_vlan_action()
    415  if (eth_dev->data->dev_conf.rxmode.mq_mode ==  in nix_vlan_handle_default_rx_entry()
    715  struct rte_eth_rxmode *rxmode;  in otx2_nix_vlan_offload_set() local
    718  rxmode = &eth_dev->data->dev_conf.rxmode;  in otx2_nix_vlan_offload_set()
    721  if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {  in otx2_nix_vlan_offload_set()
    733  if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {  in otx2_nix_vlan_offload_set()
    744  if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {  in otx2_nix_vlan_offload_set()
    988  if (eth_dev->data->dev_conf.rxmode.offloads &  in otx2_nix_vlan_offload_init()
|
| /f-stack/dpdk/drivers/net/igc/ |
| igc_ethdev.c |
   1604  dev->data->dev_conf.rxmode.offloads |=  in eth_igc_mtu_set()
   1608  dev->data->dev_conf.rxmode.offloads &=  in eth_igc_mtu_set()
   1618  dev->data->dev_conf.rxmode.max_rx_pkt_len);  in eth_igc_mtu_set()
   2497  if ((dev->data->dev_conf.rxmode.offloads &  in igc_vlan_hw_extend_disable()
   2505  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in igc_vlan_hw_extend_disable()
   2511  dev->data->dev_conf.rxmode.max_rx_pkt_len);  in igc_vlan_hw_extend_disable()
   2530  if ((dev->data->dev_conf.rxmode.offloads &  in igc_vlan_hw_extend_enable()
   2538  dev->data->dev_conf.rxmode.max_rx_pkt_len +  in igc_vlan_hw_extend_enable()
   2544  dev->data->dev_conf.rxmode.max_rx_pkt_len);  in igc_vlan_hw_extend_enable()
   2554  struct rte_eth_rxmode *rxmode;  in eth_igc_vlan_offload_set() local
   [all …]
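The eth_igc_mtu_set() hits (and the cxgbe/dpaa MTU handlers further down) show the conventional coupling between an MTU change and `rxmode`: frames larger than the standard Ethernet maximum turn the jumbo-frame offload on, and `max_rx_pkt_len` is kept in sync with the new frame size. A generic sketch of that callback, with the frame-size arithmetic spelled out (hypothetical driver; `xyz_hw_set_max_frame()` is a stand-in for the device-specific register write):

```c
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>

/* Stand-in for the device-specific register write (hypothetical). */
static int
xyz_hw_set_max_frame(struct rte_eth_dev *dev, uint32_t frame_size)
{
	(void)dev;
	(void)frame_size;
	return 0;	/* pretend the hardware accepted the new frame size */
}

/* Generic mtu_set callback keeping rxmode consistent with the MTU (sketch). */
static int
xyz_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	int ret;

	/* Many PMDs refuse an MTU change while Rx queues may be polling. */
	if (dev->data->dev_started)
		return -EBUSY;

	ret = xyz_hw_set_max_frame(dev, frame_size);
	if (ret != 0)
		return ret;

	/* Anything beyond the standard 1518-byte frame counts as jumbo. */
	if (frame_size > RTE_ETHER_MAX_LEN)
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* Keep max_rx_pkt_len in sync so a later restart uses the new size. */
	rxmode->max_rx_pkt_len = frame_size;

	return 0;
}
```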
|
| /f-stack/dpdk/drivers/net/mvneta/ |
| mvneta_ethdev.c |
    117  if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {  in mvneta_dev_configure()
    119  dev->data->dev_conf.rxmode.mq_mode);  in mvneta_dev_configure()
    124  if (dev->data->dev_conf.rxmode.split_hdr_size) {  in mvneta_dev_configure()
    129  if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)  in mvneta_dev_configure()
    130  dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -  in mvneta_dev_configure()
    265  dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;  in mvneta_mtu_set()
|
| /f-stack/dpdk/drivers/net/e1000/ |
| igb_rxtx.c |
   2297  switch (dev->data->dev_conf.rxmode.mq_mode) {  in igb_dev_mq_rx_configure()
   2319  struct rte_eth_rxmode *rxmode;  in eth_igb_rx_init() local
   2340  rxmode = &dev->data->dev_conf.rxmode;  in eth_igb_rx_init()
   2353  dev->data->dev_conf.rxmode.max_rx_pkt_len +  in eth_igb_rx_init()
   2422  if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +  in eth_igb_rx_init()
   2502  if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)  in eth_igb_rx_init()
   2506  if (rxmode->offloads &  in eth_igb_rx_init()
   2511  if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)  in eth_igb_rx_init()
   2559  if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)  in eth_igb_rx_init()
   2654  (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +  in eth_igbvf_rx_init()
   [all …]
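eth_igb_rx_init() translates `rxmode.offloads` checksum flags into hardware enable bits when the port starts. The fragment below sketches that flag-to-register mapping with made-up register bit names, purely to show the pattern:

```c
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>

/* Illustrative checksum-enable bits of a hypothetical RXCSUM register. */
#define XYZ_RXCSUM_IPOFL	(1u << 8)	/* IPv4 header checksum offload */
#define XYZ_RXCSUM_TUOFL	(1u << 9)	/* TCP/UDP checksum offload */

/* Map rxmode checksum offload flags onto register bits at Rx init (sketch). */
static uint32_t
xyz_rx_csum_ctrl(struct rte_eth_dev *dev, uint32_t rxcsum)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rxcsum |= XYZ_RXCSUM_IPOFL;
	else
		rxcsum &= ~XYZ_RXCSUM_IPOFL;

	if (rxmode->offloads &
	    (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		rxcsum |= XYZ_RXCSUM_TUOFL;
	else
		rxcsum &= ~XYZ_RXCSUM_TUOFL;

	return rxcsum;	/* caller writes this back to the device register */
}
```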
|
| em_rxtx.c |
   1414  offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;  in eth_em_rx_queue_setup()
   1471  if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)  in eth_em_rx_queue_setup()
   1740  struct rte_eth_rxmode *rxmode;  in eth_em_rx_init() local
   1749  rxmode = &dev->data->dev_conf.rxmode;  in eth_em_rx_init()
   1809  if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)  in eth_em_rx_init()
   1842  if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||  in eth_em_rx_init()
   1865  if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)  in eth_em_rx_init()
   1877  rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {  in eth_em_rx_init()
   1884  if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)  in eth_em_rx_init()
   1891  if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)  in eth_em_rx_init()
   [all …]
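The eth_em_rx_queue_setup() hit at source line 1414 shows how a PMD derives the effective per-queue offload set: the union of what the application passed for that queue and the port-wide `rxmode.offloads` (the mlx4 entry below does the same). A short sketch of that convention inside a hypothetical rx_queue_setup callback:

```c
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_mempool.h>

/* Hypothetical rx_queue_setup fragment: the queue operates with the union of
 * its own requested offloads and the port-wide rxmode.offloads (sketch). */
static int
xyz_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		   uint16_t nb_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mp)
{
	uint64_t offloads;

	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		/* e.g. program the ring so the CRC is left on the frame */
	}

	(void)queue_idx;
	(void)nb_desc;
	(void)socket_id;
	(void)mp;
	return 0;
}
```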
|
| /f-stack/dpdk/app/test-eventdev/ |
| test_pipeline_common.c |
    173  .rxmode = {  in pipeline_ethdev_setup()
    195  port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;  in pipeline_ethdev_setup()
    197  port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;  in pipeline_ethdev_setup()
    227  rx_conf.offloads = port_conf.rxmode.offloads;  in pipeline_ethdev_setup()
|
| /f-stack/dpdk/drivers/net/netvsc/ |
| hn_ethdev.c |
    549  const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;  in hn_dev_configure() local
    557  if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)  in hn_dev_configure()
    558  dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;  in hn_dev_configure()
    568  unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;  in hn_dev_configure()
    572  rxmode->offloads);  in hn_dev_configure()
    576  hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);  in hn_dev_configure()
    579  rxmode->offloads);  in hn_dev_configure()
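hn_dev_configure() gates configuration on a capability mask: any bit in `rxmode.offloads` outside HN_RX_OFFLOAD_CAPS makes the configure call fail. A sketch of that gatekeeping pattern, with an illustrative capability mask rather than the real netvsc one:

```c
#include <errno.h>
#include <inttypes.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>

/* Illustrative Rx capability mask, not the real HN_RX_OFFLOAD_CAPS value. */
#define XYZ_RX_OFFLOAD_CAPS \
	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP | \
	 DEV_RX_OFFLOAD_RSS_HASH)

/* Reject any requested Rx offload outside the capability mask (sketch). */
static int
xyz_check_rx_offloads(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint64_t unsupported = rxmode->offloads & ~XYZ_RX_OFFLOAD_CAPS;

	if (unsupported) {
		RTE_LOG(ERR, PMD,
			"unsupported Rx offloads 0x%" PRIx64 " requested\n",
			unsupported);
		return -EINVAL;
	}

	return 0;
}
```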
|
| /f-stack/dpdk/drivers/net/mlx4/ |
| mlx4_rxq.c |
    757  offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;  in mlx4_rx_queue_setup()
    831  if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=  in mlx4_rx_queue_setup()
    837  dev->data->dev_conf.rxmode.max_rx_pkt_len;  in mlx4_rx_queue_setup()
    849  if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {  in mlx4_rx_queue_setup()
    855  dev->data->dev_conf.rxmode.max_rx_pkt_len);  in mlx4_rx_queue_setup()
    863  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in mlx4_rx_queue_setup()
|
| /f-stack/dpdk/examples/flow_filtering/ |
| main.c |
    131  .rxmode = {  in init_port()
    165  rxq_conf.offloads = port_conf.rxmode.offloads;  in init_port()
|
| /f-stack/dpdk/drivers/net/cxgbe/ |
| cxgbe_ethdev.c |
    304  eth_dev->data->dev_conf.rxmode.offloads |=  in cxgbe_dev_mtu_set()
    307  eth_dev->data->dev_conf.rxmode.offloads &=  in cxgbe_dev_mtu_set()
    313  eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;  in cxgbe_dev_mtu_set()
    364  struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;  in cxgbe_dev_start()
    449  if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)  in cxgbe_dev_configure()
    450  eth_dev->data->dev_conf.rxmode.offloads |=  in cxgbe_dev_configure()
    612  unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;  in cxgbe_dev_rx_queue_setup()
    673  eth_dev->data->dev_conf.rxmode.offloads |=  in cxgbe_dev_rx_queue_setup()
    676  eth_dev->data->dev_conf.rxmode.offloads &=  in cxgbe_dev_rx_queue_setup()
|
| /f-stack/dpdk/drivers/net/dpaa/ |
| dpaa_ethdev.c |
    188  dev->data->dev_conf.rxmode.offloads |=  in dpaa_mtu_set()
    191  dev->data->dev_conf.rxmode.offloads &=  in dpaa_mtu_set()
    205  uint64_t rx_offloads = eth_conf->rxmode.offloads;  in dpaa_eth_dev_configure()
    243  if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=  in dpaa_eth_dev_configure()
    245  max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;  in dpaa_eth_dev_configure()
    249  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in dpaa_eth_dev_configure()
    971  } else if (dev->data->dev_conf.rxmode.offloads &  in dpaa_eth_rx_queue_setup()
    973  if (dev->data->dev_conf.rxmode.max_rx_pkt_len >  in dpaa_eth_rx_queue_setup()
    977  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in dpaa_eth_rx_queue_setup()
    986  dev->data->dev_conf.rxmode.max_rx_pkt_len,  in dpaa_eth_rx_queue_setup()
    [all …]
|
| /f-stack/dpdk/drivers/net/qede/ |
| qede_ethdev.c |
   1027  eth_dev->data->dev_conf.rxmode.offloads |=  in qede_vlan_offload_set()
   1105  struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;  in qede_dev_start() local
   1118  if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {  in qede_dev_start()
   1123  rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;  in qede_dev_start()
   1271  struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;  in qede_dev_configure() local
   1278  if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)  in qede_dev_configure()
   1279  rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;  in qede_dev_configure()
   1297  if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||  in qede_dev_configure()
   1298  rxmode->mq_mode == ETH_MQ_RX_RSS)) {  in qede_dev_configure()
   1321  eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -  in qede_dev_configure()
   [all …]
|
| /f-stack/dpdk/drivers/net/sfc/ |
| sfc_port.c |
    367  const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;  in sfc_port_configure() local
    371  if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)  in sfc_port_configure()
    372  port->pdu = rxmode->max_rx_pkt_len;  in sfc_port_configure()
|
| /f-stack/dpdk/examples/qos_meter/ |
| main.c |
     55  .rxmode = {
    359  rxq_conf.offloads = conf.rxmode.offloads;  in main()
    407  rxq_conf.offloads = conf.rxmode.offloads;  in main()
|
| /f-stack/dpdk/examples/ip_pipeline/ |
| link.c |
     47  .rxmode = {
    160  port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;  in link_create()
|
| /f-stack/dpdk/drivers/net/nfp/ |
| nfp_net.c |
    364  struct rte_eth_rxmode *rxmode;  in nfp_net_configure() local
    381  rxmode = &dev_conf->rxmode;  in nfp_net_configure()
    384  if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)  in nfp_net_configure()
    385  rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;  in nfp_net_configure()
    394  if (rxmode->mq_mode & ETH_MQ_RX_RSS &&  in nfp_net_configure()
    615  struct rte_eth_rxmode *rxmode;  in nfp_check_offloads() local
    622  rxmode = &dev_conf->rxmode;  in nfp_check_offloads()
    636  hw->mtu = rxmode->max_rx_pkt_len;  in nfp_check_offloads()
    678  struct rte_eth_rxmode *rxmode;  in nfp_net_start() local
    729  rxmode = &dev_conf->rxmode;  in nfp_net_start()
    [all …]
|
| /f-stack/dpdk/drivers/net/txgbe/ |
| txgbe_ethdev.c |
    988  struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;  in txgbe_vlan_hw_extend_enable() local
   1024  struct rte_eth_rxmode *rxmode;  in txgbe_config_vlan_strip_on_all_queues() local
   1028  rxmode = &dev->data->dev_conf.rxmode;  in txgbe_config_vlan_strip_on_all_queues()
   1029  if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)  in txgbe_config_vlan_strip_on_all_queues()
   1045  struct rte_eth_rxmode *rxmode;  in txgbe_vlan_offload_config() local
   1046  rxmode = &dev->data->dev_conf.rxmode;  in txgbe_vlan_offload_config()
   1122  switch (dev_conf->rxmode.mq_mode) {  in txgbe_check_mq_mode()
   1130  dev_conf->rxmode.mq_mode);  in txgbe_check_mq_mode()
   1147  dev->data->dev_conf.rxmode.mq_mode =  in txgbe_check_mq_mode()
   1154  dev_conf->rxmode.mq_mode);  in txgbe_check_mq_mode()
   [all …]
|