/* * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>
#include "rte_dpaa2_mempool.h"

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_sparser.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE "drv_err_queue"
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 90  /* 9s (90 * 100ms) in total */
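
/*
 * Illustrative devargs usage for the options above (not an exhaustive list),
 * e.g. on the EAL command line: -a fslmc:dpni.1,drv_loopback=1 or
 * -a fslmc:dpni.1,drv_err_queue=1. The device name (dpni.1) is an example
 * only; it depends on the DPL/restool configuration of the board.
 */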

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_RSS_HASH |
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

#define MAX_NB_RX_DESC		11264
int total_nb_rx_desc;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
	{"cgr_reject_frames", 4, 0},
	{"cgr_reject_bytes", 4, 1},
};
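
/*
 * Each entry above maps an xstat name to a (page_id, stats_id) pair: the
 * xstats handlers elsewhere in this file fetch the given statistics page
 * from the MC via dpni_get_statistics() and then pick the counter at
 * stats_id within that page.
 */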

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
				       vlan_id, 0, 0, 0);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			return -ENOTSUP;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}

	return ret;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type __rte_unused,
		    uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if already configured tpids, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
					      priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct fsl_mc_io *dpni = dev->process_private;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

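	/*
	 * Report the SoC SVR (hex) followed by the MC firmware version,
	 * e.g. (illustrative value only) "87360010-10.24.1". Per the ethdev
	 * fw_version_get convention, a positive return value below is the
	 * buffer size the caller would need; 0 means the string fit.
	 */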
	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_2_5G |
			RTE_ETH_LINK_SPEED_10G;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
	/* Same burst size on Tx as on Rx gives the best performance */
	dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
	dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
				RTE_ETH_LINK_SPEED_40G |
				RTE_ETH_LINK_SPEED_50G |
				RTE_ETH_LINK_SPEED_100G;
	}

	return 0;
}

static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
		{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	if (priv->flags & DPAA2_TX_CONF_ENABLE)
		tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
	else
		tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	if (dpaa2_enable_err_queue) {
		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
			sizeof(struct dpaa2_queue), 0);
		if (!priv->rx_err_vq)
			goto fail;

		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
					sizeof(struct queue_storage_info_t) *
					RTE_MAX_LCORE,
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		for (i = 0; i < RTE_MAX_LCORE; i++)
			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
				goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		/*Setup tx confirmation queues*/
		for (i = 0; i < priv->nb_tx_queues; i++) {
			mc_q->eth_data = dev->data;
			mc_q->tc_index = i;
			mc_q->flow_id = 0;
			priv->tx_conf_vq[i] = mc_q++;
			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
			dpaa2_q->q_storage =
				rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
			if (!dpaa2_q->q_storage)
				goto fail_tx_conf;

			memset(dpaa2_q->q_storage, 0,
			       sizeof(struct queue_storage_info_t));
			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
				goto fail_tx_conf;
		}
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx_conf:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
		rte_free(dpaa2_q->q_storage);
		priv->tx_conf_vq[i--] = NULL;
	}
	i = priv->nb_tx_queues;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}

	if (dpaa2_enable_err_queue) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
		if (dpaa2_q->q_storage)
			dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
	}

	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
			/* cleanup tx conf queue storage */
			for (i = 0; i < priv->nb_tx_queues; i++) {
				dpaa2_q = (struct dpaa2_queue *)
						priv->tx_conf_vq[i];
				rte_free(dpaa2_q->q_storage);
			}
		}
		/*free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = dev->process_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret, tc_index;
	uint32_t max_rx_pktlen;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
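	/*
	 * max_rx_pktlen accounts for the Ethernet header, FCS and one VLAN
	 * tag on top of the MTU; the FCS bytes are subtracted again below
	 * when programming the DPNI maximum frame length (the MC appears to
	 * expect the frame length without FCS).
	 */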
	if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
		ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
			priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
		if (ret != 0) {
			DPAA2_PMD_ERR("Unable to set mtu. check config");
			return ret;
		}
		DPAA2_PMD_INFO("MTU configured for the device: %d",
				dev->data->mtu);
	} else {
		return -1;
	}

	if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
			ret = dpaa2_setup_flow_dist(dev,
					eth_conf->rx_adv_conf.rss_conf.rss_hf,
					tc_index);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set flow distribution on tc%d."
					"Check queue config", tc_index);
				return ret;
			}
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
		(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
		return ret;
	}

#if !defined(RTE_LIBRTE_IEEE1588)
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
	{
		ret = rte_mbuf_dyn_rx_timestamp_register(
				&dpaa2_timestamp_dynfield_offset,
				&dpaa2_timestamp_rx_dynflag);
		if (ret != 0) {
			DPAA2_PMD_ERR("Error to register timestamp field/flag");
			return -rte_errno;
		}
		dpaa2_enable_ts[dev->data->port_id] = true;
	}

	if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);

	if (eth_conf->lpbk_mode) {
		ret = dpaa2_dev_recycle_config(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
				dev->data->name);

			return ret;
		}
	} else {
		/** User may disable loopback mode by calling
		 * "dev_configure" with lpbk_mode cleared.
		 * Whether or not the port was previously configured as a
		 * recycle port, recycle de-configure is called here.
		 * If the port is not a recycle port, the de-configure
		 * returns directly.
		 */
		ret = dpaa2_dev_recycle_deconfig(dev);
		if (ret) {
			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
				dev->data->name);

			return ret;
		}
	}

	dpaa2_tm_init(dev);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	total_nb_rx_desc += nb_rx_desc;
	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
		DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
			       MAX_NB_RX_DESC);
		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
			ret = rte_dpaa2_bpid_info_init(mb_pool);
			if (ret)
				return ret;
		}
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv, dpni,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = rx_conf->offloads;

	/*Get the flow id from given VQ id*/
	flow_id = dpaa2_q->flow_id;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* check if a private cgr available. */
	for (i = 0; i < priv->max_cgs; i++) {
		if (!priv->cgid_in_use[i]) {
			priv->cgid_in_use[i] = 1;
			break;
		}
	}

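	/*
	 * If no free congestion group (CGR) was found above, cgid stays at
	 * 0xff and the byte-based per-queue tail drop path further down is
	 * used instead of a frame-based private CGR.
	 */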
	if (i < priv->max_cgs) {
		options |= DPNI_QUEUE_OPT_SET_CGID;
		cfg.cgid = i;
		dpaa2_q->cgid = cfg.cgid;
	} else {
		dpaa2_q->cgid = 0xff;
	}

	/*if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		dpaa2_q->nb_desc = nb_rx_desc;
		/* A private CGR will use the tail drop length as nb_rx_desc.
		 * For the remaining cases we can use standard byte-based
		 * tail drop. There is no HW restriction, but the number of
		 * CGRs is limited, hence this restriction is placed.
		 */
		if (dpaa2_q->cgid != 0xff) {
			/*enabling per rx queue congestion control */
			taildrop.threshold = nb_rx_desc;
			taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
			taildrop.oal = 0;
			DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_CONGESTION_GROUP,
						DPNI_QUEUE_RX,
						dpaa2_q->tc_index,
						dpaa2_q->cgid, &taildrop);
		} else {
			/*enabling per rx queue congestion control */
			taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
			taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
			taildrop.oal = CONG_RX_OAL;
			DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
					rx_queue_id);
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
						DPNI_CP_QUEUE, DPNI_QUEUE_RX,
						dpaa2_q->tc_index, flow_id,
						&taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	} else { /* Disable tail Drop */
		struct dpni_taildrop taildrop = {0};
		DPAA2_PMD_INFO("Tail drop is disabled on queue");

		taildrop.enable = 0;
		if (dpaa2_q->cgid != 0xff) {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
					dpaa2_q->tc_index,
					dpaa2_q->cgid, &taildrop);
		} else {
			ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		}
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
		priv->tx_conf_vq[tx_queue_id];
	struct fsl_mc_io *dpni = dev->process_private;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint16_t channel_id;
	struct dpni_queue_id qid;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		DPAA2_PMD_ERR("%p:Tx deferred start not supported",
			      (void *)dev);
		return -EINVAL;
	}

	dpaa2_q->nb_desc = UINT16_MAX;
	dpaa2_q->offloads = tx_conf->offloads;

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	if (tx_queue_id == 0) {
		/*Set tx-conf and error configuration*/
		if (priv->flags & DPAA2_TX_CONF_ENABLE)
			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
							    priv->token,
							    DPNI_CONF_AFFINE);
		else
			ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
							    priv->token,
							    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}

	tc_id = tx_queue_id % priv->num_tx_tc;
	channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
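	/*
	 * Tx queues are spread across traffic classes first and then across
	 * channels: the queue id modulo the number of Tx TCs selects the TC,
	 * and the quotient (wrapped on the channel count) selects the channel
	 * that is encoded in the high byte of the queue index passed to the
	 * MC calls below.
	 */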
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			"tc_id=%d, flow=%d err=%d",
			tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	dpaa2_q->tc_index = tc_id;

	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
			     dpaa2_q->flow_id, &tx_flow_cfg, &qid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
		return -1;
	}
	dpaa2_q->fqid = qid.fqid;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

		dpaa2_q->nb_desc = nb_tx_desc;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = nb_tx_desc;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.(90% of value)
		 */
		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       ((channel_id << 8) | tc_id),
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;

	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
		dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
		options = options | DPNI_QUEUE_OPT_USER_CTX;
		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
			      "tc_index=%d, flow=%d err=%d",
			      dpaa2_tx_conf_q->tc_index,
			      dpaa2_tx_conf_q->flow_id, ret);
			return -1;
		}

		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
			return -1;
		}
		dpaa2_tx_conf_q->fqid = qid.fqid;
	}
	return 0;
}

static void
dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
	struct fsl_mc_io *dpni =
		(struct fsl_mc_io *)priv->eth_dev->process_private;
	uint8_t options = 0;
	int ret;
	struct dpni_queue cfg;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	PMD_INIT_FUNC_TRACE();

	total_nb_rx_desc -= dpaa2_q->nb_desc;

	if (dpaa2_q->cgid != 0xff) {
		options = DPNI_QUEUE_OPT_CLEAR_CGID;
		cfg.cgid = dpaa2_q->cgid;

		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX,
				     dpaa2_q->tc_index, dpaa2_q->flow_id,
				     options, &cfg);
		if (ret)
			DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
					dpaa2_q->fqid, ret);
		priv->cgid_in_use[dpaa2_q->cgid] = 0;
		dpaa2_q->cgid = 0xff;
	}
}

1050f40adb40SHemant Agrawal static uint32_t
dpaa2_dev_rx_queue_count(void * rx_queue)10518d7d4fcdSKonstantin Ananyev dpaa2_dev_rx_queue_count(void *rx_queue)
1052f40adb40SHemant Agrawal {
1053f40adb40SHemant Agrawal int32_t ret;
1054f40adb40SHemant Agrawal struct dpaa2_queue *dpaa2_q;
1055f40adb40SHemant Agrawal struct qbman_swp *swp;
1056f40adb40SHemant Agrawal struct qbman_fq_query_np_rslt state;
1057f40adb40SHemant Agrawal uint32_t frame_cnt = 0;
1058f40adb40SHemant Agrawal
1059f40adb40SHemant Agrawal if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1060f40adb40SHemant Agrawal ret = dpaa2_affine_qbman_swp();
1061f40adb40SHemant Agrawal if (ret) {
1062d527f5d9SNipun Gupta DPAA2_PMD_ERR(
1063d527f5d9SNipun Gupta "Failed to allocate IO portal, tid: %d\n",
1064d527f5d9SNipun Gupta rte_gettid());
1065f40adb40SHemant Agrawal return -EINVAL;
1066f40adb40SHemant Agrawal }
1067f40adb40SHemant Agrawal }
1068f40adb40SHemant Agrawal swp = DPAA2_PER_LCORE_PORTAL;
1069f40adb40SHemant Agrawal
10708d7d4fcdSKonstantin Ananyev dpaa2_q = rx_queue;
1071f40adb40SHemant Agrawal
1072f40adb40SHemant Agrawal if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
1073f40adb40SHemant Agrawal frame_cnt = qbman_fq_state_frame_count(&state);
10748d7d4fcdSKonstantin Ananyev DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
10758d7d4fcdSKonstantin Ananyev rx_queue, frame_cnt);
1076f40adb40SHemant Agrawal }
1077f40adb40SHemant Agrawal return frame_cnt;
1078f40adb40SHemant Agrawal }
1079f40adb40SHemant Agrawal
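/* Report the packet types the Rx path can recognise. Only valid when
 * one of the driver's own Rx burst handlers is installed.
 */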
1080a5fc38d4SHemant Agrawal static const uint32_t *
1081a5fc38d4SHemant Agrawal dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
1082a5fc38d4SHemant Agrawal {
1083a5fc38d4SHemant Agrawal static const uint32_t ptypes[] = {
1084a5fc38d4SHemant Agrawal /* todo - add more types */
1085a5fc38d4SHemant Agrawal RTE_PTYPE_L2_ETHER,
1086a5fc38d4SHemant Agrawal RTE_PTYPE_L3_IPV4,
1087a5fc38d4SHemant Agrawal RTE_PTYPE_L3_IPV4_EXT,
1088a5fc38d4SHemant Agrawal RTE_PTYPE_L3_IPV6,
1089a5fc38d4SHemant Agrawal RTE_PTYPE_L3_IPV6_EXT,
1090a5fc38d4SHemant Agrawal RTE_PTYPE_L4_TCP,
1091a5fc38d4SHemant Agrawal RTE_PTYPE_L4_UDP,
1092a5fc38d4SHemant Agrawal RTE_PTYPE_L4_SCTP,
1093a5fc38d4SHemant Agrawal RTE_PTYPE_L4_ICMP,
1094a5fc38d4SHemant Agrawal RTE_PTYPE_UNKNOWN
1095a5fc38d4SHemant Agrawal };
1096a5fc38d4SHemant Agrawal
1097a3a997f0SHemant Agrawal if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
109820191ab3SNipun Gupta dev->rx_pkt_burst == dpaa2_dev_rx ||
1099a3a997f0SHemant Agrawal dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
1100a5fc38d4SHemant Agrawal return ptypes;
1101a5fc38d4SHemant Agrawal return NULL;
1102a5fc38d4SHemant Agrawal }
1103a5fc38d4SHemant Agrawal
1104c5acbb5eSHemant Agrawal /**
1105c5acbb5eSHemant Agrawal * DPAA2 link interrupt handler
1106c5acbb5eSHemant Agrawal *
1107c5acbb5eSHemant Agrawal * @param param
11087be78d02SJosh Soref * The address of the parameter (struct rte_eth_dev *) registered earlier.
1109c5acbb5eSHemant Agrawal *
1110c5acbb5eSHemant Agrawal * @return
1111c5acbb5eSHemant Agrawal * void
1112c5acbb5eSHemant Agrawal */
1113c5acbb5eSHemant Agrawal static void
1114c5acbb5eSHemant Agrawal dpaa2_interrupt_handler(void *param)
1115c5acbb5eSHemant Agrawal {
1116c5acbb5eSHemant Agrawal struct rte_eth_dev *dev = param;
1117c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
111881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1119c5acbb5eSHemant Agrawal int ret;
1120c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX;
1121c5acbb5eSHemant Agrawal unsigned int status = 0, clear = 0;
1122c5acbb5eSHemant Agrawal
1123c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE();
1124c5acbb5eSHemant Agrawal
1125c5acbb5eSHemant Agrawal if (dpni == NULL) {
1126a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1127c5acbb5eSHemant Agrawal return;
1128c5acbb5eSHemant Agrawal }
1129c5acbb5eSHemant Agrawal
1130c5acbb5eSHemant Agrawal ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1131c5acbb5eSHemant Agrawal irq_index, &status);
1132c5acbb5eSHemant Agrawal if (unlikely(ret)) {
1133a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1134c5acbb5eSHemant Agrawal clear = 0xffffffff;
1135c5acbb5eSHemant Agrawal goto out;
1136c5acbb5eSHemant Agrawal }
1137c5acbb5eSHemant Agrawal
1138c5acbb5eSHemant Agrawal if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1139c5acbb5eSHemant Agrawal clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1140c5acbb5eSHemant Agrawal dpaa2_dev_link_update(dev, 0);
1141c5acbb5eSHemant Agrawal /* notify all the callbacks registered for the link status event */
11425723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1143c5acbb5eSHemant Agrawal }
1144c5acbb5eSHemant Agrawal out:
1145c5acbb5eSHemant Agrawal ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1146c5acbb5eSHemant Agrawal irq_index, clear);
1147c5acbb5eSHemant Agrawal if (unlikely(ret))
1148a10a988aSShreyansh Jain DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1149c5acbb5eSHemant Agrawal }
1150c5acbb5eSHemant Agrawal
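/* Enable or disable delivery of the DPNI link-change interrupt by
 * programming the IRQ mask and then toggling the IRQ enable state.
 */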
1151c5acbb5eSHemant Agrawal static int
1152c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1153c5acbb5eSHemant Agrawal {
1154c5acbb5eSHemant Agrawal int err = 0;
1155c5acbb5eSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
115681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1157c5acbb5eSHemant Agrawal int irq_index = DPNI_IRQ_INDEX;
1158c5acbb5eSHemant Agrawal unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1159c5acbb5eSHemant Agrawal
1160c5acbb5eSHemant Agrawal PMD_INIT_FUNC_TRACE();
1161c5acbb5eSHemant Agrawal
1162c5acbb5eSHemant Agrawal err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1163c5acbb5eSHemant Agrawal irq_index, mask);
1164c5acbb5eSHemant Agrawal if (err < 0) {
1165a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1166c5acbb5eSHemant Agrawal strerror(-err));
1167c5acbb5eSHemant Agrawal return err;
1168c5acbb5eSHemant Agrawal }
1169c5acbb5eSHemant Agrawal
1170c5acbb5eSHemant Agrawal err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1171c5acbb5eSHemant Agrawal irq_index, enable);
1172c5acbb5eSHemant Agrawal if (err < 0)
1173a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1174c5acbb5eSHemant Agrawal strerror(-err));
1175c5acbb5eSHemant Agrawal
1176c5acbb5eSHemant Agrawal return err;
1177c5acbb5eSHemant Agrawal }
1178c5acbb5eSHemant Agrawal
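/* Start the device: enable the DPNI, bring the link up, resolve the FQIDs
 * of the Rx (and optional error) queues, program the error-frame behaviour
 * and, if requested, hook up the link status change interrupt.
 */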
11793e5a335dSHemant Agrawal static int
11803e5a335dSHemant Agrawal dpaa2_dev_start(struct rte_eth_dev *dev)
11813e5a335dSHemant Agrawal {
1182c5acbb5eSHemant Agrawal struct rte_device *rdev = dev->device;
1183c5acbb5eSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev;
11843e5a335dSHemant Agrawal struct rte_eth_dev_data *data = dev->data;
11853e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = data->dev_private;
118681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
11873e5a335dSHemant Agrawal struct dpni_queue cfg;
1188ef18dafeSHemant Agrawal struct dpni_error_cfg err_cfg;
11893e5a335dSHemant Agrawal struct dpni_queue_id qid;
11903e5a335dSHemant Agrawal struct dpaa2_queue *dpaa2_q;
11913e5a335dSHemant Agrawal int ret, i;
1192c5acbb5eSHemant Agrawal struct rte_intr_handle *intr_handle;
1193c5acbb5eSHemant Agrawal
1194c5acbb5eSHemant Agrawal dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1195d61138d4SHarman Kalra intr_handle = dpaa2_dev->intr_handle;
11963e5a335dSHemant Agrawal
11973e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE();
11983e5a335dSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
11993e5a335dSHemant Agrawal if (ret) {
1200a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1201a10a988aSShreyansh Jain priv->hw_id, ret);
12023e5a335dSHemant Agrawal return ret;
12033e5a335dSHemant Agrawal }
12043e5a335dSHemant Agrawal
1205aa8c595aSHemant Agrawal /* Power up the phy. Needed to make the link go UP */
1206a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(dev);
1207a1f3a12cSHemant Agrawal
12083e5a335dSHemant Agrawal for (i = 0; i < data->nb_rx_queues; i++) {
12093e5a335dSHemant Agrawal dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
12103e5a335dSHemant Agrawal ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
12113e5a335dSHemant Agrawal DPNI_QUEUE_RX, dpaa2_q->tc_index,
12123e5a335dSHemant Agrawal dpaa2_q->flow_id, &cfg, &qid);
12133e5a335dSHemant Agrawal if (ret) {
1214a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in getting flow information: "
1215a10a988aSShreyansh Jain "err=%d", ret);
12163e5a335dSHemant Agrawal return ret;
12173e5a335dSHemant Agrawal }
12183e5a335dSHemant Agrawal dpaa2_q->fqid = qid.fqid;
12193e5a335dSHemant Agrawal }
12203e5a335dSHemant Agrawal
12214690a611SNipun Gupta if (dpaa2_enable_err_queue) {
12224690a611SNipun Gupta ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
12234690a611SNipun Gupta DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
12244690a611SNipun Gupta if (ret) {
12254690a611SNipun Gupta DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
12264690a611SNipun Gupta ret);
12274690a611SNipun Gupta return ret;
12284690a611SNipun Gupta }
12294690a611SNipun Gupta dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
12304690a611SNipun Gupta dpaa2_q->fqid = qid.fqid;
12314690a611SNipun Gupta dpaa2_q->eth_data = dev->data;
12324690a611SNipun Gupta
12334690a611SNipun Gupta err_cfg.errors = DPNI_ERROR_DISC;
12344690a611SNipun Gupta err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
12354690a611SNipun Gupta } else {
12364690a611SNipun Gupta /* For checksum errors, send the frames to the normal path
12374690a611SNipun Gupta * and record the error in the frame annotation
12384690a611SNipun Gupta */
1239ef18dafeSHemant Agrawal err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
12404690a611SNipun Gupta
12414690a611SNipun Gupta /* packets with parse errors are not to be dropped */
124234356a5dSShreyansh Jain err_cfg.errors |= DPNI_ERROR_PHE;
1243ef18dafeSHemant Agrawal
1244ef18dafeSHemant Agrawal err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
12454690a611SNipun Gupta }
1246ef18dafeSHemant Agrawal err_cfg.set_frame_annotation = true;
1247ef18dafeSHemant Agrawal
1248ef18dafeSHemant Agrawal ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1249ef18dafeSHemant Agrawal priv->token, &err_cfg);
1250ef18dafeSHemant Agrawal if (ret) {
1251a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
1252a10a988aSShreyansh Jain ret);
1253ef18dafeSHemant Agrawal return ret;
1254ef18dafeSHemant Agrawal }
1255ef18dafeSHemant Agrawal
1256c5acbb5eSHemant Agrawal /* if the interrupts were configured on this device */
1257d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) &&
1258d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) {
1259c5acbb5eSHemant Agrawal /* Registering LSC interrupt handler */
1260c5acbb5eSHemant Agrawal rte_intr_callback_register(intr_handle,
1261c5acbb5eSHemant Agrawal dpaa2_interrupt_handler,
1262c5acbb5eSHemant Agrawal (void *)dev);
1263c5acbb5eSHemant Agrawal
1264c5acbb5eSHemant Agrawal /* enable vfio intr/eventfd mapping
1265c5acbb5eSHemant Agrawal * Interrupt index 0 is required, so we can not use
1266c5acbb5eSHemant Agrawal * rte_intr_enable.
1267c5acbb5eSHemant Agrawal */
1268c5acbb5eSHemant Agrawal rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1269c5acbb5eSHemant Agrawal
1270c5acbb5eSHemant Agrawal /* enable dpni_irqs */
1271c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 1);
1272c5acbb5eSHemant Agrawal }
1273c5acbb5eSHemant Agrawal
127416c4a3c4SNipun Gupta /* Change the tx burst function if ordered queues are used */
127516c4a3c4SNipun Gupta if (priv->en_ordered)
127616c4a3c4SNipun Gupta dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
127716c4a3c4SNipun Gupta
12783e5a335dSHemant Agrawal return 0;
12793e5a335dSHemant Agrawal }
12803e5a335dSHemant Agrawal
12813e5a335dSHemant Agrawal /**
12823e5a335dSHemant Agrawal * This routine disables all traffic on the adapter by issuing a
12833e5a335dSHemant Agrawal * global reset on the MAC.
12843e5a335dSHemant Agrawal */
128562024eb8SIvan Ilchenko static int
12863e5a335dSHemant Agrawal dpaa2_dev_stop(struct rte_eth_dev *dev)
12873e5a335dSHemant Agrawal {
12883e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
128981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
12903e5a335dSHemant Agrawal int ret;
1291c56c86ffSHemant Agrawal struct rte_eth_link link;
1292d192fd32SVanshika Shukla struct rte_device *rdev = dev->device;
1293d192fd32SVanshika Shukla struct rte_intr_handle *intr_handle;
1294d192fd32SVanshika Shukla struct rte_dpaa2_device *dpaa2_dev;
1295d192fd32SVanshika Shukla
1296d192fd32SVanshika Shukla dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1297d192fd32SVanshika Shukla intr_handle = dpaa2_dev->intr_handle;
12983e5a335dSHemant Agrawal
12993e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE();
13003e5a335dSHemant Agrawal
1301c5acbb5eSHemant Agrawal /* reset interrupt callback */
1302d61138d4SHarman Kalra if (intr_handle && rte_intr_fd_get(intr_handle) &&
1303d61138d4SHarman Kalra dev->data->dev_conf.intr_conf.lsc != 0) {
1304c5acbb5eSHemant Agrawal /* disable dpni irqs */
1305c5acbb5eSHemant Agrawal dpaa2_eth_setup_irqs(dev, 0);
1306c5acbb5eSHemant Agrawal
1307c5acbb5eSHemant Agrawal /* disable vfio intr before callback unregister */
1308c5acbb5eSHemant Agrawal rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1309c5acbb5eSHemant Agrawal
1310c5acbb5eSHemant Agrawal /* Unregistering LSC interrupt handler */
1311c5acbb5eSHemant Agrawal rte_intr_callback_unregister(intr_handle,
1312c5acbb5eSHemant Agrawal dpaa2_interrupt_handler,
1313c5acbb5eSHemant Agrawal (void *)dev);
1314c5acbb5eSHemant Agrawal }
1315c5acbb5eSHemant Agrawal
1316a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(dev);
1317a1f3a12cSHemant Agrawal
13183e5a335dSHemant Agrawal ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
13193e5a335dSHemant Agrawal if (ret) {
1320a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
13213e5a335dSHemant Agrawal ret, priv->hw_id);
132262024eb8SIvan Ilchenko return ret;
13233e5a335dSHemant Agrawal }
1324c56c86ffSHemant Agrawal
1325c56c86ffSHemant Agrawal /* clear the recorded link status */
1326c56c86ffSHemant Agrawal memset(&link, 0, sizeof(link));
13277e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link);
132862024eb8SIvan Ilchenko
132962024eb8SIvan Ilchenko return 0;
13303e5a335dSHemant Agrawal }
13313e5a335dSHemant Agrawal
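/* Release the port: reset and close the DPNI object, free the Rx/Tx queues
 * and the per-process private data. Only the primary process performs the
 * teardown.
 */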
1332b142387bSThomas Monjalon static int
13333e5a335dSHemant Agrawal dpaa2_dev_close(struct rte_eth_dev *dev)
13343e5a335dSHemant Agrawal {
13353e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
133681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
13375964d36aSSachin Saxena int i, ret;
1338a1f3a12cSHemant Agrawal struct rte_eth_link link;
13393e5a335dSHemant Agrawal
13403e5a335dSHemant Agrawal PMD_INIT_FUNC_TRACE();
13413e5a335dSHemant Agrawal
13425964d36aSSachin Saxena if (rte_eal_process_type() != RTE_PROC_PRIMARY)
13435964d36aSSachin Saxena return 0;
13446a556bd6SHemant Agrawal
13455964d36aSSachin Saxena if (!dpni) {
13465964d36aSSachin Saxena DPAA2_PMD_WARN("Already closed or not started");
13475964d36aSSachin Saxena return -1;
13485964d36aSSachin Saxena }
13495964d36aSSachin Saxena
1350ac624068SGagandeep Singh dpaa2_tm_deinit(dev);
13515964d36aSSachin Saxena dpaa2_flow_clean(dev);
13523e5a335dSHemant Agrawal /* Clean the device first */
13533e5a335dSHemant Agrawal ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
13543e5a335dSHemant Agrawal if (ret) {
1355a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1356b142387bSThomas Monjalon return -1;
13573e5a335dSHemant Agrawal }
1358a1f3a12cSHemant Agrawal
1359a1f3a12cSHemant Agrawal memset(&link, 0, sizeof(link));
13607e2eb5f0SStephen Hemminger rte_eth_linkstatus_set(dev, &link);
1361b142387bSThomas Monjalon
13625964d36aSSachin Saxena /* Free private queues memory */
13635964d36aSSachin Saxena dpaa2_free_rx_tx_queues(dev);
13645964d36aSSachin Saxena /* Close the device at the underlying layer */
13655964d36aSSachin Saxena ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
13665964d36aSSachin Saxena if (ret) {
13675964d36aSSachin Saxena DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
13685964d36aSSachin Saxena ret);
13695964d36aSSachin Saxena }
13705964d36aSSachin Saxena
13715964d36aSSachin Saxena /* Free the memory allocated for the ethernet private data and the dpni object */
13725964d36aSSachin Saxena priv->hw = NULL;
13735964d36aSSachin Saxena dev->process_private = NULL;
13745964d36aSSachin Saxena rte_free(dpni);
13755964d36aSSachin Saxena
13765964d36aSSachin Saxena for (i = 0; i < MAX_TCS; i++)
13775964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
13785964d36aSSachin Saxena
13795964d36aSSachin Saxena if (priv->extract.qos_extract_param)
13805964d36aSSachin Saxena rte_free((void *)(size_t)priv->extract.qos_extract_param);
13815964d36aSSachin Saxena
13825964d36aSSachin Saxena DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
1383b142387bSThomas Monjalon return 0;
13843e5a335dSHemant Agrawal }
13853e5a335dSHemant Agrawal
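/* Enabling unicast promiscuous mode also enables multicast promiscuity
 * so that all incoming traffic is accepted.
 */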
13869039c812SAndrew Rybchenko static int
1387c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_enable(
1388c0e5c69aSHemant Agrawal struct rte_eth_dev *dev)
1389c0e5c69aSHemant Agrawal {
1390c0e5c69aSHemant Agrawal int ret;
1391c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
139281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1393c0e5c69aSHemant Agrawal
1394c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE();
1395c0e5c69aSHemant Agrawal
1396c0e5c69aSHemant Agrawal if (dpni == NULL) {
1397a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
13989039c812SAndrew Rybchenko return -ENODEV;
1399c0e5c69aSHemant Agrawal }
1400c0e5c69aSHemant Agrawal
1401c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1402c0e5c69aSHemant Agrawal if (ret < 0)
1403a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
14045d5aeeedSHemant Agrawal
14055d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
14065d5aeeedSHemant Agrawal if (ret < 0)
1407a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
14089039c812SAndrew Rybchenko
14099039c812SAndrew Rybchenko return ret;
1410c0e5c69aSHemant Agrawal }
1411c0e5c69aSHemant Agrawal
14129039c812SAndrew Rybchenko static int
1413c0e5c69aSHemant Agrawal dpaa2_dev_promiscuous_disable(
1414c0e5c69aSHemant Agrawal struct rte_eth_dev *dev)
1415c0e5c69aSHemant Agrawal {
1416c0e5c69aSHemant Agrawal int ret;
1417c0e5c69aSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
141881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1419c0e5c69aSHemant Agrawal
1420c0e5c69aSHemant Agrawal PMD_INIT_FUNC_TRACE();
1421c0e5c69aSHemant Agrawal
1422c0e5c69aSHemant Agrawal if (dpni == NULL) {
1423a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
14249039c812SAndrew Rybchenko return -ENODEV;
1425c0e5c69aSHemant Agrawal }
1426c0e5c69aSHemant Agrawal
1427c0e5c69aSHemant Agrawal ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1428c0e5c69aSHemant Agrawal if (ret < 0)
1429a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
14305d5aeeedSHemant Agrawal
14315d5aeeedSHemant Agrawal if (dev->data->all_multicast == 0) {
14325d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
14335d5aeeedSHemant Agrawal priv->token, false);
14345d5aeeedSHemant Agrawal if (ret < 0)
1435a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
14365d5aeeedSHemant Agrawal ret);
14375d5aeeedSHemant Agrawal }
14389039c812SAndrew Rybchenko
14399039c812SAndrew Rybchenko return ret;
14405d5aeeedSHemant Agrawal }
14415d5aeeedSHemant Agrawal
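/* All-multicast mode is implemented through DPNI multicast promiscuity. */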
1442ca041cd4SIvan Ilchenko static int
14435d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_enable(
14445d5aeeedSHemant Agrawal struct rte_eth_dev *dev)
14455d5aeeedSHemant Agrawal {
14465d5aeeedSHemant Agrawal int ret;
14475d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
144881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
14495d5aeeedSHemant Agrawal
14505d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE();
14515d5aeeedSHemant Agrawal
14525d5aeeedSHemant Agrawal if (dpni == NULL) {
1453a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1454ca041cd4SIvan Ilchenko return -ENODEV;
14555d5aeeedSHemant Agrawal }
14565d5aeeedSHemant Agrawal
14575d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
14585d5aeeedSHemant Agrawal if (ret < 0)
1459a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1460ca041cd4SIvan Ilchenko
1461ca041cd4SIvan Ilchenko return ret;
14625d5aeeedSHemant Agrawal }
14635d5aeeedSHemant Agrawal
1464ca041cd4SIvan Ilchenko static int
14655d5aeeedSHemant Agrawal dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
14665d5aeeedSHemant Agrawal {
14675d5aeeedSHemant Agrawal int ret;
14685d5aeeedSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
146981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
14705d5aeeedSHemant Agrawal
14715d5aeeedSHemant Agrawal PMD_INIT_FUNC_TRACE();
14725d5aeeedSHemant Agrawal
14735d5aeeedSHemant Agrawal if (dpni == NULL) {
1474a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1475ca041cd4SIvan Ilchenko return -ENODEV;
14765d5aeeedSHemant Agrawal }
14775d5aeeedSHemant Agrawal
14785d5aeeedSHemant Agrawal /* must remain enabled while promiscuous mode is on */
14795d5aeeedSHemant Agrawal if (dev->data->promiscuous == 1)
1480ca041cd4SIvan Ilchenko return 0;
14815d5aeeedSHemant Agrawal
14825d5aeeedSHemant Agrawal ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
14835d5aeeedSHemant Agrawal if (ret < 0)
1484a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1485ca041cd4SIvan Ilchenko
1486ca041cd4SIvan Ilchenko return ret;
1487c0e5c69aSHemant Agrawal }
1488e31d4d21SHemant Agrawal
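/* Translate the requested MTU into a maximum Rx frame length
 * (MTU plus Ethernet and VLAN header overhead) and program it into the DPNI.
 */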
1489e31d4d21SHemant Agrawal static int
1490e31d4d21SHemant Agrawal dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1491e31d4d21SHemant Agrawal {
1492e31d4d21SHemant Agrawal int ret;
1493e31d4d21SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
149481c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
149535b2d13fSOlivier Matz uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
149644ea7355SAshish Jain + VLAN_TAG_SIZE;
1497e31d4d21SHemant Agrawal
1498e31d4d21SHemant Agrawal PMD_INIT_FUNC_TRACE();
1499e31d4d21SHemant Agrawal
1500e31d4d21SHemant Agrawal if (dpni == NULL) {
1501a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1502e31d4d21SHemant Agrawal return -EINVAL;
1503e31d4d21SHemant Agrawal }
1504e31d4d21SHemant Agrawal
1505e31d4d21SHemant Agrawal /* Set the Max Rx frame length as 'mtu' +
1506e31d4d21SHemant Agrawal * Maximum Ethernet header length
1507e31d4d21SHemant Agrawal */
1508e31d4d21SHemant Agrawal ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
15096f8be0fbSHemant Agrawal frame_size - RTE_ETHER_CRC_LEN);
1510e31d4d21SHemant Agrawal if (ret) {
1511a10a988aSShreyansh Jain DPAA2_PMD_ERR("Setting the max frame length failed");
1512e31d4d21SHemant Agrawal return -1;
1513e31d4d21SHemant Agrawal }
1514a10a988aSShreyansh Jain DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1515e31d4d21SHemant Agrawal return 0;
1516e31d4d21SHemant Agrawal }
1517e31d4d21SHemant Agrawal
1518b4d97b7dSHemant Agrawal static int
1519b4d97b7dSHemant Agrawal dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
15206d13ea8eSOlivier Matz struct rte_ether_addr *addr,
1521b4d97b7dSHemant Agrawal __rte_unused uint32_t index,
1522b4d97b7dSHemant Agrawal __rte_unused uint32_t pool)
1523b4d97b7dSHemant Agrawal {
1524b4d97b7dSHemant Agrawal int ret;
1525b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
152681c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1527b4d97b7dSHemant Agrawal
1528b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE();
1529b4d97b7dSHemant Agrawal
1530b4d97b7dSHemant Agrawal if (dpni == NULL) {
1531a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1532b4d97b7dSHemant Agrawal return -1;
1533b4d97b7dSHemant Agrawal }
1534b4d97b7dSHemant Agrawal
153596f7bfe8SSachin Saxena ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
153696f7bfe8SSachin Saxena addr->addr_bytes, 0, 0, 0);
1537b4d97b7dSHemant Agrawal if (ret)
1538a10a988aSShreyansh Jain DPAA2_PMD_ERR(
1539a10a988aSShreyansh Jain "error: Adding the MAC ADDR failed: err = %d", ret);
1540b4d97b7dSHemant Agrawal return 0;
1541b4d97b7dSHemant Agrawal }
1542b4d97b7dSHemant Agrawal
1543b4d97b7dSHemant Agrawal static void
1544b4d97b7dSHemant Agrawal dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1545b4d97b7dSHemant Agrawal uint32_t index)
1546b4d97b7dSHemant Agrawal {
1547b4d97b7dSHemant Agrawal int ret;
1548b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
154981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1550b4d97b7dSHemant Agrawal struct rte_eth_dev_data *data = dev->data;
15516d13ea8eSOlivier Matz struct rte_ether_addr *macaddr;
1552b4d97b7dSHemant Agrawal
1553b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE();
1554b4d97b7dSHemant Agrawal
1555b4d97b7dSHemant Agrawal macaddr = &data->mac_addrs[index];
1556b4d97b7dSHemant Agrawal
1557b4d97b7dSHemant Agrawal if (dpni == NULL) {
1558a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1559b4d97b7dSHemant Agrawal return;
1560b4d97b7dSHemant Agrawal }
1561b4d97b7dSHemant Agrawal
1562b4d97b7dSHemant Agrawal ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1563b4d97b7dSHemant Agrawal priv->token, macaddr->addr_bytes);
1564b4d97b7dSHemant Agrawal if (ret)
1565a10a988aSShreyansh Jain DPAA2_PMD_ERR(
1566a10a988aSShreyansh Jain "error: Removing the MAC ADDR failed: err = %d", ret);
1567b4d97b7dSHemant Agrawal }
1568b4d97b7dSHemant Agrawal
1569caccf8b3SOlivier Matz static int
1570b4d97b7dSHemant Agrawal dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
15716d13ea8eSOlivier Matz struct rte_ether_addr *addr)
1572b4d97b7dSHemant Agrawal {
1573b4d97b7dSHemant Agrawal int ret;
1574b4d97b7dSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
157581c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1576b4d97b7dSHemant Agrawal
1577b4d97b7dSHemant Agrawal PMD_INIT_FUNC_TRACE();
1578b4d97b7dSHemant Agrawal
1579b4d97b7dSHemant Agrawal if (dpni == NULL) {
1580a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1581caccf8b3SOlivier Matz return -EINVAL;
1582b4d97b7dSHemant Agrawal }
1583b4d97b7dSHemant Agrawal
1584b4d97b7dSHemant Agrawal ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1585b4d97b7dSHemant Agrawal priv->token, addr->addr_bytes);
1586b4d97b7dSHemant Agrawal
1587b4d97b7dSHemant Agrawal if (ret)
1588a10a988aSShreyansh Jain DPAA2_PMD_ERR(
1589a10a988aSShreyansh Jain "error: Setting the MAC ADDR failed %d", ret);
1590caccf8b3SOlivier Matz
1591caccf8b3SOlivier Matz return ret;
1592b4d97b7dSHemant Agrawal }
1593a10a988aSShreyansh Jain
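/* Basic statistics are read from DPNI statistics pages 0-2; per-queue
 * packet counters are maintained in software in the dpaa2_queue structures.
 */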
1594b0aa5459SHemant Agrawal static
1595d5b0924bSMatan Azrad int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1596b0aa5459SHemant Agrawal struct rte_eth_stats *stats)
1597b0aa5459SHemant Agrawal {
1598b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
159981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1600b0aa5459SHemant Agrawal int32_t retcode;
1601b0aa5459SHemant Agrawal uint8_t page0 = 0, page1 = 1, page2 = 2;
1602b0aa5459SHemant Agrawal union dpni_statistics value;
1603e43f2521SShreyansh Jain int i;
1604e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1605b0aa5459SHemant Agrawal
1606b0aa5459SHemant Agrawal memset(&value, 0, sizeof(union dpni_statistics));
1607b0aa5459SHemant Agrawal
1608b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE();
1609b0aa5459SHemant Agrawal
1610b0aa5459SHemant Agrawal if (!dpni) {
1611a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1612d5b0924bSMatan Azrad return -EINVAL;
1613b0aa5459SHemant Agrawal }
1614b0aa5459SHemant Agrawal
1615b0aa5459SHemant Agrawal if (!stats) {
1616a10a988aSShreyansh Jain DPAA2_PMD_ERR("stats is NULL");
1617d5b0924bSMatan Azrad return -EINVAL;
1618b0aa5459SHemant Agrawal }
1619b0aa5459SHemant Agrawal
1620b0aa5459SHemant Agrawal /* Get Counters from page_0 */
1621b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
162216bbc98aSShreyansh Jain page0, 0, &value);
1623b0aa5459SHemant Agrawal if (retcode)
1624b0aa5459SHemant Agrawal goto err;
1625b0aa5459SHemant Agrawal
1626b0aa5459SHemant Agrawal stats->ipackets = value.page_0.ingress_all_frames;
1627b0aa5459SHemant Agrawal stats->ibytes = value.page_0.ingress_all_bytes;
1628b0aa5459SHemant Agrawal
1629b0aa5459SHemant Agrawal /* Get Counters from page_1 */
1630b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
163116bbc98aSShreyansh Jain page1, 0, &value);
1632b0aa5459SHemant Agrawal if (retcode)
1633b0aa5459SHemant Agrawal goto err;
1634b0aa5459SHemant Agrawal
1635b0aa5459SHemant Agrawal stats->opackets = value.page_1.egress_all_frames;
1636b0aa5459SHemant Agrawal stats->obytes = value.page_1.egress_all_bytes;
1637b0aa5459SHemant Agrawal
1638b0aa5459SHemant Agrawal /* Get Counters from page_2 */
1639b0aa5459SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
164016bbc98aSShreyansh Jain page2, 0, &value);
1641b0aa5459SHemant Agrawal if (retcode)
1642b0aa5459SHemant Agrawal goto err;
1643b0aa5459SHemant Agrawal
1644b4d97b7dSHemant Agrawal /* Ingress drop frame count due to configured rules */
1645b4d97b7dSHemant Agrawal stats->ierrors = value.page_2.ingress_filtered_frames;
1646b4d97b7dSHemant Agrawal /* Ingress drop frame count due to error */
1647b4d97b7dSHemant Agrawal stats->ierrors += value.page_2.ingress_discarded_frames;
1648b4d97b7dSHemant Agrawal
1649b0aa5459SHemant Agrawal stats->oerrors = value.page_2.egress_discarded_frames;
1650b0aa5459SHemant Agrawal stats->imissed = value.page_2.ingress_nobuffer_discards;
1651b0aa5459SHemant Agrawal
1652e43f2521SShreyansh Jain /* Fill in per queue stats */
1653e43f2521SShreyansh Jain for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1654e43f2521SShreyansh Jain (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1655e43f2521SShreyansh Jain dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1656e43f2521SShreyansh Jain dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1657e43f2521SShreyansh Jain if (dpaa2_rxq)
1658e43f2521SShreyansh Jain stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1659e43f2521SShreyansh Jain if (dpaa2_txq)
1660e43f2521SShreyansh Jain stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1661e43f2521SShreyansh Jain
1662e43f2521SShreyansh Jain /* Byte counting is not implemented */
1663e43f2521SShreyansh Jain stats->q_ibytes[i] = 0;
1664e43f2521SShreyansh Jain stats->q_obytes[i] = 0;
1665e43f2521SShreyansh Jain }
1666e43f2521SShreyansh Jain
1667d5b0924bSMatan Azrad return 0;
1668b0aa5459SHemant Agrawal
1669b0aa5459SHemant Agrawal err:
1670a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1671d5b0924bSMatan Azrad return retcode;
1672b0aa5459SHemant Agrawal };
1673b0aa5459SHemant Agrawal
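/* Extended statistics: collect DPNI statistics pages 0-2 and, if an unused
 * congestion group is available, page 4, then translate the raw counters
 * through the dpaa2_xstats_strings table.
 */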
16741d6329b2SHemant Agrawal static int
16751d6329b2SHemant Agrawal dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
16761d6329b2SHemant Agrawal unsigned int n)
16771d6329b2SHemant Agrawal {
16781d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
167981c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
16801d6329b2SHemant Agrawal int32_t retcode;
1681c720c5f6SHemant Agrawal union dpni_statistics value[5] = {};
16821d6329b2SHemant Agrawal unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
16831d6329b2SHemant Agrawal
16841d6329b2SHemant Agrawal if (n < num)
16851d6329b2SHemant Agrawal return num;
16861d6329b2SHemant Agrawal
1687876b2c90SHemant Agrawal if (xstats == NULL)
1688876b2c90SHemant Agrawal return 0;
1689876b2c90SHemant Agrawal
16901d6329b2SHemant Agrawal /* Get Counters from page_0*/
16911d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
16921d6329b2SHemant Agrawal 0, 0, &value[0]);
16931d6329b2SHemant Agrawal if (retcode)
16941d6329b2SHemant Agrawal goto err;
16951d6329b2SHemant Agrawal
16961d6329b2SHemant Agrawal /* Get Counters from page_1*/
16971d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
16981d6329b2SHemant Agrawal 1, 0, &value[1]);
16991d6329b2SHemant Agrawal if (retcode)
17001d6329b2SHemant Agrawal goto err;
17011d6329b2SHemant Agrawal
17021d6329b2SHemant Agrawal /* Get Counters from page_2*/
17031d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
17041d6329b2SHemant Agrawal 2, 0, &value[2]);
17051d6329b2SHemant Agrawal if (retcode)
17061d6329b2SHemant Agrawal goto err;
17071d6329b2SHemant Agrawal
1708c720c5f6SHemant Agrawal for (i = 0; i < priv->max_cgs; i++) {
1709c720c5f6SHemant Agrawal if (!priv->cgid_in_use[i]) {
1710c720c5f6SHemant Agrawal /* Get Counters from page_4*/
1711c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1712c720c5f6SHemant Agrawal priv->token,
1713c720c5f6SHemant Agrawal 4, 0, &value[4]);
1714c720c5f6SHemant Agrawal if (retcode)
1715c720c5f6SHemant Agrawal goto err;
1716c720c5f6SHemant Agrawal break;
1717c720c5f6SHemant Agrawal }
1718c720c5f6SHemant Agrawal }
1719c720c5f6SHemant Agrawal
17201d6329b2SHemant Agrawal for (i = 0; i < num; i++) {
17211d6329b2SHemant Agrawal xstats[i].id = i;
17221d6329b2SHemant Agrawal xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
17231d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id];
17241d6329b2SHemant Agrawal }
17251d6329b2SHemant Agrawal return i;
17261d6329b2SHemant Agrawal err:
1727a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
17281d6329b2SHemant Agrawal return retcode;
17291d6329b2SHemant Agrawal }
17301d6329b2SHemant Agrawal
17311d6329b2SHemant Agrawal static int
17321d6329b2SHemant Agrawal dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
17331d6329b2SHemant Agrawal struct rte_eth_xstat_name *xstats_names,
1734876b2c90SHemant Agrawal unsigned int limit)
17351d6329b2SHemant Agrawal {
17361d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
17371d6329b2SHemant Agrawal
1738876b2c90SHemant Agrawal if (limit < stat_cnt)
1739876b2c90SHemant Agrawal return stat_cnt;
1740876b2c90SHemant Agrawal
17411d6329b2SHemant Agrawal if (xstats_names != NULL)
17421d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++)
1743f9acaf84SBruce Richardson strlcpy(xstats_names[i].name,
1744f9acaf84SBruce Richardson dpaa2_xstats_strings[i].name,
1745f9acaf84SBruce Richardson sizeof(xstats_names[i].name));
17461d6329b2SHemant Agrawal
17471d6329b2SHemant Agrawal return stat_cnt;
17481d6329b2SHemant Agrawal }
17491d6329b2SHemant Agrawal
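/* Fetch extended statistics either for the full set (ids == NULL) or for
 * the selected ids, using a full local copy in the latter case.
 */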
17501d6329b2SHemant Agrawal static int
17511d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
17521d6329b2SHemant Agrawal uint64_t *values, unsigned int n)
17531d6329b2SHemant Agrawal {
17541d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
17551d6329b2SHemant Agrawal uint64_t values_copy[stat_cnt];
17561d6329b2SHemant Agrawal
17571d6329b2SHemant Agrawal if (!ids) {
17581d6329b2SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
175981c42c84SShreyansh Jain struct fsl_mc_io *dpni =
176081c42c84SShreyansh Jain (struct fsl_mc_io *)dev->process_private;
17611d6329b2SHemant Agrawal int32_t retcode;
1762c720c5f6SHemant Agrawal union dpni_statistics value[5] = {};
17631d6329b2SHemant Agrawal
17641d6329b2SHemant Agrawal if (n < stat_cnt)
17651d6329b2SHemant Agrawal return stat_cnt;
17661d6329b2SHemant Agrawal
17671d6329b2SHemant Agrawal if (!values)
17681d6329b2SHemant Agrawal return 0;
17691d6329b2SHemant Agrawal
17701d6329b2SHemant Agrawal /* Get Counters from page_0*/
17711d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
17721d6329b2SHemant Agrawal 0, 0, &value[0]);
17731d6329b2SHemant Agrawal if (retcode)
17741d6329b2SHemant Agrawal return 0;
17751d6329b2SHemant Agrawal
17761d6329b2SHemant Agrawal /* Get Counters from page_1*/
17771d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
17781d6329b2SHemant Agrawal 1, 0, &value[1]);
17791d6329b2SHemant Agrawal if (retcode)
17801d6329b2SHemant Agrawal return 0;
17811d6329b2SHemant Agrawal
17821d6329b2SHemant Agrawal /* Get Counters from page_2*/
17831d6329b2SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
17841d6329b2SHemant Agrawal 2, 0, &value[2]);
17851d6329b2SHemant Agrawal if (retcode)
17861d6329b2SHemant Agrawal return 0;
17871d6329b2SHemant Agrawal
1788c720c5f6SHemant Agrawal /* Get Counters from page_4*/
1789c720c5f6SHemant Agrawal retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1790c720c5f6SHemant Agrawal 4, 0, &value[4]);
1791c720c5f6SHemant Agrawal if (retcode)
1792c720c5f6SHemant Agrawal return 0;
1793c720c5f6SHemant Agrawal
17941d6329b2SHemant Agrawal for (i = 0; i < stat_cnt; i++) {
17951d6329b2SHemant Agrawal values[i] = value[dpaa2_xstats_strings[i].page_id].
17961d6329b2SHemant Agrawal raw.counter[dpaa2_xstats_strings[i].stats_id];
17971d6329b2SHemant Agrawal }
17981d6329b2SHemant Agrawal return stat_cnt;
17991d6329b2SHemant Agrawal }
18001d6329b2SHemant Agrawal
18011d6329b2SHemant Agrawal dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
18021d6329b2SHemant Agrawal
18031d6329b2SHemant Agrawal for (i = 0; i < n; i++) {
18041d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) {
1805a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid");
18061d6329b2SHemant Agrawal return -1;
18071d6329b2SHemant Agrawal }
18081d6329b2SHemant Agrawal values[i] = values_copy[ids[i]];
18091d6329b2SHemant Agrawal }
18101d6329b2SHemant Agrawal return n;
18111d6329b2SHemant Agrawal }
18121d6329b2SHemant Agrawal
18131d6329b2SHemant Agrawal static int
18141d6329b2SHemant Agrawal dpaa2_xstats_get_names_by_id(
18151d6329b2SHemant Agrawal struct rte_eth_dev *dev,
18161d6329b2SHemant Agrawal const uint64_t *ids,
18178c9f976fSAndrew Rybchenko struct rte_eth_xstat_name *xstats_names,
18181d6329b2SHemant Agrawal unsigned int limit)
18191d6329b2SHemant Agrawal {
18201d6329b2SHemant Agrawal unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
18211d6329b2SHemant Agrawal struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
18221d6329b2SHemant Agrawal
18231d6329b2SHemant Agrawal if (!ids)
18241d6329b2SHemant Agrawal return dpaa2_xstats_get_names(dev, xstats_names, limit);
18251d6329b2SHemant Agrawal
18261d6329b2SHemant Agrawal dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
18271d6329b2SHemant Agrawal
18281d6329b2SHemant Agrawal for (i = 0; i < limit; i++) {
18291d6329b2SHemant Agrawal if (ids[i] >= stat_cnt) {
1830a10a988aSShreyansh Jain DPAA2_PMD_ERR("xstats id value isn't valid");
18311d6329b2SHemant Agrawal return -1;
18321d6329b2SHemant Agrawal }
18331d6329b2SHemant Agrawal strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
18341d6329b2SHemant Agrawal }
18351d6329b2SHemant Agrawal return limit;
18361d6329b2SHemant Agrawal }
18371d6329b2SHemant Agrawal
18389970a9adSIgor Romanov static int
18391d6329b2SHemant Agrawal dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1840b0aa5459SHemant Agrawal {
1841b0aa5459SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
184281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
18439970a9adSIgor Romanov int retcode;
1844e43f2521SShreyansh Jain int i;
1845e43f2521SShreyansh Jain struct dpaa2_queue *dpaa2_q;
1846b0aa5459SHemant Agrawal
1847b0aa5459SHemant Agrawal PMD_INIT_FUNC_TRACE();
1848b0aa5459SHemant Agrawal
1849b0aa5459SHemant Agrawal if (dpni == NULL) {
1850a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
18519970a9adSIgor Romanov return -EINVAL;
1852b0aa5459SHemant Agrawal }
1853b0aa5459SHemant Agrawal
1854b0aa5459SHemant Agrawal retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1855b0aa5459SHemant Agrawal if (retcode)
1856b0aa5459SHemant Agrawal goto error;
1857b0aa5459SHemant Agrawal
1858e43f2521SShreyansh Jain /* Reset the per queue stats in dpaa2_queue structure */
1859e43f2521SShreyansh Jain for (i = 0; i < priv->nb_rx_queues; i++) {
1860e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1861e43f2521SShreyansh Jain if (dpaa2_q)
1862e43f2521SShreyansh Jain dpaa2_q->rx_pkts = 0;
1863e43f2521SShreyansh Jain }
1864e43f2521SShreyansh Jain
1865e43f2521SShreyansh Jain for (i = 0; i < priv->nb_tx_queues; i++) {
1866e43f2521SShreyansh Jain dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1867e43f2521SShreyansh Jain if (dpaa2_q)
1868e43f2521SShreyansh Jain dpaa2_q->tx_pkts = 0;
1869e43f2521SShreyansh Jain }
1870e43f2521SShreyansh Jain
18719970a9adSIgor Romanov return 0;
1872b0aa5459SHemant Agrawal
1873b0aa5459SHemant Agrawal error:
1874a10a988aSShreyansh Jain DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
18759970a9adSIgor Romanov return retcode;
1876b0aa5459SHemant Agrawal };
1877b0aa5459SHemant Agrawal
1878c56c86ffSHemant Agrawal /* return 0 means link status changed, -1 means not changed */
1879c56c86ffSHemant Agrawal static int
1880c56c86ffSHemant Agrawal dpaa2_dev_link_update(struct rte_eth_dev *dev,
1881eadcfd95SRohit Raj int wait_to_complete)
1882c56c86ffSHemant Agrawal {
1883c56c86ffSHemant Agrawal int ret;
1884c56c86ffSHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
188581c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
18867e2eb5f0SStephen Hemminger struct rte_eth_link link;
1887c56c86ffSHemant Agrawal struct dpni_link_state state = {0};
1888eadcfd95SRohit Raj uint8_t count;
1889c56c86ffSHemant Agrawal
1890c56c86ffSHemant Agrawal if (dpni == NULL) {
1891a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1892c56c86ffSHemant Agrawal return 0;
1893c56c86ffSHemant Agrawal }
1894c56c86ffSHemant Agrawal
1895eadcfd95SRohit Raj for (count = 0; count <= MAX_REPEAT_TIME; count++) {
1896eadcfd95SRohit Raj ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token,
1897eadcfd95SRohit Raj &state);
1898c56c86ffSHemant Agrawal if (ret < 0) {
189944e87c27SShreyansh Jain DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1900c56c86ffSHemant Agrawal return -1;
1901c56c86ffSHemant Agrawal }
1902295968d1SFerruh Yigit if (state.up == RTE_ETH_LINK_DOWN &&
1903eadcfd95SRohit Raj wait_to_complete)
1904eadcfd95SRohit Raj rte_delay_ms(CHECK_INTERVAL);
1905eadcfd95SRohit Raj else
1906eadcfd95SRohit Raj break;
1907eadcfd95SRohit Raj }
1908c56c86ffSHemant Agrawal
1909c56c86ffSHemant Agrawal memset(&link, 0, sizeof(struct rte_eth_link));
1910c56c86ffSHemant Agrawal link.link_status = state.up;
1911c56c86ffSHemant Agrawal link.link_speed = state.rate;
1912c56c86ffSHemant Agrawal
1913c56c86ffSHemant Agrawal if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1914295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1915c56c86ffSHemant Agrawal else
1916295968d1SFerruh Yigit link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1917c56c86ffSHemant Agrawal
19187e2eb5f0SStephen Hemminger ret = rte_eth_linkstatus_set(dev, &link);
19197e2eb5f0SStephen Hemminger if (ret == -1)
1920a10a988aSShreyansh Jain DPAA2_PMD_DEBUG("No change in status");
1921c56c86ffSHemant Agrawal else
1922a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
19237e2eb5f0SStephen Hemminger link.link_status ? "Up" : "Down");
19247e2eb5f0SStephen Hemminger
19257e2eb5f0SStephen Hemminger return ret;
1926c56c86ffSHemant Agrawal }
1927c56c86ffSHemant Agrawal
1928a1f3a12cSHemant Agrawal /**
1929a1f3a12cSHemant Agrawal * Toggle the DPNI to enable, if not already enabled.
1930a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling.
1931a1f3a12cSHemant Agrawal */
1932a1f3a12cSHemant Agrawal static int
1933a1f3a12cSHemant Agrawal dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1934a1f3a12cSHemant Agrawal {
1935a1f3a12cSHemant Agrawal int ret = -EINVAL;
1936a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv;
1937a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni;
1938a1f3a12cSHemant Agrawal int en = 0;
1939aa8c595aSHemant Agrawal struct dpni_link_state state = {0};
1940a1f3a12cSHemant Agrawal
1941a1f3a12cSHemant Agrawal priv = dev->data->dev_private;
194281c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private;
1943a1f3a12cSHemant Agrawal
1944a1f3a12cSHemant Agrawal if (dpni == NULL) {
1945a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
1946a1f3a12cSHemant Agrawal return ret;
1947a1f3a12cSHemant Agrawal }
1948a1f3a12cSHemant Agrawal
1949a1f3a12cSHemant Agrawal /* Check if DPNI is currently enabled */
1950a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1951a1f3a12cSHemant Agrawal if (ret) {
1952a1f3a12cSHemant Agrawal /* Unable to obtain dpni status; Not continuing */
1953a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1954a1f3a12cSHemant Agrawal return -EINVAL;
1955a1f3a12cSHemant Agrawal }
1956a1f3a12cSHemant Agrawal
1957a1f3a12cSHemant Agrawal /* Enable link if not already enabled */
1958a1f3a12cSHemant Agrawal if (!en) {
1959a1f3a12cSHemant Agrawal ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1960a1f3a12cSHemant Agrawal if (ret) {
1961a10a988aSShreyansh Jain DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1962a1f3a12cSHemant Agrawal return -EINVAL;
1963a1f3a12cSHemant Agrawal }
1964a1f3a12cSHemant Agrawal }
1965aa8c595aSHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1966aa8c595aSHemant Agrawal if (ret < 0) {
196744e87c27SShreyansh Jain DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1968aa8c595aSHemant Agrawal return -1;
1969aa8c595aSHemant Agrawal }
1970aa8c595aSHemant Agrawal
1971a1f3a12cSHemant Agrawal /* changing tx burst function to start enqueues */
1972a1f3a12cSHemant Agrawal dev->tx_pkt_burst = dpaa2_dev_tx;
1973aa8c595aSHemant Agrawal dev->data->dev_link.link_status = state.up;
19747e6ecac2SRohit Raj dev->data->dev_link.link_speed = state.rate;
1975a1f3a12cSHemant Agrawal
1976aa8c595aSHemant Agrawal if (state.up)
1977a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1978aa8c595aSHemant Agrawal else
1979a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1980a1f3a12cSHemant Agrawal return ret;
1981a1f3a12cSHemant Agrawal }
1982a1f3a12cSHemant Agrawal
1983a1f3a12cSHemant Agrawal /**
1984a1f3a12cSHemant Agrawal * Toggle the DPNI to disable, if not already disabled.
1985a1f3a12cSHemant Agrawal * This is not strictly PHY up/down - it is more of logical toggling.
1986a1f3a12cSHemant Agrawal */
1987a1f3a12cSHemant Agrawal static int
1988a1f3a12cSHemant Agrawal dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1989a1f3a12cSHemant Agrawal {
1990a1f3a12cSHemant Agrawal int ret = -EINVAL;
1991a1f3a12cSHemant Agrawal struct dpaa2_dev_priv *priv;
1992a1f3a12cSHemant Agrawal struct fsl_mc_io *dpni;
1993a1f3a12cSHemant Agrawal int dpni_enabled = 0;
1994a1f3a12cSHemant Agrawal int retries = 10;
1995a1f3a12cSHemant Agrawal
1996a1f3a12cSHemant Agrawal PMD_INIT_FUNC_TRACE();
1997a1f3a12cSHemant Agrawal
1998a1f3a12cSHemant Agrawal priv = dev->data->dev_private;
199981c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private;
2000a1f3a12cSHemant Agrawal
2001a1f3a12cSHemant Agrawal if (dpni == NULL) {
2002a10a988aSShreyansh Jain DPAA2_PMD_ERR("Device has not yet been configured");
2003a1f3a12cSHemant Agrawal return ret;
2004a1f3a12cSHemant Agrawal }
2005a1f3a12cSHemant Agrawal
2006a1f3a12cSHemant Agrawal /* changing tx burst function to avoid any more enqueues */
2007*a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
2008a1f3a12cSHemant Agrawal
2009a1f3a12cSHemant Agrawal /* Loop while dpni_disable() attempts to drain the egress FQs
2010a1f3a12cSHemant Agrawal * and confirm them back to us.
2011a1f3a12cSHemant Agrawal */
2012a1f3a12cSHemant Agrawal do {
2013a1f3a12cSHemant Agrawal ret = dpni_disable(dpni, 0, priv->token);
2014a1f3a12cSHemant Agrawal if (ret) {
2015a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
2016a1f3a12cSHemant Agrawal return ret;
2017a1f3a12cSHemant Agrawal }
2018a1f3a12cSHemant Agrawal ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
2019a1f3a12cSHemant Agrawal if (ret) {
2020a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
2021a1f3a12cSHemant Agrawal return ret;
2022a1f3a12cSHemant Agrawal }
2023a1f3a12cSHemant Agrawal if (dpni_enabled)
2024a1f3a12cSHemant Agrawal /* Allow the MC some slack */
2025a1f3a12cSHemant Agrawal rte_delay_us(100 * 1000);
2026a1f3a12cSHemant Agrawal } while (dpni_enabled && --retries);
2027a1f3a12cSHemant Agrawal
2028a1f3a12cSHemant Agrawal if (!retries) {
2029a10a988aSShreyansh Jain DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
2030a1f3a12cSHemant Agrawal /* todo - we may have to manually clean up queues.
2031a1f3a12cSHemant Agrawal */
2032a1f3a12cSHemant Agrawal } else {
2033a10a988aSShreyansh Jain DPAA2_PMD_INFO("Port %d Link DOWN successful",
2034a1f3a12cSHemant Agrawal dev->data->port_id);
2035a1f3a12cSHemant Agrawal }
2036a1f3a12cSHemant Agrawal
2037a1f3a12cSHemant Agrawal dev->data->dev_link.link_status = 0;
2038a1f3a12cSHemant Agrawal
2039a1f3a12cSHemant Agrawal return ret;
2040a1f3a12cSHemant Agrawal }
2041a1f3a12cSHemant Agrawal
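/* Derive the ethdev flow-control mode from the DPNI link options:
 * the PAUSE and ASYM_PAUSE bits together encode none/Rx/Tx/full pause.
 */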
2042977d0006SHemant Agrawal static int
2043977d0006SHemant Agrawal dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2044977d0006SHemant Agrawal {
2045977d0006SHemant Agrawal int ret = -EINVAL;
2046977d0006SHemant Agrawal struct dpaa2_dev_priv *priv;
2047977d0006SHemant Agrawal struct fsl_mc_io *dpni;
2048977d0006SHemant Agrawal struct dpni_link_state state = {0};
2049977d0006SHemant Agrawal
2050977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE();
2051977d0006SHemant Agrawal
2052977d0006SHemant Agrawal priv = dev->data->dev_private;
205381c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private;
2054977d0006SHemant Agrawal
2055977d0006SHemant Agrawal if (dpni == NULL || fc_conf == NULL) {
2056a10a988aSShreyansh Jain DPAA2_PMD_ERR("device not configured");
2057977d0006SHemant Agrawal return ret;
2058977d0006SHemant Agrawal }
2059977d0006SHemant Agrawal
2060977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2061977d0006SHemant Agrawal if (ret) {
2062a10a988aSShreyansh Jain DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
2063977d0006SHemant Agrawal return ret;
2064977d0006SHemant Agrawal }
2065977d0006SHemant Agrawal
2066977d0006SHemant Agrawal memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
2067977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_PAUSE) {
2068977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE set
2069977d0006SHemant Agrawal * if ASYM_PAUSE not set,
2070977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame)
2071977d0006SHemant Agrawal * TX side flow control (send Pause frame)
2072977d0006SHemant Agrawal * if ASYM_PAUSE set,
2073977d0006SHemant Agrawal * RX Side flow control (handle received Pause frame)
2074977d0006SHemant Agrawal * No TX side flow control (send Pause frame disabled)
2075977d0006SHemant Agrawal */
2076977d0006SHemant Agrawal if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
2077295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_FULL;
2078977d0006SHemant Agrawal else
2079295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2080977d0006SHemant Agrawal } else {
2081977d0006SHemant Agrawal /* DPNI_LINK_OPT_PAUSE not set
2082977d0006SHemant Agrawal * if ASYM_PAUSE set,
2083977d0006SHemant Agrawal * TX side flow control (send Pause frame)
2084977d0006SHemant Agrawal * No RX side flow control (No action on pause frame rx)
2085977d0006SHemant Agrawal * if ASYM_PAUSE not set,
2086977d0006SHemant Agrawal * Flow control disabled
2087977d0006SHemant Agrawal */
2088977d0006SHemant Agrawal if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
2089295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2090977d0006SHemant Agrawal else
2091295968d1SFerruh Yigit fc_conf->mode = RTE_ETH_FC_NONE;
2092977d0006SHemant Agrawal }
2093977d0006SHemant Agrawal
2094977d0006SHemant Agrawal return ret;
2095977d0006SHemant Agrawal }
2096977d0006SHemant Agrawal
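/* Program the pause-frame configuration. The link is taken down before
 * dpni_set_link_cfg() and re-enabled afterwards.
 *
 * Illustrative application usage (not part of this driver):
 *     struct rte_eth_fc_conf fc = { .mode = RTE_ETH_FC_FULL };
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */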
2097977d0006SHemant Agrawal static int
2098977d0006SHemant Agrawal dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2099977d0006SHemant Agrawal {
2100977d0006SHemant Agrawal int ret = -EINVAL;
2101977d0006SHemant Agrawal struct dpaa2_dev_priv *priv;
2102977d0006SHemant Agrawal struct fsl_mc_io *dpni;
2103977d0006SHemant Agrawal struct dpni_link_state state = {0};
2104977d0006SHemant Agrawal struct dpni_link_cfg cfg = {0};
2105977d0006SHemant Agrawal
2106977d0006SHemant Agrawal PMD_INIT_FUNC_TRACE();
2107977d0006SHemant Agrawal
2108977d0006SHemant Agrawal priv = dev->data->dev_private;
210981c42c84SShreyansh Jain dpni = (struct fsl_mc_io *)dev->process_private;
2110977d0006SHemant Agrawal
2111977d0006SHemant Agrawal if (dpni == NULL) {
2112a10a988aSShreyansh Jain DPAA2_PMD_ERR("dpni is NULL");
2113977d0006SHemant Agrawal return ret;
2114977d0006SHemant Agrawal }
2115977d0006SHemant Agrawal
2116977d0006SHemant Agrawal /* It is necessary to obtain the current state before setting fc_conf
2117977d0006SHemant Agrawal * as MC would return error in case rate, autoneg or duplex values are
2118977d0006SHemant Agrawal * different.
2119977d0006SHemant Agrawal */
2120977d0006SHemant Agrawal ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2121977d0006SHemant Agrawal if (ret) {
2122a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2123977d0006SHemant Agrawal return -1;
2124977d0006SHemant Agrawal }
2125977d0006SHemant Agrawal
2126977d0006SHemant Agrawal /* Disable link before setting configuration */
2127977d0006SHemant Agrawal dpaa2_dev_set_link_down(dev);
2128977d0006SHemant Agrawal
2129977d0006SHemant Agrawal /* Based on fc_conf, update cfg */
2130977d0006SHemant Agrawal cfg.rate = state.rate;
2131977d0006SHemant Agrawal cfg.options = state.options;
2132977d0006SHemant Agrawal
2133977d0006SHemant Agrawal /* update cfg with fc_conf */
2134977d0006SHemant Agrawal switch (fc_conf->mode) {
2135295968d1SFerruh Yigit case RTE_ETH_FC_FULL:
2136977d0006SHemant Agrawal /* Full flow control;
2137977d0006SHemant Agrawal * OPT_PAUSE set, ASYM_PAUSE not set
2138977d0006SHemant Agrawal */
2139977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE;
2140977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2141f090a4c3SHemant Agrawal break;
2142295968d1SFerruh Yigit case RTE_ETH_FC_TX_PAUSE:
2143977d0006SHemant Agrawal /* Enable RX flow control
2144977d0006SHemant Agrawal * OPT_PAUSE not set;
2145977d0006SHemant Agrawal * ASYM_PAUSE set;
2146977d0006SHemant Agrawal */
2147977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2148977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2149977d0006SHemant Agrawal break;
2150295968d1SFerruh Yigit case RTE_ETH_FC_RX_PAUSE:
2151977d0006SHemant Agrawal /* Enable TX Flow control
2152977d0006SHemant Agrawal * OPT_PAUSE set
2153977d0006SHemant Agrawal * ASYM_PAUSE set
2154977d0006SHemant Agrawal */
2155977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_PAUSE;
2156977d0006SHemant Agrawal cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2157977d0006SHemant Agrawal break;
2158295968d1SFerruh Yigit case RTE_ETH_FC_NONE:
2159977d0006SHemant Agrawal /* Disable Flow control
2160977d0006SHemant Agrawal * OPT_PAUSE not set
2161977d0006SHemant Agrawal * ASYM_PAUSE not set
2162977d0006SHemant Agrawal */
2163977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2164977d0006SHemant Agrawal cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2165977d0006SHemant Agrawal break;
2166977d0006SHemant Agrawal default:
2167a10a988aSShreyansh Jain DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2168977d0006SHemant Agrawal fc_conf->mode);
2169977d0006SHemant Agrawal return -1;
2170977d0006SHemant Agrawal }
2171977d0006SHemant Agrawal
2172977d0006SHemant Agrawal ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2173977d0006SHemant Agrawal if (ret)
2174a10a988aSShreyansh Jain DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2175977d0006SHemant Agrawal ret);
2176977d0006SHemant Agrawal
2177977d0006SHemant Agrawal /* Enable link */
2178977d0006SHemant Agrawal dpaa2_dev_set_link_up(dev);
2179977d0006SHemant Agrawal
2180977d0006SHemant Agrawal return ret;
2181977d0006SHemant Agrawal }
2182977d0006SHemant Agrawal
218363d5c3b0SHemant Agrawal static int
218463d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
218563d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf)
218663d5c3b0SHemant Agrawal {
218763d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data;
2188271f5aeeSJun Yang struct dpaa2_dev_priv *priv = data->dev_private;
218963d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf;
2190271f5aeeSJun Yang int ret, tc_index;
219163d5c3b0SHemant Agrawal
219263d5c3b0SHemant Agrawal PMD_INIT_FUNC_TRACE();
219363d5c3b0SHemant Agrawal
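/*
 * Note: RSS distribution on DPAA2 is programmed per traffic class; the
 * same rss_hf selection is applied to (or removed from) every Rx TC below.
 */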
219463d5c3b0SHemant Agrawal if (rss_conf->rss_hf) {
2195271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2196271f5aeeSJun Yang ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2197271f5aeeSJun Yang tc_index);
219863d5c3b0SHemant Agrawal if (ret) {
2199271f5aeeSJun Yang DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2200271f5aeeSJun Yang tc_index);
220163d5c3b0SHemant Agrawal return ret;
220263d5c3b0SHemant Agrawal }
2203271f5aeeSJun Yang }
220463d5c3b0SHemant Agrawal } else {
2205271f5aeeSJun Yang for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2206271f5aeeSJun Yang ret = dpaa2_remove_flow_dist(dev, tc_index);
220763d5c3b0SHemant Agrawal if (ret) {
2208271f5aeeSJun Yang DPAA2_PMD_ERR(
2209271f5aeeSJun Yang "Unable to remove flow dist on tc%d",
2210271f5aeeSJun Yang tc_index);
221163d5c3b0SHemant Agrawal return ret;
221263d5c3b0SHemant Agrawal }
221363d5c3b0SHemant Agrawal }
2214271f5aeeSJun Yang }
221563d5c3b0SHemant Agrawal eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
221663d5c3b0SHemant Agrawal return 0;
221763d5c3b0SHemant Agrawal }
221863d5c3b0SHemant Agrawal
221963d5c3b0SHemant Agrawal static int
222063d5c3b0SHemant Agrawal dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
222163d5c3b0SHemant Agrawal struct rte_eth_rss_conf *rss_conf)
222263d5c3b0SHemant Agrawal {
222363d5c3b0SHemant Agrawal struct rte_eth_dev_data *data = dev->data;
222463d5c3b0SHemant Agrawal struct rte_eth_conf *eth_conf = &data->dev_conf;
222563d5c3b0SHemant Agrawal
222663d5c3b0SHemant Agrawal /* dpaa2 does not support rss_key, so the reported key length is 0 */
222763d5c3b0SHemant Agrawal rss_conf->rss_key_len = 0;
222863d5c3b0SHemant Agrawal rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
222963d5c3b0SHemant Agrawal return 0;
223063d5c3b0SHemant Agrawal }
223163d5c3b0SHemant Agrawal
2232b677d4c6SNipun Gupta int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2233b677d4c6SNipun Gupta int eth_rx_queue_id,
22343835cc22SNipun Gupta struct dpaa2_dpcon_dev *dpcon,
2235b677d4c6SNipun Gupta const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2236b677d4c6SNipun Gupta {
2237b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
223881c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2239b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2240b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id;
2241b677d4c6SNipun Gupta struct dpni_queue cfg;
22423835cc22SNipun Gupta uint8_t options, priority;
2243b677d4c6SNipun Gupta int ret;
2244b677d4c6SNipun Gupta
2245b677d4c6SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2246b677d4c6SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
22472d378863SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
22482d378863SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
224916c4a3c4SNipun Gupta else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
225016c4a3c4SNipun Gupta dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2251b677d4c6SNipun Gupta else
2252b677d4c6SNipun Gupta return -EINVAL;
2253b677d4c6SNipun Gupta
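/*
 * Derive a DPCON priority from the eventdev priority (0 is highest,
 * RTE_EVENT_DEV_PRIORITY_LOWEST is lowest) and the number of priorities
 * supported by this DPCON.
 */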
22543835cc22SNipun Gupta priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
22553835cc22SNipun Gupta (dpcon->num_priorities - 1);
22563835cc22SNipun Gupta
2257b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue));
2258b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST;
2259b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_DPCON;
22603835cc22SNipun Gupta cfg.destination.id = dpcon->dpcon_id;
22613835cc22SNipun Gupta cfg.destination.priority = priority;
2262b677d4c6SNipun Gupta
22632d378863SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
22642d378863SNipun Gupta options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
22652d378863SNipun Gupta cfg.destination.hold_active = 1;
22662d378863SNipun Gupta }
22672d378863SNipun Gupta
226816c4a3c4SNipun Gupta if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
226916c4a3c4SNipun Gupta !eth_priv->en_ordered) {
227016c4a3c4SNipun Gupta struct opr_cfg ocfg;
227116c4a3c4SNipun Gupta
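/*
 * Order restoration point (OPR) configuration for ordered scheduling.
 * Judging from the values used below, oprrws encodes the restoration
 * window size as (32 << oprrws) frames; the remaining fields are
 * described by the per-field comments.
 */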
227216c4a3c4SNipun Gupta /* Restoration window size = 256 frames */
227316c4a3c4SNipun Gupta ocfg.oprrws = 3;
227416c4a3c4SNipun Gupta /* Restoration window size = 512 frames for LX2 */
227516c4a3c4SNipun Gupta if (dpaa2_svr_family == SVR_LX2160A)
227616c4a3c4SNipun Gupta ocfg.oprrws = 4;
227716c4a3c4SNipun Gupta /* Auto advance NESN window enabled */
227816c4a3c4SNipun Gupta ocfg.oa = 1;
227916c4a3c4SNipun Gupta /* Late arrival window size disabled */
228016c4a3c4SNipun Gupta ocfg.olws = 0;
22817be78d02SJosh Soref /* ORL resource exhaustion advance NESN disabled */
228216c4a3c4SNipun Gupta ocfg.oeane = 0;
228316c4a3c4SNipun Gupta /* Loose ordering enabled */
228416c4a3c4SNipun Gupta ocfg.oloe = 1;
228516c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 1;
228616c4a3c4SNipun Gupta /* Strict ordering enabled if explicitly set */
228716c4a3c4SNipun Gupta if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
228816c4a3c4SNipun Gupta ocfg.oloe = 0;
228916c4a3c4SNipun Gupta eth_priv->en_loose_ordered = 0;
229016c4a3c4SNipun Gupta }
229116c4a3c4SNipun Gupta
229216c4a3c4SNipun Gupta ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
229316c4a3c4SNipun Gupta dpaa2_ethq->tc_index, flow_id,
22942cb2abf3SHemant Agrawal OPR_OPT_CREATE, &ocfg, 0);
229516c4a3c4SNipun Gupta if (ret) {
229616c4a3c4SNipun Gupta DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
229716c4a3c4SNipun Gupta return ret;
229816c4a3c4SNipun Gupta }
229916c4a3c4SNipun Gupta
230016c4a3c4SNipun Gupta eth_priv->en_ordered = 1;
230116c4a3c4SNipun Gupta }
230216c4a3c4SNipun Gupta
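/*
 * Store the dpaa2_queue pointer as the queue's user context so the event
 * dequeue path can recover the queue (and the callback selected above)
 * for every frame delivered through the DPCON.
 */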
2303b677d4c6SNipun Gupta options |= DPNI_QUEUE_OPT_USER_CTX;
23045ae1edffSHemant Agrawal cfg.user_context = (size_t)(dpaa2_ethq);
2305b677d4c6SNipun Gupta
2306b677d4c6SNipun Gupta ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2307b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg);
2308b677d4c6SNipun Gupta if (ret) {
2309a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2310b677d4c6SNipun Gupta return ret;
2311b677d4c6SNipun Gupta }
2312b677d4c6SNipun Gupta
2313b677d4c6SNipun Gupta memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2314b677d4c6SNipun Gupta
2315b677d4c6SNipun Gupta return 0;
2316b677d4c6SNipun Gupta }
2317b677d4c6SNipun Gupta
2318b677d4c6SNipun Gupta int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2319b677d4c6SNipun Gupta int eth_rx_queue_id)
2320b677d4c6SNipun Gupta {
2321b677d4c6SNipun Gupta struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
232281c42c84SShreyansh Jain struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2323b677d4c6SNipun Gupta struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2324b677d4c6SNipun Gupta uint8_t flow_id = dpaa2_ethq->flow_id;
2325b677d4c6SNipun Gupta struct dpni_queue cfg;
2326b677d4c6SNipun Gupta uint8_t options;
2327b677d4c6SNipun Gupta int ret;
2328b677d4c6SNipun Gupta
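/*
 * Detaching just re-points the Rx queue destination to DPNI_DEST_NONE, so
 * frames stop being delivered to the DPCON and the queue reverts to plain
 * (polled) dequeue.
 */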
2329b677d4c6SNipun Gupta memset(&cfg, 0, sizeof(struct dpni_queue));
2330b677d4c6SNipun Gupta options = DPNI_QUEUE_OPT_DEST;
2331b677d4c6SNipun Gupta cfg.destination.type = DPNI_DEST_NONE;
2332b677d4c6SNipun Gupta
2333b677d4c6SNipun Gupta ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2334b677d4c6SNipun Gupta dpaa2_ethq->tc_index, flow_id, options, &cfg);
2335b677d4c6SNipun Gupta if (ret)
2336a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2337b677d4c6SNipun Gupta
2338b677d4c6SNipun Gupta return ret;
2339b677d4c6SNipun Gupta }
2340b677d4c6SNipun Gupta
2341fe2b986aSSunil Kumar Kori static int
2342fb7ad441SThomas Monjalon dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
2343fb7ad441SThomas Monjalon const struct rte_flow_ops **ops)
2344fe2b986aSSunil Kumar Kori {
2345fe2b986aSSunil Kumar Kori if (!dev)
2346fe2b986aSSunil Kumar Kori return -ENODEV;
2347fe2b986aSSunil Kumar Kori
2348fb7ad441SThomas Monjalon *ops = &dpaa2_flow_ops;
2349fb7ad441SThomas Monjalon return 0;
2350fe2b986aSSunil Kumar Kori }
2351fe2b986aSSunil Kumar Kori
2352de1d70f0SHemant Agrawal static void
2353de1d70f0SHemant Agrawal dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2354de1d70f0SHemant Agrawal struct rte_eth_rxq_info *qinfo)
2355de1d70f0SHemant Agrawal {
2356de1d70f0SHemant Agrawal struct dpaa2_queue *rxq;
2357731fa400SHemant Agrawal struct dpaa2_dev_priv *priv = dev->data->dev_private;
2358731fa400SHemant Agrawal struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2359731fa400SHemant Agrawal uint16_t max_frame_length;
2360de1d70f0SHemant Agrawal
2361de1d70f0SHemant Agrawal rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2362de1d70f0SHemant Agrawal
2363de1d70f0SHemant Agrawal qinfo->mp = rxq->mb_pool;
2364de1d70f0SHemant Agrawal qinfo->scattered_rx = dev->data->scattered_rx;
2365de1d70f0SHemant Agrawal qinfo->nb_desc = rxq->nb_desc;
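/* No per-queue Rx buffer size is tracked here; report the DPNI maximum
 * frame length instead, when it can be queried.
 */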
2366731fa400SHemant Agrawal if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
2367731fa400SHemant Agrawal &max_frame_length) == 0)
2368731fa400SHemant Agrawal qinfo->rx_buf_size = max_frame_length;
2369de1d70f0SHemant Agrawal
2370de1d70f0SHemant Agrawal qinfo->conf.rx_free_thresh = 1;
2371de1d70f0SHemant Agrawal qinfo->conf.rx_drop_en = 1;
2372de1d70f0SHemant Agrawal qinfo->conf.rx_deferred_start = 0;
2373de1d70f0SHemant Agrawal qinfo->conf.offloads = rxq->offloads;
2374de1d70f0SHemant Agrawal }
2375de1d70f0SHemant Agrawal
2376de1d70f0SHemant Agrawal static void
2377de1d70f0SHemant Agrawal dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2378de1d70f0SHemant Agrawal struct rte_eth_txq_info *qinfo)
2379de1d70f0SHemant Agrawal {
2380de1d70f0SHemant Agrawal struct dpaa2_queue *txq;
2381de1d70f0SHemant Agrawal
2382de1d70f0SHemant Agrawal txq = dev->data->tx_queues[queue_id];
2383de1d70f0SHemant Agrawal
2384de1d70f0SHemant Agrawal qinfo->nb_desc = txq->nb_desc;
2385de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.pthresh = 0;
2386de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.hthresh = 0;
2387de1d70f0SHemant Agrawal qinfo->conf.tx_thresh.wthresh = 0;
2388de1d70f0SHemant Agrawal
2389de1d70f0SHemant Agrawal qinfo->conf.tx_free_thresh = 0;
2390de1d70f0SHemant Agrawal qinfo->conf.tx_rs_thresh = 0;
2391de1d70f0SHemant Agrawal qinfo->conf.offloads = txq->offloads;
2392de1d70f0SHemant Agrawal qinfo->conf.tx_deferred_start = 0;
2393de1d70f0SHemant Agrawal }
2394de1d70f0SHemant Agrawal
2395ac624068SGagandeep Singh static int
2396ac624068SGagandeep Singh dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2397ac624068SGagandeep Singh {
2398ac624068SGagandeep Singh *(const void **)ops = &dpaa2_tm_ops;
2399ac624068SGagandeep Singh
2400ac624068SGagandeep Singh return 0;
2401ac624068SGagandeep Singh }
2402ac624068SGagandeep Singh
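/*
 * Threads not created by the EAL (e.g. started with pthread_create()) are
 * expected to call this once before invoking the Rx/Tx burst APIs on dpaa2
 * ports, so that a QBMAN software portal is affined to the calling thread.
 * A minimal usage sketch (hypothetical application thread):
 *
 *     static void *worker(void *arg) {
 *         rte_pmd_dpaa2_thread_init();
 *         ... rte_eth_rx_burst()/rte_eth_tx_burst() on dpaa2 ports ...
 *     }
 */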
2403a5b375edSNipun Gupta void
2404a5b375edSNipun Gupta rte_pmd_dpaa2_thread_init(void)
2405a5b375edSNipun Gupta {
2406a5b375edSNipun Gupta int ret;
2407a5b375edSNipun Gupta
2408a5b375edSNipun Gupta if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
2409a5b375edSNipun Gupta ret = dpaa2_affine_qbman_swp();
2410a5b375edSNipun Gupta if (ret) {
2411a5b375edSNipun Gupta DPAA2_PMD_ERR(
2412a5b375edSNipun Gupta "Failed to allocate IO portal, tid: %d\n",
2413a5b375edSNipun Gupta rte_gettid());
2414a5b375edSNipun Gupta return;
2415a5b375edSNipun Gupta }
2416a5b375edSNipun Gupta }
2417a5b375edSNipun Gupta }
2418a5b375edSNipun Gupta
24193e5a335dSHemant Agrawal static struct eth_dev_ops dpaa2_ethdev_ops = {
24203e5a335dSHemant Agrawal .dev_configure = dpaa2_eth_dev_configure,
24213e5a335dSHemant Agrawal .dev_start = dpaa2_dev_start,
24223e5a335dSHemant Agrawal .dev_stop = dpaa2_dev_stop,
24233e5a335dSHemant Agrawal .dev_close = dpaa2_dev_close,
2424c0e5c69aSHemant Agrawal .promiscuous_enable = dpaa2_dev_promiscuous_enable,
2425c0e5c69aSHemant Agrawal .promiscuous_disable = dpaa2_dev_promiscuous_disable,
24265d5aeeedSHemant Agrawal .allmulticast_enable = dpaa2_dev_allmulticast_enable,
24275d5aeeedSHemant Agrawal .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2428a1f3a12cSHemant Agrawal .dev_set_link_up = dpaa2_dev_set_link_up,
2429a1f3a12cSHemant Agrawal .dev_set_link_down = dpaa2_dev_set_link_down,
2430c56c86ffSHemant Agrawal .link_update = dpaa2_dev_link_update,
2431b0aa5459SHemant Agrawal .stats_get = dpaa2_dev_stats_get,
24321d6329b2SHemant Agrawal .xstats_get = dpaa2_dev_xstats_get,
24331d6329b2SHemant Agrawal .xstats_get_by_id = dpaa2_xstats_get_by_id,
24341d6329b2SHemant Agrawal .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
24351d6329b2SHemant Agrawal .xstats_get_names = dpaa2_xstats_get_names,
2436b0aa5459SHemant Agrawal .stats_reset = dpaa2_dev_stats_reset,
24371d6329b2SHemant Agrawal .xstats_reset = dpaa2_dev_stats_reset,
2438748eccb9SHemant Agrawal .fw_version_get = dpaa2_fw_version_get,
24393e5a335dSHemant Agrawal .dev_infos_get = dpaa2_dev_info_get,
2440a5fc38d4SHemant Agrawal .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2441e31d4d21SHemant Agrawal .mtu_set = dpaa2_dev_mtu_set,
24423ce294f2SHemant Agrawal .vlan_filter_set = dpaa2_vlan_filter_set,
24433ce294f2SHemant Agrawal .vlan_offload_set = dpaa2_vlan_offload_set,
2444e59b75ffSHemant Agrawal .vlan_tpid_set = dpaa2_vlan_tpid_set,
24453e5a335dSHemant Agrawal .rx_queue_setup = dpaa2_dev_rx_queue_setup,
24463e5a335dSHemant Agrawal .rx_queue_release = dpaa2_dev_rx_queue_release,
24473e5a335dSHemant Agrawal .tx_queue_setup = dpaa2_dev_tx_queue_setup,
2448ddbc2b66SApeksha Gupta .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2449ddbc2b66SApeksha Gupta .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2450977d0006SHemant Agrawal .flow_ctrl_get = dpaa2_flow_ctrl_get,
2451977d0006SHemant Agrawal .flow_ctrl_set = dpaa2_flow_ctrl_set,
2452b4d97b7dSHemant Agrawal .mac_addr_add = dpaa2_dev_add_mac_addr,
2453b4d97b7dSHemant Agrawal .mac_addr_remove = dpaa2_dev_remove_mac_addr,
2454b4d97b7dSHemant Agrawal .mac_addr_set = dpaa2_dev_set_mac_addr,
245563d5c3b0SHemant Agrawal .rss_hash_update = dpaa2_dev_rss_hash_update,
245663d5c3b0SHemant Agrawal .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
2457fb7ad441SThomas Monjalon .flow_ops_get = dpaa2_dev_flow_ops_get,
2458de1d70f0SHemant Agrawal .rxq_info_get = dpaa2_rxq_info_get,
2459de1d70f0SHemant Agrawal .txq_info_get = dpaa2_txq_info_get,
2460ac624068SGagandeep Singh .tm_ops_get = dpaa2_tm_ops_get,
2461bc767866SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
2462bc767866SPriyanka Jain .timesync_enable = dpaa2_timesync_enable,
2463bc767866SPriyanka Jain .timesync_disable = dpaa2_timesync_disable,
2464bc767866SPriyanka Jain .timesync_read_time = dpaa2_timesync_read_time,
2465bc767866SPriyanka Jain .timesync_write_time = dpaa2_timesync_write_time,
2466bc767866SPriyanka Jain .timesync_adjust_time = dpaa2_timesync_adjust_time,
2467bc767866SPriyanka Jain .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2468bc767866SPriyanka Jain .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2469bc767866SPriyanka Jain #endif
24703e5a335dSHemant Agrawal };
24713e5a335dSHemant Agrawal
2472c3e0a706SShreyansh Jain /* Populate the MAC address from the one physically available (u-boot/firmware)
2473c3e0a706SShreyansh Jain * and/or the one set by higher layers like MC (restool) etc.
2474c3e0a706SShreyansh Jain * The resolved primary MAC address is written to mac_entry.
2475c3e0a706SShreyansh Jain */
2476c3e0a706SShreyansh Jain static int
2477c3e0a706SShreyansh Jain populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
24786d13ea8eSOlivier Matz struct rte_ether_addr *mac_entry)
2479c3e0a706SShreyansh Jain {
2480c3e0a706SShreyansh Jain int ret;
24816d13ea8eSOlivier Matz struct rte_ether_addr phy_mac, prime_mac;
248241c24ea2SShreyansh Jain
24836d13ea8eSOlivier Matz memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
24846d13ea8eSOlivier Matz memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2485c3e0a706SShreyansh Jain
2486c3e0a706SShreyansh Jain /* Get the physical device MAC address */
2487c3e0a706SShreyansh Jain ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2488c3e0a706SShreyansh Jain phy_mac.addr_bytes);
2489c3e0a706SShreyansh Jain if (ret) {
2490c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2491c3e0a706SShreyansh Jain goto cleanup;
2492c3e0a706SShreyansh Jain }
2493c3e0a706SShreyansh Jain
2494c3e0a706SShreyansh Jain ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2495c3e0a706SShreyansh Jain prime_mac.addr_bytes);
2496c3e0a706SShreyansh Jain if (ret) {
2497c3e0a706SShreyansh Jain DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2498c3e0a706SShreyansh Jain goto cleanup;
2499c3e0a706SShreyansh Jain }
2500c3e0a706SShreyansh Jain
2501c3e0a706SShreyansh Jain /* Now that both MACs have been obtained, do:
2502c3e0a706SShreyansh Jain * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2503c3e0a706SShreyansh Jain * and return phy
2504c3e0a706SShreyansh Jain * If empty_mac(phy), return prime.
2505c3e0a706SShreyansh Jain * if both are empty, create random MAC, set as prime and return
2506c3e0a706SShreyansh Jain */
2507538da7a1SOlivier Matz if (!rte_is_zero_ether_addr(&phy_mac)) {
2508c3e0a706SShreyansh Jain /* If the addresses are not same, overwrite prime */
2509538da7a1SOlivier Matz if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2510c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2511c3e0a706SShreyansh Jain priv->token,
2512c3e0a706SShreyansh Jain phy_mac.addr_bytes);
2513c3e0a706SShreyansh Jain if (ret) {
2514c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2515c3e0a706SShreyansh Jain ret);
2516c3e0a706SShreyansh Jain goto cleanup;
2517c3e0a706SShreyansh Jain }
25186d13ea8eSOlivier Matz memcpy(&prime_mac, &phy_mac,
25196d13ea8eSOlivier Matz sizeof(struct rte_ether_addr));
2520c3e0a706SShreyansh Jain }
2521538da7a1SOlivier Matz } else if (rte_is_zero_ether_addr(&prime_mac)) {
2522c3e0a706SShreyansh Jain /* In case both phy and prime MACs are zero, create a random MAC */
2523538da7a1SOlivier Matz rte_eth_random_addr(prime_mac.addr_bytes);
2524c3e0a706SShreyansh Jain ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2525c3e0a706SShreyansh Jain priv->token,
2526c3e0a706SShreyansh Jain prime_mac.addr_bytes);
2527c3e0a706SShreyansh Jain if (ret) {
2528c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2529c3e0a706SShreyansh Jain goto cleanup;
2530c3e0a706SShreyansh Jain }
2531c3e0a706SShreyansh Jain }
2532c3e0a706SShreyansh Jain
2533c3e0a706SShreyansh Jain /* Use prime_mac as the final MAC address */
25346d13ea8eSOlivier Matz memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2535c3e0a706SShreyansh Jain return 0;
2536c3e0a706SShreyansh Jain
2537c3e0a706SShreyansh Jain cleanup:
2538c3e0a706SShreyansh Jain return -1;
2539c3e0a706SShreyansh Jain }
2540c3e0a706SShreyansh Jain
2541c147eae0SHemant Agrawal static int
2542a3a997f0SHemant Agrawal check_devargs_handler(__rte_unused const char *key, const char *value,
2543a3a997f0SHemant Agrawal __rte_unused void *opaque)
2544a3a997f0SHemant Agrawal {
2545a3a997f0SHemant Agrawal if (strcmp(value, "1"))
2546a3a997f0SHemant Agrawal return -1;
2547a3a997f0SHemant Agrawal
2548a3a997f0SHemant Agrawal return 0;
2549a3a997f0SHemant Agrawal }
2550a3a997f0SHemant Agrawal
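/*
 * Devargs helper: returns 1 only when 'key' is present in the device
 * arguments and set to "1", e.g. an allow-list entry such as
 * "fslmc:dpni.1,drv_loopback=1" (device name is illustrative).
 */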
2551a3a997f0SHemant Agrawal static int
2552a3a997f0SHemant Agrawal dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2553a3a997f0SHemant Agrawal {
2554a3a997f0SHemant Agrawal struct rte_kvargs *kvlist;
2555a3a997f0SHemant Agrawal
2556a3a997f0SHemant Agrawal if (!devargs)
2557a3a997f0SHemant Agrawal return 0;
2558a3a997f0SHemant Agrawal
2559a3a997f0SHemant Agrawal kvlist = rte_kvargs_parse(devargs->args, NULL);
2560a3a997f0SHemant Agrawal if (!kvlist)
2561a3a997f0SHemant Agrawal return 0;
2562a3a997f0SHemant Agrawal
2563a3a997f0SHemant Agrawal if (!rte_kvargs_count(kvlist, key)) {
2564a3a997f0SHemant Agrawal rte_kvargs_free(kvlist);
2565a3a997f0SHemant Agrawal return 0;
2566a3a997f0SHemant Agrawal }
2567a3a997f0SHemant Agrawal
2568a3a997f0SHemant Agrawal if (rte_kvargs_process(kvlist, key,
2569a3a997f0SHemant Agrawal check_devargs_handler, NULL) < 0) {
2570a3a997f0SHemant Agrawal rte_kvargs_free(kvlist);
2571a3a997f0SHemant Agrawal return 0;
2572a3a997f0SHemant Agrawal }
2573a3a997f0SHemant Agrawal rte_kvargs_free(kvlist);
2574a3a997f0SHemant Agrawal
2575a3a997f0SHemant Agrawal return 1;
2576a3a997f0SHemant Agrawal }
2577a3a997f0SHemant Agrawal
2578a3a997f0SHemant Agrawal static int
2579c147eae0SHemant Agrawal dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2580c147eae0SHemant Agrawal {
25813e5a335dSHemant Agrawal struct rte_device *dev = eth_dev->device;
25823e5a335dSHemant Agrawal struct rte_dpaa2_device *dpaa2_dev;
25833e5a335dSHemant Agrawal struct fsl_mc_io *dpni_dev;
25843e5a335dSHemant Agrawal struct dpni_attr attr;
25853e5a335dSHemant Agrawal struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2586bee61d86SHemant Agrawal struct dpni_buffer_layout layout;
2587fe2b986aSSunil Kumar Kori int ret, hw_id, i;
25883e5a335dSHemant Agrawal
2589d401ead1SHemant Agrawal PMD_INIT_FUNC_TRACE();
2590d401ead1SHemant Agrawal
259181c42c84SShreyansh Jain dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
259281c42c84SShreyansh Jain if (!dpni_dev) {
259381c42c84SShreyansh Jain DPAA2_PMD_ERR("Memory allocation failed for dpni device");
259481c42c84SShreyansh Jain return -1;
259581c42c84SShreyansh Jain }
2596a6a5f4b4SHemant Agrawal dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
259781c42c84SShreyansh Jain eth_dev->process_private = (void *)dpni_dev;
259881c42c84SShreyansh Jain
2599c147eae0SHemant Agrawal /* For secondary processes, the primary has done all the work */
2600e7b187dbSShreyansh Jain if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2601e7b187dbSShreyansh Jain /* In case of secondary, only burst and ops API need to be
2602e7b187dbSShreyansh Jain * plugged.
2603e7b187dbSShreyansh Jain */
2604e7b187dbSShreyansh Jain eth_dev->dev_ops = &dpaa2_ethdev_ops;
2605cbfc6111SFerruh Yigit eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2606a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2607a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
260820191ab3SNipun Gupta else if (dpaa2_get_devargs(dev->devargs,
260920191ab3SNipun Gupta DRIVER_NO_PREFETCH_MODE))
261020191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2611a3a997f0SHemant Agrawal else
2612e7b187dbSShreyansh Jain eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2613e7b187dbSShreyansh Jain eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2614c147eae0SHemant Agrawal return 0;
2615e7b187dbSShreyansh Jain }
2616c147eae0SHemant Agrawal
26173e5a335dSHemant Agrawal dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
26183e5a335dSHemant Agrawal
26193e5a335dSHemant Agrawal hw_id = dpaa2_dev->object_id;
26203e5a335dSHemant Agrawal ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
26213e5a335dSHemant Agrawal if (ret) {
2622a10a988aSShreyansh Jain DPAA2_PMD_ERR(
2623a10a988aSShreyansh Jain "Failure in opening dpni@%d with err code %d",
2624d4984046SHemant Agrawal hw_id, ret);
2625d4984046SHemant Agrawal rte_free(dpni_dev);
26263e5a335dSHemant Agrawal return -1;
26273e5a335dSHemant Agrawal }
26283e5a335dSHemant Agrawal
2629f023d059SJun Yang if (eth_dev->data->dev_conf.lpbk_mode)
2630f023d059SJun Yang dpaa2_dev_recycle_deconfig(eth_dev);
2631f023d059SJun Yang
26323e5a335dSHemant Agrawal /* Clean the device first */
26333e5a335dSHemant Agrawal ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
26343e5a335dSHemant Agrawal if (ret) {
2635a10a988aSShreyansh Jain DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2636d4984046SHemant Agrawal hw_id, ret);
2637d4984046SHemant Agrawal goto init_err;
26383e5a335dSHemant Agrawal }
26393e5a335dSHemant Agrawal
26403e5a335dSHemant Agrawal ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
26413e5a335dSHemant Agrawal if (ret) {
2642a10a988aSShreyansh Jain DPAA2_PMD_ERR(
2643a10a988aSShreyansh Jain "Failure in get dpni@%d attribute, err code %d",
2644d4984046SHemant Agrawal hw_id, ret);
2645d4984046SHemant Agrawal goto init_err;
26463e5a335dSHemant Agrawal }
26473e5a335dSHemant Agrawal
264816bbc98aSShreyansh Jain priv->num_rx_tc = attr.num_rx_tcs;
264972100f0dSGagandeep Singh priv->num_tx_tc = attr.num_tx_tcs;
26504ce58f8aSJun Yang priv->qos_entries = attr.qos_entries;
26514ce58f8aSJun Yang priv->fs_entries = attr.fs_entries;
26524ce58f8aSJun Yang priv->dist_queues = attr.num_queues;
265372100f0dSGagandeep Singh priv->num_channels = attr.num_channels;
265472100f0dSGagandeep Singh priv->channel_inuse = 0;
2655f023d059SJun Yang rte_spinlock_init(&priv->lpbk_qp_lock);
26564ce58f8aSJun Yang
265713b856acSHemant Agrawal /* only if the custom CG is enabled */
265813b856acSHemant Agrawal if (attr.options & DPNI_OPT_CUSTOM_CG)
265913b856acSHemant Agrawal priv->max_cgs = attr.num_cgs;
266013b856acSHemant Agrawal else
266113b856acSHemant Agrawal priv->max_cgs = 0;
266213b856acSHemant Agrawal
266313b856acSHemant Agrawal for (i = 0; i < priv->max_cgs; i++)
266413b856acSHemant Agrawal priv->cgid_in_use[i] = 0;
266589c2ea8fSHemant Agrawal
2666fe2b986aSSunil Kumar Kori for (i = 0; i < attr.num_rx_tcs; i++)
2667fe2b986aSSunil Kumar Kori priv->nb_rx_queues += attr.num_queues;
266889c2ea8fSHemant Agrawal
266972100f0dSGagandeep Singh priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
2670ef18dafeSHemant Agrawal
267113b856acSHemant Agrawal DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2672a10a988aSShreyansh Jain priv->num_rx_tc, priv->nb_rx_queues,
267313b856acSHemant Agrawal priv->nb_tx_queues, priv->max_cgs);
26743e5a335dSHemant Agrawal
26753e5a335dSHemant Agrawal priv->hw = dpni_dev;
26763e5a335dSHemant Agrawal priv->hw_id = hw_id;
267733fad432SHemant Agrawal priv->options = attr.options;
267833fad432SHemant Agrawal priv->max_mac_filters = attr.mac_filter_entries;
267933fad432SHemant Agrawal priv->max_vlan_filters = attr.vlan_filter_entries;
26803e5a335dSHemant Agrawal priv->flags = 0;
2681e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
26828d21c563SHemant Agrawal printf("DPDK IEEE1588 is enabled\n");
26838d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE;
2684e806bf87SPriyanka Jain #endif
26858d21c563SHemant Agrawal /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
26868d21c563SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
26878d21c563SHemant Agrawal priv->flags |= DPAA2_TX_CONF_ENABLE;
26888d21c563SHemant Agrawal DPAA2_PMD_INFO("TX_CONF Enabled");
26898d21c563SHemant Agrawal }
26903e5a335dSHemant Agrawal
26914690a611SNipun Gupta if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
26924690a611SNipun Gupta dpaa2_enable_err_queue = 1;
26934690a611SNipun Gupta DPAA2_PMD_INFO("Enable error queue");
26944690a611SNipun Gupta }
26954690a611SNipun Gupta
26963e5a335dSHemant Agrawal /* Allocate memory for hardware structure for queues */
26973e5a335dSHemant Agrawal ret = dpaa2_alloc_rx_tx_queues(eth_dev);
26983e5a335dSHemant Agrawal if (ret) {
2699a10a988aSShreyansh Jain DPAA2_PMD_ERR("Queue allocation Failed");
2700d4984046SHemant Agrawal goto init_err;
27013e5a335dSHemant Agrawal }
27023e5a335dSHemant Agrawal
2703c3e0a706SShreyansh Jain /* Allocate memory for storing MAC addresses.
2704c3e0a706SShreyansh Jain * Table of mac_filter_entries size is allocated so that RTE ether lib
2705c3e0a706SShreyansh Jain * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2706c3e0a706SShreyansh Jain */
270733fad432SHemant Agrawal eth_dev->data->mac_addrs = rte_zmalloc("dpni",
270835b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
270933fad432SHemant Agrawal if (eth_dev->data->mac_addrs == NULL) {
2710a10a988aSShreyansh Jain DPAA2_PMD_ERR(
2711d4984046SHemant Agrawal "Failed to allocate %d bytes needed to store MAC addresses",
271235b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2713d4984046SHemant Agrawal ret = -ENOMEM;
2714d4984046SHemant Agrawal goto init_err;
271533fad432SHemant Agrawal }
271633fad432SHemant Agrawal
2717c3e0a706SShreyansh Jain ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
271833fad432SHemant Agrawal if (ret) {
2719c3e0a706SShreyansh Jain DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2720c3e0a706SShreyansh Jain rte_free(eth_dev->data->mac_addrs);
2721c3e0a706SShreyansh Jain eth_dev->data->mac_addrs = NULL;
2722d4984046SHemant Agrawal goto init_err;
272333fad432SHemant Agrawal }
272433fad432SHemant Agrawal
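/*
 * The DPNI buffer layout selects what the hardware records per frame
 * (roughly, in the annotation area of the buffer): FRAME_STATUS exposes
 * the per-frame status word, and TIMESTAMP (requested when TX
 * confirmation / IEEE1588 is enabled) asks for hardware timestamps.
 */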
2725bee61d86SHemant Agrawal /* ... tx buffer layout ... */
2726bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout));
27278d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) {
27289ceacab7SPriyanka Jain layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
27299ceacab7SPriyanka Jain DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
27309ceacab7SPriyanka Jain layout.pass_timestamp = true;
27319ceacab7SPriyanka Jain } else {
2732bee61d86SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
27339ceacab7SPriyanka Jain }
2734bee61d86SHemant Agrawal layout.pass_frame_status = 1;
2735bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2736bee61d86SHemant Agrawal DPNI_QUEUE_TX, &layout);
2737bee61d86SHemant Agrawal if (ret) {
2738a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2739d4984046SHemant Agrawal goto init_err;
2740bee61d86SHemant Agrawal }
2741bee61d86SHemant Agrawal
2742bee61d86SHemant Agrawal /* ... tx-conf and error buffer layout ... */
2743bee61d86SHemant Agrawal memset(&layout, 0, sizeof(struct dpni_buffer_layout));
27448d21c563SHemant Agrawal if (priv->flags & DPAA2_TX_CONF_ENABLE) {
27458d21c563SHemant Agrawal layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
27469ceacab7SPriyanka Jain layout.pass_timestamp = true;
27479ceacab7SPriyanka Jain }
27488d21c563SHemant Agrawal layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2749bee61d86SHemant Agrawal layout.pass_frame_status = 1;
2750bee61d86SHemant Agrawal ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2751bee61d86SHemant Agrawal DPNI_QUEUE_TX_CONFIRM, &layout);
2752bee61d86SHemant Agrawal if (ret) {
2753a10a988aSShreyansh Jain DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2754d4984046SHemant Agrawal ret);
2755d4984046SHemant Agrawal goto init_err;
2756bee61d86SHemant Agrawal }
2757bee61d86SHemant Agrawal
27583e5a335dSHemant Agrawal eth_dev->dev_ops = &dpaa2_ethdev_ops;
2759c147eae0SHemant Agrawal
2760a3a997f0SHemant Agrawal if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2761a3a997f0SHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2762a3a997f0SHemant Agrawal DPAA2_PMD_INFO("Loopback mode");
276320191ab3SNipun Gupta } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
276420191ab3SNipun Gupta eth_dev->rx_pkt_burst = dpaa2_dev_rx;
276520191ab3SNipun Gupta DPAA2_PMD_INFO("No Prefetch mode");
2766a3a997f0SHemant Agrawal } else {
27675c6942fdSHemant Agrawal eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2768a3a997f0SHemant Agrawal }
2769cd9935ceSHemant Agrawal eth_dev->tx_pkt_burst = dpaa2_dev_tx;
27701261cd68SHemant Agrawal
27717be78d02SJosh Soref /* Init fields w.r.t. classification */
27725f176728SJun Yang memset(&priv->extract.qos_key_extract, 0,
27735f176728SJun Yang sizeof(struct dpaa2_key_extract));
2774fe2b986aSSunil Kumar Kori priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2775fe2b986aSSunil Kumar Kori if (!priv->extract.qos_extract_param) {
2776fe2b986aSSunil Kumar Kori DPAA2_PMD_ERR("Error(%d) in allocating resources for flow "
27777be78d02SJosh Soref "classification", ret);
2778fe2b986aSSunil Kumar Kori goto init_err;
2779fe2b986aSSunil Kumar Kori }
27805f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_src_offset =
27815f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
27825f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
27835f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
27845f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_src_offset =
27855f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
27865f176728SJun Yang priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
27875f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
27885f176728SJun Yang
2789fe2b986aSSunil Kumar Kori for (i = 0; i < MAX_TCS; i++) {
27905f176728SJun Yang memset(&priv->extract.tc_key_extract[i], 0,
27915f176728SJun Yang sizeof(struct dpaa2_key_extract));
27925f176728SJun Yang priv->extract.tc_extract_param[i] =
2793fe2b986aSSunil Kumar Kori (size_t)rte_malloc(NULL, 256, 64);
27945f176728SJun Yang if (!priv->extract.tc_extract_param[i]) {
27957be78d02SJosh Soref DPAA2_PMD_ERR("Error(%d) in allocating resources for flow classification",
2796fe2b986aSSunil Kumar Kori ret);
2797fe2b986aSSunil Kumar Kori goto init_err;
2798fe2b986aSSunil Kumar Kori }
27995f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
28005f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
28015f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
28025f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
28035f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
28045f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
28055f176728SJun Yang priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
28065f176728SJun Yang IP_ADDRESS_OFFSET_INVALID;
2807fe2b986aSSunil Kumar Kori }
2808fe2b986aSSunil Kumar Kori
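/*
 * Default maximum frame length: RTE_ETHER_MAX_LEN without the CRC plus room
 * for one VLAN tag, i.e. a standard 1500-byte MTU frame with a single VLAN
 * tag (FCS excluded). It can be changed later via rte_eth_dev_set_mtu(),
 * which is handled by dpaa2_dev_mtu_set().
 */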
28096f8be0fbSHemant Agrawal ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
28106f8be0fbSHemant Agrawal RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
28116f8be0fbSHemant Agrawal + VLAN_TAG_SIZE);
28126f8be0fbSHemant Agrawal if (ret) {
28136f8be0fbSHemant Agrawal DPAA2_PMD_ERR("Unable to set mtu. check config");
28146f8be0fbSHemant Agrawal goto init_err;
28156f8be0fbSHemant Agrawal }
28166f8be0fbSHemant Agrawal
281772ec7a67SSunil Kumar Kori /* TODO: To enable soft parser support, the DPAA2 driver needs to integrate
281872ec7a67SSunil Kumar Kori * with an external entity that provides the byte code for the software
281972ec7a67SSunil Kumar Kori * sequence, which is then offloaded to the H/W through the MC interface.
282072ec7a67SSunil Kumar Kori * Currently it is assumed that the DPAA2 driver already has the byte code
282172ec7a67SSunil Kumar Kori * by some means and that it is offloaded to the H/W.
282272ec7a67SSunil Kumar Kori */
282372ec7a67SSunil Kumar Kori if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
282472ec7a67SSunil Kumar Kori WRIOP_SS_INITIALIZER(priv);
282572ec7a67SSunil Kumar Kori ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
282672ec7a67SSunil Kumar Kori if (ret < 0) {
282772ec7a67SSunil Kumar Kori DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
282872ec7a67SSunil Kumar Kori ret);
282972ec7a67SSunil Kumar Kori return ret;
283072ec7a67SSunil Kumar Kori }
283172ec7a67SSunil Kumar Kori
283272ec7a67SSunil Kumar Kori ret = dpaa2_eth_enable_wriop_soft_parser(priv,
283372ec7a67SSunil Kumar Kori DPNI_SS_INGRESS);
283472ec7a67SSunil Kumar Kori if (ret < 0) {
283572ec7a67SSunil Kumar Kori DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
283672ec7a67SSunil Kumar Kori ret);
283772ec7a67SSunil Kumar Kori return ret;
283872ec7a67SSunil Kumar Kori }
283972ec7a67SSunil Kumar Kori }
2840f023d059SJun Yang RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
2841f023d059SJun Yang eth_dev->data->name, dpaa2_dev->ep_name);
2842f023d059SJun Yang
2843c147eae0SHemant Agrawal return 0;
2844d4984046SHemant Agrawal init_err:
28453e5a335dSHemant Agrawal dpaa2_dev_close(eth_dev);
28463e5a335dSHemant Agrawal
28475964d36aSSachin Saxena return ret;
2848c147eae0SHemant Agrawal }
2849c147eae0SHemant Agrawal
2850028d1dfdSJun Yang int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev)
2851028d1dfdSJun Yang {
2852028d1dfdSJun Yang return dev->device->driver == &rte_dpaa2_pmd.driver;
2853028d1dfdSJun Yang }
2854028d1dfdSJun Yang
2855c147eae0SHemant Agrawal static int
285655fd2703SHemant Agrawal rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2857c147eae0SHemant Agrawal struct rte_dpaa2_device *dpaa2_dev)
2858c147eae0SHemant Agrawal {
2859c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev;
286081c42c84SShreyansh Jain struct dpaa2_dev_priv *dev_priv;
2861c147eae0SHemant Agrawal int diag;
2862c147eae0SHemant Agrawal
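/*
 * The DPAA2 hardware annotation and PTA area are placed at the start of
 * each buffer, so they must fit within the mbuf headroom; otherwise the
 * probe is refused below.
 */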
2863f4435e38SHemant Agrawal if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2864f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM) {
2865f4435e38SHemant Agrawal DPAA2_PMD_ERR(
2866f4435e38SHemant Agrawal "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2867f4435e38SHemant Agrawal RTE_PKTMBUF_HEADROOM,
2868f4435e38SHemant Agrawal DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2869f4435e38SHemant Agrawal
2870f4435e38SHemant Agrawal return -1;
2871f4435e38SHemant Agrawal }
2872f4435e38SHemant Agrawal
2873c147eae0SHemant Agrawal if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2874e729ec76SHemant Agrawal eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2875e729ec76SHemant Agrawal if (!eth_dev)
2876e729ec76SHemant Agrawal return -ENODEV;
287781c42c84SShreyansh Jain dev_priv = rte_zmalloc("ethdev private structure",
2878c147eae0SHemant Agrawal sizeof(struct dpaa2_dev_priv),
2879c147eae0SHemant Agrawal RTE_CACHE_LINE_SIZE);
288081c42c84SShreyansh Jain if (dev_priv == NULL) {
2881a10a988aSShreyansh Jain DPAA2_PMD_CRIT(
2882a10a988aSShreyansh Jain "Unable to allocate memory for private data");
2883c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev);
2884c147eae0SHemant Agrawal return -ENOMEM;
2885c147eae0SHemant Agrawal }
288681c42c84SShreyansh Jain eth_dev->data->dev_private = (void *)dev_priv;
288781c42c84SShreyansh Jain /* Store a pointer to eth_dev in dev_private */
288881c42c84SShreyansh Jain dev_priv->eth_dev = eth_dev;
2889e729ec76SHemant Agrawal } else {
2890e729ec76SHemant Agrawal eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
289181c42c84SShreyansh Jain if (!eth_dev) {
289281c42c84SShreyansh Jain DPAA2_PMD_DEBUG("returning enodev");
2893e729ec76SHemant Agrawal return -ENODEV;
2894c147eae0SHemant Agrawal }
289581c42c84SShreyansh Jain }
2896e729ec76SHemant Agrawal
2897c147eae0SHemant Agrawal eth_dev->device = &dpaa2_dev->device;
289855fd2703SHemant Agrawal
2899c147eae0SHemant Agrawal dpaa2_dev->eth_dev = eth_dev;
2900c147eae0SHemant Agrawal eth_dev->data->rx_mbuf_alloc_failed = 0;
2901c147eae0SHemant Agrawal
290292b7e33eSHemant Agrawal if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
290392b7e33eSHemant Agrawal eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
290492b7e33eSHemant Agrawal
2905f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2906f30e69b4SFerruh Yigit
2907c147eae0SHemant Agrawal /* Invoke PMD device initialization function */
2908c147eae0SHemant Agrawal diag = dpaa2_dev_init(eth_dev);
2909fbe90cddSThomas Monjalon if (diag == 0) {
2910fbe90cddSThomas Monjalon rte_eth_dev_probing_finish(eth_dev);
2911c147eae0SHemant Agrawal return 0;
2912fbe90cddSThomas Monjalon }
2913c147eae0SHemant Agrawal
2914c147eae0SHemant Agrawal rte_eth_dev_release_port(eth_dev);
2915c147eae0SHemant Agrawal return diag;
2916c147eae0SHemant Agrawal }
2917c147eae0SHemant Agrawal
2918c147eae0SHemant Agrawal static int
2919c147eae0SHemant Agrawal rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2920c147eae0SHemant Agrawal {
2921c147eae0SHemant Agrawal struct rte_eth_dev *eth_dev;
29225964d36aSSachin Saxena int ret;
2923c147eae0SHemant Agrawal
2924c147eae0SHemant Agrawal eth_dev = dpaa2_dev->eth_dev;
29255964d36aSSachin Saxena dpaa2_dev_close(eth_dev);
29265964d36aSSachin Saxena ret = rte_eth_dev_release_port(eth_dev);
2927c147eae0SHemant Agrawal
29285964d36aSSachin Saxena return ret;
2929c147eae0SHemant Agrawal }
2930c147eae0SHemant Agrawal
2931c147eae0SHemant Agrawal static struct rte_dpaa2_driver rte_dpaa2_pmd = {
293292b7e33eSHemant Agrawal .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2933bad555dfSShreyansh Jain .drv_type = DPAA2_ETH,
2934c147eae0SHemant Agrawal .probe = rte_dpaa2_probe,
2935c147eae0SHemant Agrawal .remove = rte_dpaa2_remove,
2936c147eae0SHemant Agrawal };
2937c147eae0SHemant Agrawal
29384ed8a733SVanshika Shukla RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
29394ed8a733SVanshika Shukla RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
294020191ab3SNipun Gupta DRIVER_LOOPBACK_MODE "=<int> "
29418d21c563SHemant Agrawal DRIVER_NO_PREFETCH_MODE "=<int>"
29424690a611SNipun Gupta DRIVER_TX_CONF "=<int>"
29434690a611SNipun Gupta DRIVER_ERROR_QUEUE "=<int>");
2944eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);