1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
22bfe3f2eSlogwang *
32bfe3f2eSlogwang * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4*2d9fd380Sjfb8856606 * Copyright 2017-2020 NXP
52bfe3f2eSlogwang *
62bfe3f2eSlogwang */
72bfe3f2eSlogwang /* System headers */
82bfe3f2eSlogwang #include <stdio.h>
92bfe3f2eSlogwang #include <inttypes.h>
102bfe3f2eSlogwang #include <unistd.h>
112bfe3f2eSlogwang #include <limits.h>
122bfe3f2eSlogwang #include <sched.h>
132bfe3f2eSlogwang #include <signal.h>
142bfe3f2eSlogwang #include <pthread.h>
152bfe3f2eSlogwang #include <sys/types.h>
162bfe3f2eSlogwang #include <sys/syscall.h>
172bfe3f2eSlogwang
184418919fSjohnjiang #include <rte_string_fns.h>
192bfe3f2eSlogwang #include <rte_byteorder.h>
202bfe3f2eSlogwang #include <rte_common.h>
212bfe3f2eSlogwang #include <rte_interrupts.h>
222bfe3f2eSlogwang #include <rte_log.h>
232bfe3f2eSlogwang #include <rte_debug.h>
242bfe3f2eSlogwang #include <rte_pci.h>
252bfe3f2eSlogwang #include <rte_atomic.h>
262bfe3f2eSlogwang #include <rte_branch_prediction.h>
272bfe3f2eSlogwang #include <rte_memory.h>
282bfe3f2eSlogwang #include <rte_tailq.h>
292bfe3f2eSlogwang #include <rte_eal.h>
302bfe3f2eSlogwang #include <rte_alarm.h>
312bfe3f2eSlogwang #include <rte_ether.h>
32d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
332bfe3f2eSlogwang #include <rte_malloc.h>
342bfe3f2eSlogwang #include <rte_ring.h>
352bfe3f2eSlogwang
362bfe3f2eSlogwang #include <rte_dpaa_bus.h>
372bfe3f2eSlogwang #include <rte_dpaa_logs.h>
382bfe3f2eSlogwang #include <dpaa_mempool.h>
392bfe3f2eSlogwang
402bfe3f2eSlogwang #include <dpaa_ethdev.h>
412bfe3f2eSlogwang #include <dpaa_rxtx.h>
42*2d9fd380Sjfb8856606 #include <dpaa_flow.h>
43d30ea906Sjfb8856606 #include <rte_pmd_dpaa.h>
442bfe3f2eSlogwang
452bfe3f2eSlogwang #include <fsl_usd.h>
462bfe3f2eSlogwang #include <fsl_qman.h>
472bfe3f2eSlogwang #include <fsl_bman.h>
482bfe3f2eSlogwang #include <fsl_fman.h>
49*2d9fd380Sjfb8856606 #include <process.h>
50*2d9fd380Sjfb8856606 #include <fmlib/fm_ext.h>
512bfe3f2eSlogwang
52d30ea906Sjfb8856606 /* Supported Rx offloads */
53d30ea906Sjfb8856606 static uint64_t dev_rx_offloads_sup =
54d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME |
55d30ea906Sjfb8856606 DEV_RX_OFFLOAD_SCATTER;
56d30ea906Sjfb8856606
57d30ea906Sjfb8856606 /* Rx offloads which cannot be disabled */
58d30ea906Sjfb8856606 static uint64_t dev_rx_offloads_nodis =
59d30ea906Sjfb8856606 DEV_RX_OFFLOAD_IPV4_CKSUM |
60d30ea906Sjfb8856606 DEV_RX_OFFLOAD_UDP_CKSUM |
61d30ea906Sjfb8856606 DEV_RX_OFFLOAD_TCP_CKSUM |
624418919fSjohnjiang DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
634418919fSjohnjiang DEV_RX_OFFLOAD_RSS_HASH;
64d30ea906Sjfb8856606
65d30ea906Sjfb8856606 /* Supported Tx offloads */
664418919fSjohnjiang static uint64_t dev_tx_offloads_sup =
674418919fSjohnjiang DEV_TX_OFFLOAD_MT_LOCKFREE |
684418919fSjohnjiang DEV_TX_OFFLOAD_MBUF_FAST_FREE;
69d30ea906Sjfb8856606
70d30ea906Sjfb8856606 /* Tx offloads which cannot be disabled */
71d30ea906Sjfb8856606 static uint64_t dev_tx_offloads_nodis =
72d30ea906Sjfb8856606 DEV_TX_OFFLOAD_IPV4_CKSUM |
73d30ea906Sjfb8856606 DEV_TX_OFFLOAD_UDP_CKSUM |
74d30ea906Sjfb8856606 DEV_TX_OFFLOAD_TCP_CKSUM |
75d30ea906Sjfb8856606 DEV_TX_OFFLOAD_SCTP_CKSUM |
76d30ea906Sjfb8856606 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
774418919fSjohnjiang DEV_TX_OFFLOAD_MULTI_SEGS;
78d30ea906Sjfb8856606
792bfe3f2eSlogwang /* Keep track of whether QMAN and BMAN have been globally initialized */
802bfe3f2eSlogwang static int is_global_init;
81*2d9fd380Sjfb8856606 static int fmc_q = 1; /* Indicates the use of static fmc for distribution */
82d30ea906Sjfb8856606 static int default_q; /* use default queue - FMC is not executed*/
83d30ea906Sjfb8856606 /* At present we only allow up to 4 push mode queues as default - as each of
84d30ea906Sjfb8856606 * this queue need dedicated portal and we are short of portals.
85d30ea906Sjfb8856606 */
86d30ea906Sjfb8856606 #define DPAA_MAX_PUSH_MODE_QUEUE 8
87d30ea906Sjfb8856606 #define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
88d30ea906Sjfb8856606
89d30ea906Sjfb8856606 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
90d30ea906Sjfb8856606 static int dpaa_push_queue_idx; /* Queue index which are in push mode*/
91d30ea906Sjfb8856606
92d30ea906Sjfb8856606
93*2d9fd380Sjfb8856606 /* Per RX FQ Taildrop in frame count */
94d30ea906Sjfb8856606 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
952bfe3f2eSlogwang
96*2d9fd380Sjfb8856606 /* Per TX FQ Taildrop in frame count, disabled by default */
97*2d9fd380Sjfb8856606 static unsigned int td_tx_threshold;
98*2d9fd380Sjfb8856606
992bfe3f2eSlogwang struct rte_dpaa_xstats_name_off {
1002bfe3f2eSlogwang char name[RTE_ETH_XSTATS_NAME_SIZE];
1012bfe3f2eSlogwang uint32_t offset;
1022bfe3f2eSlogwang };
1032bfe3f2eSlogwang
1042bfe3f2eSlogwang static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
1052bfe3f2eSlogwang {"rx_align_err",
1062bfe3f2eSlogwang offsetof(struct dpaa_if_stats, raln)},
1072bfe3f2eSlogwang {"rx_valid_pause",
1082bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rxpf)},
1092bfe3f2eSlogwang {"rx_fcs_err",
1102bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rfcs)},
1112bfe3f2eSlogwang {"rx_vlan_frame",
1122bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rvlan)},
1132bfe3f2eSlogwang {"rx_frame_err",
1142bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rerr)},
1152bfe3f2eSlogwang {"rx_drop_err",
1162bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rdrp)},
1172bfe3f2eSlogwang {"rx_undersized",
1182bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rund)},
1192bfe3f2eSlogwang {"rx_oversize_err",
1202bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rovr)},
1212bfe3f2eSlogwang {"rx_fragment_pkt",
1222bfe3f2eSlogwang offsetof(struct dpaa_if_stats, rfrg)},
1232bfe3f2eSlogwang {"tx_valid_pause",
1242bfe3f2eSlogwang offsetof(struct dpaa_if_stats, txpf)},
1252bfe3f2eSlogwang {"tx_fcs_err",
1262bfe3f2eSlogwang offsetof(struct dpaa_if_stats, terr)},
1272bfe3f2eSlogwang {"tx_vlan_frame",
1282bfe3f2eSlogwang offsetof(struct dpaa_if_stats, tvlan)},
1292bfe3f2eSlogwang {"rx_undersized",
1302bfe3f2eSlogwang offsetof(struct dpaa_if_stats, tund)},
1312bfe3f2eSlogwang };
1322bfe3f2eSlogwang
133d30ea906Sjfb8856606 static struct rte_dpaa_driver rte_dpaa_pmd;
134d30ea906Sjfb8856606
1354418919fSjohnjiang static int
136d30ea906Sjfb8856606 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
137d30ea906Sjfb8856606
138*2d9fd380Sjfb8856606 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
139*2d9fd380Sjfb8856606 int wait_to_complete __rte_unused);
140*2d9fd380Sjfb8856606
141*2d9fd380Sjfb8856606 static void dpaa_interrupt_handler(void *param);
142*2d9fd380Sjfb8856606
143d30ea906Sjfb8856606 static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq * opts)144d30ea906Sjfb8856606 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
145d30ea906Sjfb8856606 {
146d30ea906Sjfb8856606 memset(opts, 0, sizeof(struct qm_mcc_initfq));
147d30ea906Sjfb8856606 opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
148d30ea906Sjfb8856606 opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
149d30ea906Sjfb8856606 QM_FQCTRL_PREFERINCACHE;
150d30ea906Sjfb8856606 opts->fqd.context_a.stashing.exclusive = 0;
151d30ea906Sjfb8856606 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
152d30ea906Sjfb8856606 opts->fqd.context_a.stashing.annotation_cl =
153d30ea906Sjfb8856606 DPAA_IF_RX_ANNOTATION_STASH;
154d30ea906Sjfb8856606 opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
155d30ea906Sjfb8856606 opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
156d30ea906Sjfb8856606 }
157d30ea906Sjfb8856606
1582bfe3f2eSlogwang static int
dpaa_mtu_set(struct rte_eth_dev * dev,uint16_t mtu)1592bfe3f2eSlogwang dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1602bfe3f2eSlogwang {
1614418919fSjohnjiang uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
162d30ea906Sjfb8856606 + VLAN_TAG_SIZE;
163d30ea906Sjfb8856606 uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
1642bfe3f2eSlogwang
1652bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
1662bfe3f2eSlogwang
1674418919fSjohnjiang if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
1682bfe3f2eSlogwang return -EINVAL;
169d30ea906Sjfb8856606 /*
170d30ea906Sjfb8856606 * Refuse mtu that requires the support of scattered packets
171d30ea906Sjfb8856606 * when this feature has not been enabled before.
172d30ea906Sjfb8856606 */
173d30ea906Sjfb8856606 if (dev->data->min_rx_buf_size &&
174d30ea906Sjfb8856606 !dev->data->scattered_rx && frame_size > buffsz) {
175d30ea906Sjfb8856606 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
176d30ea906Sjfb8856606 return -EINVAL;
177d30ea906Sjfb8856606 }
178d30ea906Sjfb8856606
179d30ea906Sjfb8856606 /* check <seg size> * <max_seg> >= max_frame */
180d30ea906Sjfb8856606 if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
181d30ea906Sjfb8856606 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
182d30ea906Sjfb8856606 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
183d30ea906Sjfb8856606 buffsz * DPAA_SGT_MAX_ENTRIES);
184d30ea906Sjfb8856606 return -EINVAL;
185d30ea906Sjfb8856606 }
186d30ea906Sjfb8856606
1874418919fSjohnjiang if (frame_size > RTE_ETHER_MAX_LEN)
1884418919fSjohnjiang dev->data->dev_conf.rxmode.offloads |=
189d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME;
1902bfe3f2eSlogwang else
191d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.offloads &=
192d30ea906Sjfb8856606 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1932bfe3f2eSlogwang
194d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1952bfe3f2eSlogwang
196*2d9fd380Sjfb8856606 fman_if_set_maxfrm(dev->process_private, frame_size);
1972bfe3f2eSlogwang
1982bfe3f2eSlogwang return 0;
1992bfe3f2eSlogwang }
2002bfe3f2eSlogwang
2012bfe3f2eSlogwang static int
dpaa_eth_dev_configure(struct rte_eth_dev * dev)202d30ea906Sjfb8856606 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
2032bfe3f2eSlogwang {
204d30ea906Sjfb8856606 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
205d30ea906Sjfb8856606 uint64_t rx_offloads = eth_conf->rxmode.offloads;
206d30ea906Sjfb8856606 uint64_t tx_offloads = eth_conf->txmode.offloads;
207*2d9fd380Sjfb8856606 struct rte_device *rdev = dev->device;
208*2d9fd380Sjfb8856606 struct rte_eth_link *link = &dev->data->dev_link;
209*2d9fd380Sjfb8856606 struct rte_dpaa_device *dpaa_dev;
210*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
211*2d9fd380Sjfb8856606 struct __fman_if *__fif;
212*2d9fd380Sjfb8856606 struct rte_intr_handle *intr_handle;
213*2d9fd380Sjfb8856606 int speed, duplex;
214*2d9fd380Sjfb8856606 int ret;
215d30ea906Sjfb8856606
2162bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
2172bfe3f2eSlogwang
218*2d9fd380Sjfb8856606 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
219*2d9fd380Sjfb8856606 intr_handle = &dpaa_dev->intr_handle;
220*2d9fd380Sjfb8856606 __fif = container_of(fif, struct __fman_if, __if);
221*2d9fd380Sjfb8856606
2224418919fSjohnjiang /* Rx offloads which are enabled by default */
223d30ea906Sjfb8856606 if (dev_rx_offloads_nodis & ~rx_offloads) {
2244418919fSjohnjiang DPAA_PMD_INFO(
2254418919fSjohnjiang "Some of rx offloads enabled by default - requested 0x%" PRIx64
2264418919fSjohnjiang " fixed are 0x%" PRIx64,
227d30ea906Sjfb8856606 rx_offloads, dev_rx_offloads_nodis);
228d30ea906Sjfb8856606 }
229d30ea906Sjfb8856606
2304418919fSjohnjiang /* Tx offloads which are enabled by default */
231d30ea906Sjfb8856606 if (dev_tx_offloads_nodis & ~tx_offloads) {
2324418919fSjohnjiang DPAA_PMD_INFO(
2334418919fSjohnjiang "Some of tx offloads enabled by default - requested 0x%" PRIx64
2344418919fSjohnjiang " fixed are 0x%" PRIx64,
235d30ea906Sjfb8856606 tx_offloads, dev_tx_offloads_nodis);
236d30ea906Sjfb8856606 }
237d30ea906Sjfb8856606
238d30ea906Sjfb8856606 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
239d30ea906Sjfb8856606 uint32_t max_len;
240d30ea906Sjfb8856606
241d30ea906Sjfb8856606 DPAA_PMD_DEBUG("enabling jumbo");
242d30ea906Sjfb8856606
2432bfe3f2eSlogwang if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
2442bfe3f2eSlogwang DPAA_MAX_RX_PKT_LEN)
245d30ea906Sjfb8856606 max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
246d30ea906Sjfb8856606 else {
247d30ea906Sjfb8856606 DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
248d30ea906Sjfb8856606 "supported is %d",
249d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.max_rx_pkt_len,
250d30ea906Sjfb8856606 DPAA_MAX_RX_PKT_LEN);
251d30ea906Sjfb8856606 max_len = DPAA_MAX_RX_PKT_LEN;
2522bfe3f2eSlogwang }
253d30ea906Sjfb8856606
254*2d9fd380Sjfb8856606 fman_if_set_maxfrm(dev->process_private, max_len);
255d30ea906Sjfb8856606 dev->data->mtu = max_len
2564418919fSjohnjiang - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
257d30ea906Sjfb8856606 }
258d30ea906Sjfb8856606
259d30ea906Sjfb8856606 if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
260d30ea906Sjfb8856606 DPAA_PMD_DEBUG("enabling scatter mode");
261*2d9fd380Sjfb8856606 fman_if_set_sg(dev->process_private, 1);
262d30ea906Sjfb8856606 dev->data->scattered_rx = 1;
263d30ea906Sjfb8856606 }
264d30ea906Sjfb8856606
265*2d9fd380Sjfb8856606 if (!(default_q || fmc_q)) {
266*2d9fd380Sjfb8856606 if (dpaa_fm_config(dev,
267*2d9fd380Sjfb8856606 eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
268*2d9fd380Sjfb8856606 dpaa_write_fm_config_to_file();
269*2d9fd380Sjfb8856606 DPAA_PMD_ERR("FM port configuration: Failed\n");
270*2d9fd380Sjfb8856606 return -1;
271*2d9fd380Sjfb8856606 }
272*2d9fd380Sjfb8856606 dpaa_write_fm_config_to_file();
273*2d9fd380Sjfb8856606 }
274*2d9fd380Sjfb8856606
275*2d9fd380Sjfb8856606 /* if the interrupts were configured on this devices*/
276*2d9fd380Sjfb8856606 if (intr_handle && intr_handle->fd) {
277*2d9fd380Sjfb8856606 if (dev->data->dev_conf.intr_conf.lsc != 0)
278*2d9fd380Sjfb8856606 rte_intr_callback_register(intr_handle,
279*2d9fd380Sjfb8856606 dpaa_interrupt_handler,
280*2d9fd380Sjfb8856606 (void *)dev);
281*2d9fd380Sjfb8856606
282*2d9fd380Sjfb8856606 ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
283*2d9fd380Sjfb8856606 if (ret) {
284*2d9fd380Sjfb8856606 if (dev->data->dev_conf.intr_conf.lsc != 0) {
285*2d9fd380Sjfb8856606 rte_intr_callback_unregister(intr_handle,
286*2d9fd380Sjfb8856606 dpaa_interrupt_handler,
287*2d9fd380Sjfb8856606 (void *)dev);
288*2d9fd380Sjfb8856606 if (ret == EINVAL)
289*2d9fd380Sjfb8856606 printf("Failed to enable interrupt: Not Supported\n");
290*2d9fd380Sjfb8856606 else
291*2d9fd380Sjfb8856606 printf("Failed to enable interrupt\n");
292*2d9fd380Sjfb8856606 }
293*2d9fd380Sjfb8856606 dev->data->dev_conf.intr_conf.lsc = 0;
294*2d9fd380Sjfb8856606 dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
295*2d9fd380Sjfb8856606 }
296*2d9fd380Sjfb8856606 }
297*2d9fd380Sjfb8856606
298*2d9fd380Sjfb8856606 /* Wait for link status to get updated */
299*2d9fd380Sjfb8856606 if (!link->link_status)
300*2d9fd380Sjfb8856606 sleep(1);
301*2d9fd380Sjfb8856606
302*2d9fd380Sjfb8856606 /* Configure link only if link is UP*/
303*2d9fd380Sjfb8856606 if (link->link_status) {
304*2d9fd380Sjfb8856606 if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
305*2d9fd380Sjfb8856606 /* Start autoneg only if link is not in autoneg mode */
306*2d9fd380Sjfb8856606 if (!link->link_autoneg)
307*2d9fd380Sjfb8856606 dpaa_restart_link_autoneg(__fif->node_name);
308*2d9fd380Sjfb8856606 } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
309*2d9fd380Sjfb8856606 switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
310*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_10M_HD:
311*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_10M;
312*2d9fd380Sjfb8856606 duplex = ETH_LINK_HALF_DUPLEX;
313*2d9fd380Sjfb8856606 break;
314*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_10M:
315*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_10M;
316*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
317*2d9fd380Sjfb8856606 break;
318*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_100M_HD:
319*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_100M;
320*2d9fd380Sjfb8856606 duplex = ETH_LINK_HALF_DUPLEX;
321*2d9fd380Sjfb8856606 break;
322*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_100M:
323*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_100M;
324*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
325*2d9fd380Sjfb8856606 break;
326*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_1G:
327*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_1G;
328*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
329*2d9fd380Sjfb8856606 break;
330*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_2_5G:
331*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_2_5G;
332*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
333*2d9fd380Sjfb8856606 break;
334*2d9fd380Sjfb8856606 case ETH_LINK_SPEED_10G:
335*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_10G;
336*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
337*2d9fd380Sjfb8856606 break;
338*2d9fd380Sjfb8856606 default:
339*2d9fd380Sjfb8856606 speed = ETH_SPEED_NUM_NONE;
340*2d9fd380Sjfb8856606 duplex = ETH_LINK_FULL_DUPLEX;
341*2d9fd380Sjfb8856606 break;
342*2d9fd380Sjfb8856606 }
343*2d9fd380Sjfb8856606 /* Set link speed */
344*2d9fd380Sjfb8856606 dpaa_update_link_speed(__fif->node_name, speed, duplex);
345*2d9fd380Sjfb8856606 } else {
346*2d9fd380Sjfb8856606 /* Manual autoneg - custom advertisement speed. */
347*2d9fd380Sjfb8856606 printf("Custom Advertisement speeds not supported\n");
348*2d9fd380Sjfb8856606 }
349*2d9fd380Sjfb8856606 }
350*2d9fd380Sjfb8856606
3512bfe3f2eSlogwang return 0;
3522bfe3f2eSlogwang }
3532bfe3f2eSlogwang
3542bfe3f2eSlogwang static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev * dev)3552bfe3f2eSlogwang dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
3562bfe3f2eSlogwang {
3572bfe3f2eSlogwang static const uint32_t ptypes[] = {
3582bfe3f2eSlogwang RTE_PTYPE_L2_ETHER,
3594418919fSjohnjiang RTE_PTYPE_L2_ETHER_VLAN,
3604418919fSjohnjiang RTE_PTYPE_L2_ETHER_ARP,
3614418919fSjohnjiang RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3624418919fSjohnjiang RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3634418919fSjohnjiang RTE_PTYPE_L4_ICMP,
3644418919fSjohnjiang RTE_PTYPE_L4_TCP,
3654418919fSjohnjiang RTE_PTYPE_L4_UDP,
3664418919fSjohnjiang RTE_PTYPE_L4_FRAG,
3672bfe3f2eSlogwang RTE_PTYPE_L4_TCP,
3682bfe3f2eSlogwang RTE_PTYPE_L4_UDP,
3692bfe3f2eSlogwang RTE_PTYPE_L4_SCTP
3702bfe3f2eSlogwang };
3712bfe3f2eSlogwang
3722bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
3732bfe3f2eSlogwang
3742bfe3f2eSlogwang if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
3752bfe3f2eSlogwang return ptypes;
3762bfe3f2eSlogwang return NULL;
3772bfe3f2eSlogwang }
3782bfe3f2eSlogwang
dpaa_interrupt_handler(void * param)379*2d9fd380Sjfb8856606 static void dpaa_interrupt_handler(void *param)
380*2d9fd380Sjfb8856606 {
381*2d9fd380Sjfb8856606 struct rte_eth_dev *dev = param;
382*2d9fd380Sjfb8856606 struct rte_device *rdev = dev->device;
383*2d9fd380Sjfb8856606 struct rte_dpaa_device *dpaa_dev;
384*2d9fd380Sjfb8856606 struct rte_intr_handle *intr_handle;
385*2d9fd380Sjfb8856606 uint64_t buf;
386*2d9fd380Sjfb8856606 int bytes_read;
387*2d9fd380Sjfb8856606
388*2d9fd380Sjfb8856606 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
389*2d9fd380Sjfb8856606 intr_handle = &dpaa_dev->intr_handle;
390*2d9fd380Sjfb8856606
391*2d9fd380Sjfb8856606 bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
392*2d9fd380Sjfb8856606 if (bytes_read < 0)
393*2d9fd380Sjfb8856606 DPAA_PMD_ERR("Error reading eventfd\n");
394*2d9fd380Sjfb8856606 dpaa_eth_link_update(dev, 0);
395*2d9fd380Sjfb8856606 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
396*2d9fd380Sjfb8856606 }
397*2d9fd380Sjfb8856606
dpaa_eth_dev_start(struct rte_eth_dev * dev)3982bfe3f2eSlogwang static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
3992bfe3f2eSlogwang {
4002bfe3f2eSlogwang struct dpaa_if *dpaa_intf = dev->data->dev_private;
4012bfe3f2eSlogwang
4022bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
4032bfe3f2eSlogwang
404*2d9fd380Sjfb8856606 if (!(default_q || fmc_q))
405*2d9fd380Sjfb8856606 dpaa_write_fm_config_to_file();
406*2d9fd380Sjfb8856606
4072bfe3f2eSlogwang /* Change tx callback to the real one */
408*2d9fd380Sjfb8856606 if (dpaa_intf->cgr_tx)
409*2d9fd380Sjfb8856606 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
410*2d9fd380Sjfb8856606 else
4112bfe3f2eSlogwang dev->tx_pkt_burst = dpaa_eth_queue_tx;
412*2d9fd380Sjfb8856606
413*2d9fd380Sjfb8856606 fman_if_enable_rx(dev->process_private);
4142bfe3f2eSlogwang
4152bfe3f2eSlogwang return 0;
4162bfe3f2eSlogwang }
4172bfe3f2eSlogwang
dpaa_eth_dev_stop(struct rte_eth_dev * dev)418*2d9fd380Sjfb8856606 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
4192bfe3f2eSlogwang {
420*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
4212bfe3f2eSlogwang
4222bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
423*2d9fd380Sjfb8856606 dev->data->dev_started = 0;
4242bfe3f2eSlogwang
425*2d9fd380Sjfb8856606 if (!fif->is_shared_mac)
426*2d9fd380Sjfb8856606 fman_if_disable_rx(fif);
4272bfe3f2eSlogwang dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
428*2d9fd380Sjfb8856606
429*2d9fd380Sjfb8856606 return 0;
4302bfe3f2eSlogwang }
4312bfe3f2eSlogwang
dpaa_eth_dev_close(struct rte_eth_dev * dev)432*2d9fd380Sjfb8856606 static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
4332bfe3f2eSlogwang {
434*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
435*2d9fd380Sjfb8856606 struct __fman_if *__fif;
436*2d9fd380Sjfb8856606 struct rte_device *rdev = dev->device;
437*2d9fd380Sjfb8856606 struct rte_dpaa_device *dpaa_dev;
438*2d9fd380Sjfb8856606 struct rte_intr_handle *intr_handle;
439*2d9fd380Sjfb8856606 struct rte_eth_link *link = &dev->data->dev_link;
440*2d9fd380Sjfb8856606 struct dpaa_if *dpaa_intf = dev->data->dev_private;
441*2d9fd380Sjfb8856606 int loop;
442*2d9fd380Sjfb8856606 int ret;
443*2d9fd380Sjfb8856606
4442bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
4452bfe3f2eSlogwang
446*2d9fd380Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
447*2d9fd380Sjfb8856606 return 0;
448*2d9fd380Sjfb8856606
449*2d9fd380Sjfb8856606 if (!dpaa_intf) {
450*2d9fd380Sjfb8856606 DPAA_PMD_WARN("Already closed or not started");
451*2d9fd380Sjfb8856606 return -1;
452*2d9fd380Sjfb8856606 }
453*2d9fd380Sjfb8856606
454*2d9fd380Sjfb8856606 /* DPAA FM deconfig */
455*2d9fd380Sjfb8856606 if (!(default_q || fmc_q)) {
456*2d9fd380Sjfb8856606 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
457*2d9fd380Sjfb8856606 DPAA_PMD_WARN("DPAA FM deconfig failed\n");
458*2d9fd380Sjfb8856606 }
459*2d9fd380Sjfb8856606
460*2d9fd380Sjfb8856606 dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
461*2d9fd380Sjfb8856606 intr_handle = &dpaa_dev->intr_handle;
462*2d9fd380Sjfb8856606 __fif = container_of(fif, struct __fman_if, __if);
463*2d9fd380Sjfb8856606
464*2d9fd380Sjfb8856606 ret = dpaa_eth_dev_stop(dev);
465*2d9fd380Sjfb8856606
466*2d9fd380Sjfb8856606 /* Reset link to autoneg */
467*2d9fd380Sjfb8856606 if (link->link_status && !link->link_autoneg)
468*2d9fd380Sjfb8856606 dpaa_restart_link_autoneg(__fif->node_name);
469*2d9fd380Sjfb8856606
470*2d9fd380Sjfb8856606 if (intr_handle && intr_handle->fd &&
471*2d9fd380Sjfb8856606 dev->data->dev_conf.intr_conf.lsc != 0) {
472*2d9fd380Sjfb8856606 dpaa_intr_disable(__fif->node_name);
473*2d9fd380Sjfb8856606 rte_intr_callback_unregister(intr_handle,
474*2d9fd380Sjfb8856606 dpaa_interrupt_handler,
475*2d9fd380Sjfb8856606 (void *)dev);
476*2d9fd380Sjfb8856606 }
477*2d9fd380Sjfb8856606
478*2d9fd380Sjfb8856606 /* release configuration memory */
479*2d9fd380Sjfb8856606 if (dpaa_intf->fc_conf)
480*2d9fd380Sjfb8856606 rte_free(dpaa_intf->fc_conf);
481*2d9fd380Sjfb8856606
482*2d9fd380Sjfb8856606 /* Release RX congestion Groups */
483*2d9fd380Sjfb8856606 if (dpaa_intf->cgr_rx) {
484*2d9fd380Sjfb8856606 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
485*2d9fd380Sjfb8856606 qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
486*2d9fd380Sjfb8856606
487*2d9fd380Sjfb8856606 qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
488*2d9fd380Sjfb8856606 dpaa_intf->nb_rx_queues);
489*2d9fd380Sjfb8856606 }
490*2d9fd380Sjfb8856606
491*2d9fd380Sjfb8856606 rte_free(dpaa_intf->cgr_rx);
492*2d9fd380Sjfb8856606 dpaa_intf->cgr_rx = NULL;
493*2d9fd380Sjfb8856606 /* Release TX congestion Groups */
494*2d9fd380Sjfb8856606 if (dpaa_intf->cgr_tx) {
495*2d9fd380Sjfb8856606 for (loop = 0; loop < MAX_DPAA_CORES; loop++)
496*2d9fd380Sjfb8856606 qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
497*2d9fd380Sjfb8856606
498*2d9fd380Sjfb8856606 qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
499*2d9fd380Sjfb8856606 MAX_DPAA_CORES);
500*2d9fd380Sjfb8856606 rte_free(dpaa_intf->cgr_tx);
501*2d9fd380Sjfb8856606 dpaa_intf->cgr_tx = NULL;
502*2d9fd380Sjfb8856606 }
503*2d9fd380Sjfb8856606
504*2d9fd380Sjfb8856606 rte_free(dpaa_intf->rx_queues);
505*2d9fd380Sjfb8856606 dpaa_intf->rx_queues = NULL;
506*2d9fd380Sjfb8856606
507*2d9fd380Sjfb8856606 rte_free(dpaa_intf->tx_queues);
508*2d9fd380Sjfb8856606 dpaa_intf->tx_queues = NULL;
509*2d9fd380Sjfb8856606
510*2d9fd380Sjfb8856606 return ret;
5112bfe3f2eSlogwang }
5122bfe3f2eSlogwang
5132bfe3f2eSlogwang static int
dpaa_fw_version_get(struct rte_eth_dev * dev __rte_unused,char * fw_version,size_t fw_size)5142bfe3f2eSlogwang dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
5152bfe3f2eSlogwang char *fw_version,
5162bfe3f2eSlogwang size_t fw_size)
5172bfe3f2eSlogwang {
5182bfe3f2eSlogwang int ret;
5192bfe3f2eSlogwang FILE *svr_file = NULL;
5202bfe3f2eSlogwang unsigned int svr_ver = 0;
5212bfe3f2eSlogwang
5222bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
5232bfe3f2eSlogwang
5242bfe3f2eSlogwang svr_file = fopen(DPAA_SOC_ID_FILE, "r");
5252bfe3f2eSlogwang if (!svr_file) {
5262bfe3f2eSlogwang DPAA_PMD_ERR("Unable to open SoC device");
5272bfe3f2eSlogwang return -ENOTSUP; /* Not supported on this infra */
5282bfe3f2eSlogwang }
529d30ea906Sjfb8856606 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
530d30ea906Sjfb8856606 dpaa_svr_family = svr_ver & SVR_MASK;
531d30ea906Sjfb8856606 else
5322bfe3f2eSlogwang DPAA_PMD_ERR("Unable to read SoC device");
5332bfe3f2eSlogwang
5342bfe3f2eSlogwang fclose(svr_file);
5352bfe3f2eSlogwang
5362bfe3f2eSlogwang ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
5372bfe3f2eSlogwang svr_ver, fman_ip_rev);
5382bfe3f2eSlogwang ret += 1; /* add the size of '\0' */
5392bfe3f2eSlogwang
5402bfe3f2eSlogwang if (fw_size < (uint32_t)ret)
5412bfe3f2eSlogwang return ret;
5422bfe3f2eSlogwang else
5432bfe3f2eSlogwang return 0;
5442bfe3f2eSlogwang }
5452bfe3f2eSlogwang
dpaa_eth_dev_info(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info)5464418919fSjohnjiang static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
5472bfe3f2eSlogwang struct rte_eth_dev_info *dev_info)
5482bfe3f2eSlogwang {
5492bfe3f2eSlogwang struct dpaa_if *dpaa_intf = dev->data->dev_private;
550*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
5512bfe3f2eSlogwang
5524418919fSjohnjiang DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
5532bfe3f2eSlogwang
5542bfe3f2eSlogwang dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
5552bfe3f2eSlogwang dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
5562bfe3f2eSlogwang dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
5572bfe3f2eSlogwang dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
5582bfe3f2eSlogwang dev_info->max_hash_mac_addrs = 0;
5592bfe3f2eSlogwang dev_info->max_vfs = 0;
5602bfe3f2eSlogwang dev_info->max_vmdq_pools = ETH_16_POOLS;
5612bfe3f2eSlogwang dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
562d30ea906Sjfb8856606
563*2d9fd380Sjfb8856606 if (fif->mac_type == fman_mac_1g) {
564*2d9fd380Sjfb8856606 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
565*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_10M
566*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M_HD
567*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M
568*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_1G;
569*2d9fd380Sjfb8856606 } else if (fif->mac_type == fman_mac_2_5g) {
570*2d9fd380Sjfb8856606 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
571*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_10M
572*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M_HD
573*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M
574*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_1G
575*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_2_5G;
576*2d9fd380Sjfb8856606 } else if (fif->mac_type == fman_mac_10g) {
577*2d9fd380Sjfb8856606 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
578*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_10M
579*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M_HD
580*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_100M
581*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_1G
582*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_2_5G
583*2d9fd380Sjfb8856606 | ETH_LINK_SPEED_10G;
5844418919fSjohnjiang } else {
585d30ea906Sjfb8856606 DPAA_PMD_ERR("invalid link_speed: %s, %d",
586*2d9fd380Sjfb8856606 dpaa_intf->name, fif->mac_type);
5874418919fSjohnjiang return -EINVAL;
5884418919fSjohnjiang }
589d30ea906Sjfb8856606
590d30ea906Sjfb8856606 dev_info->rx_offload_capa = dev_rx_offloads_sup |
591d30ea906Sjfb8856606 dev_rx_offloads_nodis;
592d30ea906Sjfb8856606 dev_info->tx_offload_capa = dev_tx_offloads_sup |
593d30ea906Sjfb8856606 dev_tx_offloads_nodis;
594d30ea906Sjfb8856606 dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
595d30ea906Sjfb8856606 dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
596*2d9fd380Sjfb8856606 dev_info->default_rxportconf.nb_queues = 1;
597*2d9fd380Sjfb8856606 dev_info->default_txportconf.nb_queues = 1;
598*2d9fd380Sjfb8856606 dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
599*2d9fd380Sjfb8856606 dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
6004418919fSjohnjiang
6014418919fSjohnjiang return 0;
6022bfe3f2eSlogwang }
6032bfe3f2eSlogwang
604*2d9fd380Sjfb8856606 static int
dpaa_dev_rx_burst_mode_get(struct rte_eth_dev * dev,__rte_unused uint16_t queue_id,struct rte_eth_burst_mode * mode)605*2d9fd380Sjfb8856606 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
606*2d9fd380Sjfb8856606 __rte_unused uint16_t queue_id,
607*2d9fd380Sjfb8856606 struct rte_eth_burst_mode *mode)
608*2d9fd380Sjfb8856606 {
609*2d9fd380Sjfb8856606 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
610*2d9fd380Sjfb8856606 int ret = -EINVAL;
611*2d9fd380Sjfb8856606 unsigned int i;
612*2d9fd380Sjfb8856606 const struct burst_info {
613*2d9fd380Sjfb8856606 uint64_t flags;
614*2d9fd380Sjfb8856606 const char *output;
615*2d9fd380Sjfb8856606 } rx_offload_map[] = {
616*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
617*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
618*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
619*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
620*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
621*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
622*2d9fd380Sjfb8856606 {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
623*2d9fd380Sjfb8856606 };
624*2d9fd380Sjfb8856606
625*2d9fd380Sjfb8856606 /* Update Rx offload info */
626*2d9fd380Sjfb8856606 for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
627*2d9fd380Sjfb8856606 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
628*2d9fd380Sjfb8856606 snprintf(mode->info, sizeof(mode->info), "%s",
629*2d9fd380Sjfb8856606 rx_offload_map[i].output);
630*2d9fd380Sjfb8856606 ret = 0;
631*2d9fd380Sjfb8856606 break;
632*2d9fd380Sjfb8856606 }
633*2d9fd380Sjfb8856606 }
634*2d9fd380Sjfb8856606 return ret;
635*2d9fd380Sjfb8856606 }
636*2d9fd380Sjfb8856606
637*2d9fd380Sjfb8856606 static int
dpaa_dev_tx_burst_mode_get(struct rte_eth_dev * dev,__rte_unused uint16_t queue_id,struct rte_eth_burst_mode * mode)638*2d9fd380Sjfb8856606 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
639*2d9fd380Sjfb8856606 __rte_unused uint16_t queue_id,
640*2d9fd380Sjfb8856606 struct rte_eth_burst_mode *mode)
641*2d9fd380Sjfb8856606 {
642*2d9fd380Sjfb8856606 struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
643*2d9fd380Sjfb8856606 int ret = -EINVAL;
644*2d9fd380Sjfb8856606 unsigned int i;
645*2d9fd380Sjfb8856606 const struct burst_info {
646*2d9fd380Sjfb8856606 uint64_t flags;
647*2d9fd380Sjfb8856606 const char *output;
648*2d9fd380Sjfb8856606 } tx_offload_map[] = {
649*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
650*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
651*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
652*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
653*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
654*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
655*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
656*2d9fd380Sjfb8856606 {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
657*2d9fd380Sjfb8856606 };
658*2d9fd380Sjfb8856606
659*2d9fd380Sjfb8856606 /* Update Tx offload info */
660*2d9fd380Sjfb8856606 for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
661*2d9fd380Sjfb8856606 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
662*2d9fd380Sjfb8856606 snprintf(mode->info, sizeof(mode->info), "%s",
663*2d9fd380Sjfb8856606 tx_offload_map[i].output);
664*2d9fd380Sjfb8856606 ret = 0;
665*2d9fd380Sjfb8856606 break;
666*2d9fd380Sjfb8856606 }
667*2d9fd380Sjfb8856606 }
668*2d9fd380Sjfb8856606 return ret;
669*2d9fd380Sjfb8856606 }
670*2d9fd380Sjfb8856606
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
	int ioctl_version;

	PMD_INIT_FUNC_TRACE();

	ioctl_version = dpaa_get_ioctl_version_number();

	/* With link-state interrupts the kernel is the source of truth;
	 * otherwise fall back to the interface's validity flag.
	 */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		int ret = dpaa_get_link_status(__fif->node_name, link);

		if (ret)
			return ret;
	} else {
		link->link_status = dpaa_intf->valid;
	}

	/* Old ioctl interface cannot report duplex/autoneg/speed, so
	 * derive them from the MAC type instead.
	 */
	if (ioctl_version < 2) {
		link->link_duplex = ETH_LINK_FULL_DUPLEX;
		link->link_autoneg = ETH_LINK_AUTONEG;

		switch (fif->mac_type) {
		case fman_mac_1g:
			link->link_speed = ETH_SPEED_NUM_1G;
			break;
		case fman_mac_2_5g:
			link->link_speed = ETH_SPEED_NUM_2_5G;
			break;
		case fman_mac_10g:
			link->link_speed = ETH_SPEED_NUM_10G;
			break;
		default:
			DPAA_PMD_ERR("invalid link_speed: %s, %d",
				     dpaa_intf->name, fif->mac_type);
			break;
		}
	}

	DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
		      link->link_status ? "Up" : "Down");
	return 0;
}
7122bfe3f2eSlogwang
dpaa_eth_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * stats)7132bfe3f2eSlogwang static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
7142bfe3f2eSlogwang struct rte_eth_stats *stats)
7152bfe3f2eSlogwang {
7162bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
7172bfe3f2eSlogwang
718*2d9fd380Sjfb8856606 fman_if_stats_get(dev->process_private, stats);
7192bfe3f2eSlogwang return 0;
7202bfe3f2eSlogwang }
7212bfe3f2eSlogwang
dpaa_eth_stats_reset(struct rte_eth_dev * dev)7224418919fSjohnjiang static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
7232bfe3f2eSlogwang {
7242bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
7252bfe3f2eSlogwang
726*2d9fd380Sjfb8856606 fman_if_stats_reset(dev->process_private);
7274418919fSjohnjiang
7284418919fSjohnjiang return 0;
7292bfe3f2eSlogwang }
7302bfe3f2eSlogwang
7312bfe3f2eSlogwang static int
dpaa_dev_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned int n)7322bfe3f2eSlogwang dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
7332bfe3f2eSlogwang unsigned int n)
7342bfe3f2eSlogwang {
7352bfe3f2eSlogwang unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
7362bfe3f2eSlogwang uint64_t values[sizeof(struct dpaa_if_stats) / 8];
7372bfe3f2eSlogwang
7385af785ecSfengbojiang(姜凤波) if (n < num)
7395af785ecSfengbojiang(姜凤波) return num;
7405af785ecSfengbojiang(姜凤波)
741d30ea906Sjfb8856606 if (xstats == NULL)
742d30ea906Sjfb8856606 return 0;
743d30ea906Sjfb8856606
744*2d9fd380Sjfb8856606 fman_if_stats_get_all(dev->process_private, values,
7452bfe3f2eSlogwang sizeof(struct dpaa_if_stats) / 8);
7462bfe3f2eSlogwang
7472bfe3f2eSlogwang for (i = 0; i < num; i++) {
7482bfe3f2eSlogwang xstats[i].id = i;
7492bfe3f2eSlogwang xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
7502bfe3f2eSlogwang }
7512bfe3f2eSlogwang return i;
7522bfe3f2eSlogwang }
7532bfe3f2eSlogwang
7542bfe3f2eSlogwang static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,unsigned int limit)7552bfe3f2eSlogwang dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
7562bfe3f2eSlogwang struct rte_eth_xstat_name *xstats_names,
757579bf1e2Sjfb8856606 unsigned int limit)
7582bfe3f2eSlogwang {
7592bfe3f2eSlogwang unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
7602bfe3f2eSlogwang
761579bf1e2Sjfb8856606 if (limit < stat_cnt)
762579bf1e2Sjfb8856606 return stat_cnt;
763579bf1e2Sjfb8856606
7642bfe3f2eSlogwang if (xstats_names != NULL)
7652bfe3f2eSlogwang for (i = 0; i < stat_cnt; i++)
7664418919fSjohnjiang strlcpy(xstats_names[i].name,
7674418919fSjohnjiang dpaa_xstats_strings[i].name,
7684418919fSjohnjiang sizeof(xstats_names[i].name));
7692bfe3f2eSlogwang
7702bfe3f2eSlogwang return stat_cnt;
7712bfe3f2eSlogwang }
7722bfe3f2eSlogwang
7732bfe3f2eSlogwang static int
dpaa_xstats_get_by_id(struct rte_eth_dev * dev,const uint64_t * ids,uint64_t * values,unsigned int n)7742bfe3f2eSlogwang dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
7752bfe3f2eSlogwang uint64_t *values, unsigned int n)
7762bfe3f2eSlogwang {
7772bfe3f2eSlogwang unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
7782bfe3f2eSlogwang uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
7792bfe3f2eSlogwang
7802bfe3f2eSlogwang if (!ids) {
7812bfe3f2eSlogwang if (n < stat_cnt)
7822bfe3f2eSlogwang return stat_cnt;
7832bfe3f2eSlogwang
7842bfe3f2eSlogwang if (!values)
7852bfe3f2eSlogwang return 0;
7862bfe3f2eSlogwang
787*2d9fd380Sjfb8856606 fman_if_stats_get_all(dev->process_private, values_copy,
788579bf1e2Sjfb8856606 sizeof(struct dpaa_if_stats) / 8);
7892bfe3f2eSlogwang
7902bfe3f2eSlogwang for (i = 0; i < stat_cnt; i++)
7912bfe3f2eSlogwang values[i] =
7922bfe3f2eSlogwang values_copy[dpaa_xstats_strings[i].offset / 8];
7932bfe3f2eSlogwang
7942bfe3f2eSlogwang return stat_cnt;
7952bfe3f2eSlogwang }
7962bfe3f2eSlogwang
7972bfe3f2eSlogwang dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
7982bfe3f2eSlogwang
7992bfe3f2eSlogwang for (i = 0; i < n; i++) {
8002bfe3f2eSlogwang if (ids[i] >= stat_cnt) {
8012bfe3f2eSlogwang DPAA_PMD_ERR("id value isn't valid");
8022bfe3f2eSlogwang return -1;
8032bfe3f2eSlogwang }
8042bfe3f2eSlogwang values[i] = values_copy[ids[i]];
8052bfe3f2eSlogwang }
8062bfe3f2eSlogwang return n;
8072bfe3f2eSlogwang }
8082bfe3f2eSlogwang
8092bfe3f2eSlogwang static int
dpaa_xstats_get_names_by_id(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,const uint64_t * ids,unsigned int limit)8102bfe3f2eSlogwang dpaa_xstats_get_names_by_id(
8112bfe3f2eSlogwang struct rte_eth_dev *dev,
8122bfe3f2eSlogwang struct rte_eth_xstat_name *xstats_names,
8132bfe3f2eSlogwang const uint64_t *ids,
8142bfe3f2eSlogwang unsigned int limit)
8152bfe3f2eSlogwang {
8162bfe3f2eSlogwang unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
8172bfe3f2eSlogwang struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
8182bfe3f2eSlogwang
8192bfe3f2eSlogwang if (!ids)
8202bfe3f2eSlogwang return dpaa_xstats_get_names(dev, xstats_names, limit);
8212bfe3f2eSlogwang
8222bfe3f2eSlogwang dpaa_xstats_get_names(dev, xstats_names_copy, limit);
8232bfe3f2eSlogwang
8242bfe3f2eSlogwang for (i = 0; i < limit; i++) {
8252bfe3f2eSlogwang if (ids[i] >= stat_cnt) {
8262bfe3f2eSlogwang DPAA_PMD_ERR("id value isn't valid");
8272bfe3f2eSlogwang return -1;
8282bfe3f2eSlogwang }
8292bfe3f2eSlogwang strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
8302bfe3f2eSlogwang }
8312bfe3f2eSlogwang return limit;
8322bfe3f2eSlogwang }
8332bfe3f2eSlogwang
dpaa_eth_promiscuous_enable(struct rte_eth_dev * dev)8344418919fSjohnjiang static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
8352bfe3f2eSlogwang {
8362bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
8372bfe3f2eSlogwang
838*2d9fd380Sjfb8856606 fman_if_promiscuous_enable(dev->process_private);
8394418919fSjohnjiang
8404418919fSjohnjiang return 0;
8412bfe3f2eSlogwang }
8422bfe3f2eSlogwang
dpaa_eth_promiscuous_disable(struct rte_eth_dev * dev)8434418919fSjohnjiang static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
8442bfe3f2eSlogwang {
8452bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
8462bfe3f2eSlogwang
847*2d9fd380Sjfb8856606 fman_if_promiscuous_disable(dev->process_private);
8484418919fSjohnjiang
8494418919fSjohnjiang return 0;
8502bfe3f2eSlogwang }
8512bfe3f2eSlogwang
dpaa_eth_multicast_enable(struct rte_eth_dev * dev)8524418919fSjohnjiang static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
8532bfe3f2eSlogwang {
8542bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
8552bfe3f2eSlogwang
856*2d9fd380Sjfb8856606 fman_if_set_mcast_filter_table(dev->process_private);
8574418919fSjohnjiang
8584418919fSjohnjiang return 0;
8592bfe3f2eSlogwang }
8602bfe3f2eSlogwang
dpaa_eth_multicast_disable(struct rte_eth_dev * dev)8614418919fSjohnjiang static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
8622bfe3f2eSlogwang {
8632bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
8642bfe3f2eSlogwang
865*2d9fd380Sjfb8856606 fman_if_reset_mcast_filter_table(dev->process_private);
866*2d9fd380Sjfb8856606
867*2d9fd380Sjfb8856606 return 0;
868*2d9fd380Sjfb8856606 }
869*2d9fd380Sjfb8856606
/* Program per-port buffer handling in the FMan hardware:
 * internal-context copy parameters, the frame-data offset, and the
 * backing buffer pool.  Requires dpaa_intf->bp_info to be set by the
 * caller.  NOTE(review): called only for non-shared-MAC interfaces
 * (see dpaa_eth_rx_queue_setup); shared-MAC ports are presumably
 * configured by the kernel — confirm.
 */
static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if_ic_params icp;
	uint32_t fd_offset;
	uint32_t bp_size;

	/* Internal-context copy parameters: offsets and size default to
	 * the driver-wide constants (ICEOF default is 0).
	 */
	memset(&icp, 0, sizeof(icp));
	icp.iciof = DEFAULT_ICIOF;
	icp.iceof = DEFAULT_RX_ICEOF;
	icp.icsz = DEFAULT_ICSZ;
	fman_if_set_ic_params(dev->process_private, &icp);

	/* Frame data starts after the mbuf headroom plus the area the
	 * hardware reserves in front of each buffer.
	 */
	fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
	fman_if_set_fdoff(dev->process_private, fd_offset);

	/* Buffer pool size should be equal to Dataroom Size*/
	bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);

	/* Bind the mempool's backing bpool (id + element size) to the port. */
	fman_if_set_bp(dev->process_private,
		       dpaa_intf->bp_info->mp->size,
		       dpaa_intf->bp_info->bpid, bp_size);
}
894*2d9fd380Sjfb8856606
dpaa_eth_rx_queue_bp_check(struct rte_eth_dev * dev,int8_t vsp_id,uint32_t bpid)895*2d9fd380Sjfb8856606 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
896*2d9fd380Sjfb8856606 int8_t vsp_id, uint32_t bpid)
897*2d9fd380Sjfb8856606 {
898*2d9fd380Sjfb8856606 struct dpaa_if *dpaa_intf = dev->data->dev_private;
899*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
900*2d9fd380Sjfb8856606
901*2d9fd380Sjfb8856606 if (fif->num_profiles) {
902*2d9fd380Sjfb8856606 if (vsp_id < 0)
903*2d9fd380Sjfb8856606 vsp_id = fif->base_profile_id;
904*2d9fd380Sjfb8856606 } else {
905*2d9fd380Sjfb8856606 if (vsp_id < 0)
906*2d9fd380Sjfb8856606 vsp_id = 0;
907*2d9fd380Sjfb8856606 }
908*2d9fd380Sjfb8856606
909*2d9fd380Sjfb8856606 if (dpaa_intf->vsp_bpid[vsp_id] &&
910*2d9fd380Sjfb8856606 bpid != dpaa_intf->vsp_bpid[vsp_id]) {
911*2d9fd380Sjfb8856606 DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
912*2d9fd380Sjfb8856606
913*2d9fd380Sjfb8856606 return -1;
914*2d9fd380Sjfb8856606 }
9154418919fSjohnjiang
9164418919fSjohnjiang return 0;
9172bfe3f2eSlogwang }
9182bfe3f2eSlogwang
/* Set up Rx queue 'queue_idx' on the port:
 *  - validates the index, deferred-start flag and mempool/VSP pairing;
 *  - checks that the configured max frame fits the buffer size (or that
 *    scatter-gather is enabled);
 *  - binds the buffer pool to the port (direct-MAC ports only);
 *  - optionally initializes the FQ in "push" (static dequeue) mode with
 *    stashing, a dedicated pool channel, a qman portal and an Rx
 *    interrupt vector;
 *  - programs CGR-based tail drop sized by nb_desc when a CGR exists.
 * Returns 0 on success or a negative errno-style value.
 */
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
		return -EINVAL;
	}
	/* No descriptor limit unless a CGR refines it below. */
	rxq->nb_desc = UINT16_MAX;
	rxq->offloads = rx_conf->offloads;

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
			queue_idx, rxq->fqid);

	/* Without VSPs all queues share one pool; with VSPs each profile
	 * gets its own pool, checked per-queue.
	 */
	if (!fif->num_profiles) {
		if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
			dpaa_intf->bp_info->mp != mp) {
			DPAA_PMD_WARN("Multiple pools on same interface not"
				      " supported");
			return -EINVAL;
		}
	} else {
		if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
			return -EINVAL;
		}
	}

	/* Max packet can fit in single buffer */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
		;
	} else if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCATTER) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			buffsz * DPAA_SGT_MAX_ENTRIES) {
			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
				"MaxSGlist %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				buffsz * DPAA_SGT_MAX_ENTRIES);
			rte_errno = EOVERFLOW;
			return -rte_errno;
		}
	} else {
		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     buffsz - RTE_PKTMBUF_HEADROOM);
	}

	dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	/* For shared interface, it's done in kernel, skip.*/
	if (!fif->is_shared_mac)
		dpaa_fman_if_pool_setup(dev);

	/* Record/program the buffer pool per storage profile. */
	if (fif->num_profiles) {
		int8_t vsp_id = rxq->vsp_id;

		if (vsp_id >= 0) {
			ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
					DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
					fif);
			if (ret) {
				DPAA_PMD_ERR("dpaa_port_vsp_update failed");
				return ret;
			}
		} else {
			DPAA_PMD_INFO("Base profile is associated to"
				" RXQ fqid:%d\r\n", rxq->fqid);
			if (fif->is_shared_mac) {
				DPAA_PMD_ERR("Fatal: Base profile is associated"
					     " to shared interface on DPDK.");
				return -EINVAL;
			}
			dpaa_intf->vsp_bpid[fif->base_profile_id] =
				DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
		}
	} else {
		dpaa_intf->vsp_bpid[0] =
			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
	}

	dpaa_intf->valid = 1;
	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
		fman_if_get_sg_enable(fif),
		dev->data->dev_conf.rxmode.max_rx_pkt_len);
	/* checking if push mode only, no error check for now */
	if (!rxq->is_static &&
	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		struct qman_portal *qp;
		int q_fd;

		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				   QM_FQCTRL_CTXASTASHING |
				   QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In muticore scenario stashing becomes a bottleneck on LS1046.
		 * So do not enable stashing in this case
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/*Create a channel and associate given queue with the channel*/
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret) {
			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
			return ret;
		}
		/* LS1043A needs the no-prefetch pull callback variant. */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
		} else {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		}

		rxq->is_static = true;

		/* Allocate qman specific portals */
		qp = fsl_qman_fq_portal_create(&q_fd);
		if (!qp) {
			DPAA_PMD_ERR("Unable to alloc fq portal");
			return -1;
		}
		rxq->qp = qp;

		/* Set up the device interrupt handler */
		if (!dev->intr_handle) {
			struct rte_dpaa_device *dpaa_dev;
			struct rte_device *rdev = dev->device;

			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
			dev->intr_handle = &dpaa_dev->intr_handle;
			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
					dpaa_push_mode_max_queue, 0);
			if (!dev->intr_handle->intr_vec) {
				DPAA_PMD_ERR("intr_vec alloc failed");
				return -ENOMEM;
			}
			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
		}

		/* Map this queue's portal fd into the interrupt vector. */
		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
		dev->intr_handle->efds[queue_idx] = q_fd;
		rxq->q_fd = q_fd;
	}
	rxq->bp_array = rte_dpaa_bpid_info;
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		rxq->nb_desc = nb_desc;
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			/* Non-fatal: queue still works, just without
			 * the requested tail-drop threshold.
			 */
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}
	/* Enable main queue to receive error packets also by default */
	fman_if_set_err_fqid(fif, rxq->fqid);
	return 0;
}
1129d30ea906Sjfb8856606
/* Attach Rx queue 'eth_rx_queue_id' to an eventdev Rx adapter: the FQ
 * is re-initialized to dequeue into pool channel 'ch_id' with the
 * scheduling type and priority from 'queue_conf'.  Incompatible with
 * push-mode queues (warned, not rejected).  Returns 0 on success,
 * negative on failure.
 */
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
			      "PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	/* Pick the dequeue callback and FQ control bits matching the
	 * requested event scheduling type.
	 */
	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		/* Parallel scheduling. */
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	/* Route dequeues to the adapter's channel at the event priority. */
	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	/* Preserve congestion-group tail drop if configured on this port. */
	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
			"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}
1193d30ea906Sjfb8856606
/* Detach Rx queue 'eth_rx_queue_id' from the eventdev Rx adapter by
 * re-initializing the FQ with the default poll-mode configuration and
 * clearing the event dequeue callback.  Always returns 0; a failed
 * qman_init_fq is only logged.
 */
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	/* NOTE(review): 'opts' is not zero-initialized here; presumably
	 * dpaa_poll_queue_default_config() fully initializes it — confirm.
	 */
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	/* Keep congestion-group tail drop if this port uses a CGR. */
	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		/* Deliberately non-fatal: detach proceeds regardless. */
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}
12232bfe3f2eSlogwang
/* No-op: Rx queues live inside the statically allocated dpaa_if
 * structure (see dpaa_intf->rx_queues usage above), so there is
 * nothing to free per queue.
 */
static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
12292bfe3f2eSlogwang
12302bfe3f2eSlogwang static
dpaa_eth_tx_queue_setup(struct rte_eth_dev * dev,uint16_t queue_idx,uint16_t nb_desc __rte_unused,unsigned int socket_id __rte_unused,const struct rte_eth_txconf * tx_conf)12312bfe3f2eSlogwang int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
12322bfe3f2eSlogwang uint16_t nb_desc __rte_unused,
12332bfe3f2eSlogwang unsigned int socket_id __rte_unused,
1234*2d9fd380Sjfb8856606 const struct rte_eth_txconf *tx_conf)
12352bfe3f2eSlogwang {
12362bfe3f2eSlogwang struct dpaa_if *dpaa_intf = dev->data->dev_private;
1237*2d9fd380Sjfb8856606 struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
12382bfe3f2eSlogwang
12392bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
12402bfe3f2eSlogwang
1241*2d9fd380Sjfb8856606 /* Tx deferred start is not supported */
1242*2d9fd380Sjfb8856606 if (tx_conf->tx_deferred_start) {
1243*2d9fd380Sjfb8856606 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1244*2d9fd380Sjfb8856606 return -EINVAL;
1245*2d9fd380Sjfb8856606 }
1246*2d9fd380Sjfb8856606 txq->nb_desc = UINT16_MAX;
1247*2d9fd380Sjfb8856606 txq->offloads = tx_conf->offloads;
1248*2d9fd380Sjfb8856606
1249d30ea906Sjfb8856606 if (queue_idx >= dev->data->nb_tx_queues) {
1250d30ea906Sjfb8856606 rte_errno = EOVERFLOW;
1251d30ea906Sjfb8856606 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
1252d30ea906Sjfb8856606 (void *)dev, queue_idx, dev->data->nb_tx_queues);
1253d30ea906Sjfb8856606 return -rte_errno;
1254d30ea906Sjfb8856606 }
1255d30ea906Sjfb8856606
1256d30ea906Sjfb8856606 DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
1257*2d9fd380Sjfb8856606 queue_idx, txq->fqid);
1258*2d9fd380Sjfb8856606 dev->data->tx_queues[queue_idx] = txq;
1259*2d9fd380Sjfb8856606
12602bfe3f2eSlogwang return 0;
12612bfe3f2eSlogwang }
12622bfe3f2eSlogwang
/* No-op: Tx queues live inside the statically allocated dpaa_if
 * structure (see dpaa_intf->tx_queues usage above), so there is
 * nothing to free per queue.
 */
static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
12672bfe3f2eSlogwang
1268d30ea906Sjfb8856606 static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev * dev,uint16_t rx_queue_id)1269d30ea906Sjfb8856606 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1270d30ea906Sjfb8856606 {
1271d30ea906Sjfb8856606 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1272d30ea906Sjfb8856606 struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
1273d30ea906Sjfb8856606 u32 frm_cnt = 0;
1274d30ea906Sjfb8856606
1275d30ea906Sjfb8856606 PMD_INIT_FUNC_TRACE();
1276d30ea906Sjfb8856606
1277d30ea906Sjfb8856606 if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
12780c6bd470Sfengbojiang DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
1279d30ea906Sjfb8856606 rx_queue_id, frm_cnt);
1280d30ea906Sjfb8856606 }
1281d30ea906Sjfb8856606 return frm_cnt;
1282d30ea906Sjfb8856606 }
1283d30ea906Sjfb8856606
dpaa_link_down(struct rte_eth_dev * dev)12842bfe3f2eSlogwang static int dpaa_link_down(struct rte_eth_dev *dev)
12852bfe3f2eSlogwang {
1286*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
1287*2d9fd380Sjfb8856606 struct __fman_if *__fif;
1288*2d9fd380Sjfb8856606
12892bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
12902bfe3f2eSlogwang
1291*2d9fd380Sjfb8856606 __fif = container_of(fif, struct __fman_if, __if);
1292*2d9fd380Sjfb8856606
1293*2d9fd380Sjfb8856606 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1294*2d9fd380Sjfb8856606 dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
1295*2d9fd380Sjfb8856606 else
1296*2d9fd380Sjfb8856606 return dpaa_eth_dev_stop(dev);
12972bfe3f2eSlogwang return 0;
12982bfe3f2eSlogwang }
12992bfe3f2eSlogwang
dpaa_link_up(struct rte_eth_dev * dev)13002bfe3f2eSlogwang static int dpaa_link_up(struct rte_eth_dev *dev)
13012bfe3f2eSlogwang {
1302*2d9fd380Sjfb8856606 struct fman_if *fif = dev->process_private;
1303*2d9fd380Sjfb8856606 struct __fman_if *__fif;
1304*2d9fd380Sjfb8856606
13052bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
13062bfe3f2eSlogwang
1307*2d9fd380Sjfb8856606 __fif = container_of(fif, struct __fman_if, __if);
1308*2d9fd380Sjfb8856606
1309*2d9fd380Sjfb8856606 if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1310*2d9fd380Sjfb8856606 dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
1311*2d9fd380Sjfb8856606 else
13122bfe3f2eSlogwang dpaa_eth_dev_start(dev);
13132bfe3f2eSlogwang return 0;
13142bfe3f2eSlogwang }
13152bfe3f2eSlogwang
13162bfe3f2eSlogwang static int
dpaa_flow_ctrl_set(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)13172bfe3f2eSlogwang dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
13182bfe3f2eSlogwang struct rte_eth_fc_conf *fc_conf)
13192bfe3f2eSlogwang {
13202bfe3f2eSlogwang struct dpaa_if *dpaa_intf = dev->data->dev_private;
13212bfe3f2eSlogwang struct rte_eth_fc_conf *net_fc;
13222bfe3f2eSlogwang
13232bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
13242bfe3f2eSlogwang
13252bfe3f2eSlogwang if (!(dpaa_intf->fc_conf)) {
13262bfe3f2eSlogwang dpaa_intf->fc_conf = rte_zmalloc(NULL,
13272bfe3f2eSlogwang sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
13282bfe3f2eSlogwang if (!dpaa_intf->fc_conf) {
13292bfe3f2eSlogwang DPAA_PMD_ERR("unable to save flow control info");
13302bfe3f2eSlogwang return -ENOMEM;
13312bfe3f2eSlogwang }
13322bfe3f2eSlogwang }
13332bfe3f2eSlogwang net_fc = dpaa_intf->fc_conf;
13342bfe3f2eSlogwang
13352bfe3f2eSlogwang if (fc_conf->high_water < fc_conf->low_water) {
13362bfe3f2eSlogwang DPAA_PMD_ERR("Incorrect Flow Control Configuration");
13372bfe3f2eSlogwang return -EINVAL;
13382bfe3f2eSlogwang }
13392bfe3f2eSlogwang
13402bfe3f2eSlogwang if (fc_conf->mode == RTE_FC_NONE) {
13412bfe3f2eSlogwang return 0;
13422bfe3f2eSlogwang } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
13432bfe3f2eSlogwang fc_conf->mode == RTE_FC_FULL) {
1344*2d9fd380Sjfb8856606 fman_if_set_fc_threshold(dev->process_private,
1345*2d9fd380Sjfb8856606 fc_conf->high_water,
13462bfe3f2eSlogwang fc_conf->low_water,
13472bfe3f2eSlogwang dpaa_intf->bp_info->bpid);
13482bfe3f2eSlogwang if (fc_conf->pause_time)
1349*2d9fd380Sjfb8856606 fman_if_set_fc_quanta(dev->process_private,
13502bfe3f2eSlogwang fc_conf->pause_time);
13512bfe3f2eSlogwang }
13522bfe3f2eSlogwang
13532bfe3f2eSlogwang /* Save the information in dpaa device */
13542bfe3f2eSlogwang net_fc->pause_time = fc_conf->pause_time;
13552bfe3f2eSlogwang net_fc->high_water = fc_conf->high_water;
13562bfe3f2eSlogwang net_fc->low_water = fc_conf->low_water;
13572bfe3f2eSlogwang net_fc->send_xon = fc_conf->send_xon;
13582bfe3f2eSlogwang net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
13592bfe3f2eSlogwang net_fc->mode = fc_conf->mode;
13602bfe3f2eSlogwang net_fc->autoneg = fc_conf->autoneg;
13612bfe3f2eSlogwang
13622bfe3f2eSlogwang return 0;
13632bfe3f2eSlogwang }
13642bfe3f2eSlogwang
13652bfe3f2eSlogwang static int
dpaa_flow_ctrl_get(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)13662bfe3f2eSlogwang dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
13672bfe3f2eSlogwang struct rte_eth_fc_conf *fc_conf)
13682bfe3f2eSlogwang {
13692bfe3f2eSlogwang struct dpaa_if *dpaa_intf = dev->data->dev_private;
13702bfe3f2eSlogwang struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
13712bfe3f2eSlogwang int ret;
13722bfe3f2eSlogwang
13732bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
13742bfe3f2eSlogwang
13752bfe3f2eSlogwang if (net_fc) {
13762bfe3f2eSlogwang fc_conf->pause_time = net_fc->pause_time;
13772bfe3f2eSlogwang fc_conf->high_water = net_fc->high_water;
13782bfe3f2eSlogwang fc_conf->low_water = net_fc->low_water;
13792bfe3f2eSlogwang fc_conf->send_xon = net_fc->send_xon;
13802bfe3f2eSlogwang fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
13812bfe3f2eSlogwang fc_conf->mode = net_fc->mode;
13822bfe3f2eSlogwang fc_conf->autoneg = net_fc->autoneg;
13832bfe3f2eSlogwang return 0;
13842bfe3f2eSlogwang }
1385*2d9fd380Sjfb8856606 ret = fman_if_get_fc_threshold(dev->process_private);
13862bfe3f2eSlogwang if (ret) {
13872bfe3f2eSlogwang fc_conf->mode = RTE_FC_TX_PAUSE;
1388*2d9fd380Sjfb8856606 fc_conf->pause_time =
1389*2d9fd380Sjfb8856606 fman_if_get_fc_quanta(dev->process_private);
13902bfe3f2eSlogwang } else {
13912bfe3f2eSlogwang fc_conf->mode = RTE_FC_NONE;
13922bfe3f2eSlogwang }
13932bfe3f2eSlogwang
13942bfe3f2eSlogwang return 0;
13952bfe3f2eSlogwang }
13962bfe3f2eSlogwang
13972bfe3f2eSlogwang static int
dpaa_dev_add_mac_addr(struct rte_eth_dev * dev,struct rte_ether_addr * addr,uint32_t index,__rte_unused uint32_t pool)13982bfe3f2eSlogwang dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
13994418919fSjohnjiang struct rte_ether_addr *addr,
14002bfe3f2eSlogwang uint32_t index,
14012bfe3f2eSlogwang __rte_unused uint32_t pool)
14022bfe3f2eSlogwang {
14032bfe3f2eSlogwang int ret;
14042bfe3f2eSlogwang
14052bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
14062bfe3f2eSlogwang
1407*2d9fd380Sjfb8856606 ret = fman_if_add_mac_addr(dev->process_private,
1408*2d9fd380Sjfb8856606 addr->addr_bytes, index);
14092bfe3f2eSlogwang
14102bfe3f2eSlogwang if (ret)
14110c6bd470Sfengbojiang DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
14122bfe3f2eSlogwang return 0;
14132bfe3f2eSlogwang }
14142bfe3f2eSlogwang
/* Remove the MAC address stored at @index from the FMAN interface's
 * address filter table.
 */
static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			 uint32_t index)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dev->process_private, index);
}
14232bfe3f2eSlogwang
1424d30ea906Sjfb8856606 static int
dpaa_dev_set_mac_addr(struct rte_eth_dev * dev,struct rte_ether_addr * addr)14252bfe3f2eSlogwang dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
14264418919fSjohnjiang struct rte_ether_addr *addr)
14272bfe3f2eSlogwang {
14282bfe3f2eSlogwang int ret;
14292bfe3f2eSlogwang
14302bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
14312bfe3f2eSlogwang
1432*2d9fd380Sjfb8856606 ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
14332bfe3f2eSlogwang if (ret)
14340c6bd470Sfengbojiang DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1435d30ea906Sjfb8856606
1436d30ea906Sjfb8856606 return ret;
14372bfe3f2eSlogwang }
14382bfe3f2eSlogwang
1439*2d9fd380Sjfb8856606 static int
dpaa_dev_rss_hash_update(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)1440*2d9fd380Sjfb8856606 dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1441*2d9fd380Sjfb8856606 struct rte_eth_rss_conf *rss_conf)
1442*2d9fd380Sjfb8856606 {
1443*2d9fd380Sjfb8856606 struct rte_eth_dev_data *data = dev->data;
1444*2d9fd380Sjfb8856606 struct rte_eth_conf *eth_conf = &data->dev_conf;
1445*2d9fd380Sjfb8856606
1446*2d9fd380Sjfb8856606 PMD_INIT_FUNC_TRACE();
1447*2d9fd380Sjfb8856606
1448*2d9fd380Sjfb8856606 if (!(default_q || fmc_q)) {
1449*2d9fd380Sjfb8856606 if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
1450*2d9fd380Sjfb8856606 DPAA_PMD_ERR("FM port configuration: Failed\n");
1451*2d9fd380Sjfb8856606 return -1;
1452*2d9fd380Sjfb8856606 }
1453*2d9fd380Sjfb8856606 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1454*2d9fd380Sjfb8856606 } else {
1455*2d9fd380Sjfb8856606 DPAA_PMD_ERR("Function not supported\n");
1456*2d9fd380Sjfb8856606 return -ENOTSUP;
1457*2d9fd380Sjfb8856606 }
1458*2d9fd380Sjfb8856606 return 0;
1459*2d9fd380Sjfb8856606 }
1460*2d9fd380Sjfb8856606
1461*2d9fd380Sjfb8856606 static int
dpaa_dev_rss_hash_conf_get(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)1462*2d9fd380Sjfb8856606 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1463*2d9fd380Sjfb8856606 struct rte_eth_rss_conf *rss_conf)
1464*2d9fd380Sjfb8856606 {
1465*2d9fd380Sjfb8856606 struct rte_eth_dev_data *data = dev->data;
1466*2d9fd380Sjfb8856606 struct rte_eth_conf *eth_conf = &data->dev_conf;
1467*2d9fd380Sjfb8856606
1468*2d9fd380Sjfb8856606 /* dpaa does not support rss_key, so length should be 0*/
1469*2d9fd380Sjfb8856606 rss_conf->rss_key_len = 0;
1470*2d9fd380Sjfb8856606 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1471*2d9fd380Sjfb8856606 return 0;
1472*2d9fd380Sjfb8856606 }
1473*2d9fd380Sjfb8856606
/* Arm the Rx interrupt (DQRI) for a queue.  Only queues bound to a
 * static portal support interrupts; otherwise -EINVAL is returned.
 */
static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
				      uint16_t queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *fq = &dpaa_intf->rx_queues[queue_id];

	return fq->is_static ?
		qman_fq_portal_irqsource_add(fq->qp, QM_PIRQ_DQRI) :
		-EINVAL;
}
14854418919fSjohnjiang
dpaa_dev_queue_intr_disable(struct rte_eth_dev * dev,uint16_t queue_id)14864418919fSjohnjiang static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
14874418919fSjohnjiang uint16_t queue_id)
14884418919fSjohnjiang {
14894418919fSjohnjiang struct dpaa_if *dpaa_intf = dev->data->dev_private;
14904418919fSjohnjiang struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
14914418919fSjohnjiang uint32_t temp;
14924418919fSjohnjiang ssize_t temp1;
14934418919fSjohnjiang
14944418919fSjohnjiang if (!rxq->is_static)
14954418919fSjohnjiang return -EINVAL;
14964418919fSjohnjiang
14974418919fSjohnjiang qman_fq_portal_irqsource_remove(rxq->qp, ~0);
14984418919fSjohnjiang
14994418919fSjohnjiang temp1 = read(rxq->q_fd, &temp, sizeof(temp));
15004418919fSjohnjiang if (temp1 != sizeof(temp))
1501*2d9fd380Sjfb8856606 DPAA_PMD_ERR("irq read error");
15024418919fSjohnjiang
15034418919fSjohnjiang qman_fq_portal_thread_irq(rxq->qp);
15044418919fSjohnjiang
15054418919fSjohnjiang return 0;
15064418919fSjohnjiang }
15074418919fSjohnjiang
1508*2d9fd380Sjfb8856606 static void
dpaa_rxq_info_get(struct rte_eth_dev * dev,uint16_t queue_id,struct rte_eth_rxq_info * qinfo)1509*2d9fd380Sjfb8856606 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1510*2d9fd380Sjfb8856606 struct rte_eth_rxq_info *qinfo)
1511*2d9fd380Sjfb8856606 {
1512*2d9fd380Sjfb8856606 struct dpaa_if *dpaa_intf = dev->data->dev_private;
1513*2d9fd380Sjfb8856606 struct qman_fq *rxq;
1514*2d9fd380Sjfb8856606
1515*2d9fd380Sjfb8856606 rxq = dev->data->rx_queues[queue_id];
1516*2d9fd380Sjfb8856606
1517*2d9fd380Sjfb8856606 qinfo->mp = dpaa_intf->bp_info->mp;
1518*2d9fd380Sjfb8856606 qinfo->scattered_rx = dev->data->scattered_rx;
1519*2d9fd380Sjfb8856606 qinfo->nb_desc = rxq->nb_desc;
1520*2d9fd380Sjfb8856606 qinfo->conf.rx_free_thresh = 1;
1521*2d9fd380Sjfb8856606 qinfo->conf.rx_drop_en = 1;
1522*2d9fd380Sjfb8856606 qinfo->conf.rx_deferred_start = 0;
1523*2d9fd380Sjfb8856606 qinfo->conf.offloads = rxq->offloads;
1524*2d9fd380Sjfb8856606 }
1525*2d9fd380Sjfb8856606
1526*2d9fd380Sjfb8856606 static void
dpaa_txq_info_get(struct rte_eth_dev * dev,uint16_t queue_id,struct rte_eth_txq_info * qinfo)1527*2d9fd380Sjfb8856606 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1528*2d9fd380Sjfb8856606 struct rte_eth_txq_info *qinfo)
1529*2d9fd380Sjfb8856606 {
1530*2d9fd380Sjfb8856606 struct qman_fq *txq;
1531*2d9fd380Sjfb8856606
1532*2d9fd380Sjfb8856606 txq = dev->data->tx_queues[queue_id];
1533*2d9fd380Sjfb8856606
1534*2d9fd380Sjfb8856606 qinfo->nb_desc = txq->nb_desc;
1535*2d9fd380Sjfb8856606 qinfo->conf.tx_thresh.pthresh = 0;
1536*2d9fd380Sjfb8856606 qinfo->conf.tx_thresh.hthresh = 0;
1537*2d9fd380Sjfb8856606 qinfo->conf.tx_thresh.wthresh = 0;
1538*2d9fd380Sjfb8856606
1539*2d9fd380Sjfb8856606 qinfo->conf.tx_free_thresh = 0;
1540*2d9fd380Sjfb8856606 qinfo->conf.tx_rs_thresh = 0;
1541*2d9fd380Sjfb8856606 qinfo->conf.offloads = txq->offloads;
1542*2d9fd380Sjfb8856606 qinfo->conf.tx_deferred_start = 0;
1543*2d9fd380Sjfb8856606 }
1544*2d9fd380Sjfb8856606
/* ethdev callback table mapping the generic rte_ethdev API onto the
 * DPAA (FMAN/QMAN backed) implementations in this file.
 */
static struct eth_dev_ops dpaa_devops = {
	/* Device lifecycle and capability queries */
	.dev_configure		  = dpaa_eth_dev_configure,
	.dev_start		  = dpaa_eth_dev_start,
	.dev_stop		  = dpaa_eth_dev_stop,
	.dev_close		  = dpaa_eth_dev_close,
	.dev_infos_get		  = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	/* Queue setup/teardown and queue introspection */
	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
	.rx_queue_release	  = dpaa_eth_rx_queue_release,
	.tx_queue_release	  = dpaa_eth_tx_queue_release,
	.rx_burst_mode_get	  = dpaa_dev_rx_burst_mode_get,
	.tx_burst_mode_get	  = dpaa_dev_tx_burst_mode_get,
	.rxq_info_get		  = dpaa_rxq_info_get,
	.txq_info_get		  = dpaa_txq_info_get,

	/* Flow control */
	.flow_ctrl_get		  = dpaa_flow_ctrl_get,
	.flow_ctrl_set		  = dpaa_flow_ctrl_set,

	/* Link, statistics and filtering */
	.link_update		  = dpaa_eth_link_update,
	.stats_get		  = dpaa_eth_stats_get,
	.xstats_get		  = dpaa_dev_xstats_get,
	.xstats_get_by_id	  = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id	  = dpaa_xstats_get_names_by_id,
	.xstats_get_names	  = dpaa_xstats_get_names,
	.xstats_reset		  = dpaa_eth_stats_reset,
	.stats_reset		  = dpaa_eth_stats_reset,
	.promiscuous_enable	  = dpaa_eth_promiscuous_enable,
	.promiscuous_disable	  = dpaa_eth_promiscuous_disable,
	.allmulticast_enable	  = dpaa_eth_multicast_enable,
	.allmulticast_disable	  = dpaa_eth_multicast_disable,
	.mtu_set		  = dpaa_mtu_set,
	.dev_set_link_down	  = dpaa_link_down,
	.dev_set_link_up	  = dpaa_link_up,
	.mac_addr_add		  = dpaa_dev_add_mac_addr,
	.mac_addr_remove	  = dpaa_dev_remove_mac_addr,
	.mac_addr_set		  = dpaa_dev_set_mac_addr,

	.fw_version_get		  = dpaa_fw_version_get,

	/* Rx interrupt mode and RSS */
	.rx_queue_intr_enable	  = dpaa_dev_queue_intr_enable,
	.rx_queue_intr_disable	  = dpaa_dev_queue_intr_disable,
	.rss_hash_update	  = dpaa_dev_rss_hash_update,
	.rss_hash_conf_get	  = dpaa_dev_rss_hash_conf_get,
};
15912bfe3f2eSlogwang
1592d30ea906Sjfb8856606 static bool
is_device_supported(struct rte_eth_dev * dev,struct rte_dpaa_driver * drv)1593d30ea906Sjfb8856606 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1594d30ea906Sjfb8856606 {
1595d30ea906Sjfb8856606 if (strcmp(dev->device->driver->name,
1596d30ea906Sjfb8856606 drv->driver.name))
1597d30ea906Sjfb8856606 return false;
1598d30ea906Sjfb8856606
1599d30ea906Sjfb8856606 return true;
1600d30ea906Sjfb8856606 }
1601d30ea906Sjfb8856606
/* True when @dev is bound to this (DPAA) PMD. */
static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_dpaa_pmd);
}
1607d30ea906Sjfb8856606
1608d30ea906Sjfb8856606 int
rte_pmd_dpaa_set_tx_loopback(uint16_t port,uint8_t on)16090c6bd470Sfengbojiang rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
1610d30ea906Sjfb8856606 {
1611d30ea906Sjfb8856606 struct rte_eth_dev *dev;
1612d30ea906Sjfb8856606
1613d30ea906Sjfb8856606 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1614d30ea906Sjfb8856606
1615d30ea906Sjfb8856606 dev = &rte_eth_devices[port];
1616d30ea906Sjfb8856606
1617d30ea906Sjfb8856606 if (!is_dpaa_supported(dev))
1618d30ea906Sjfb8856606 return -ENOTSUP;
1619d30ea906Sjfb8856606
1620d30ea906Sjfb8856606 if (on)
1621*2d9fd380Sjfb8856606 fman_if_loopback_enable(dev->process_private);
1622d30ea906Sjfb8856606 else
1623*2d9fd380Sjfb8856606 fman_if_loopback_disable(dev->process_private);
1624d30ea906Sjfb8856606
1625d30ea906Sjfb8856606 return 0;
1626d30ea906Sjfb8856606 }
1627d30ea906Sjfb8856606
dpaa_fc_set_default(struct dpaa_if * dpaa_intf,struct fman_if * fman_intf)1628*2d9fd380Sjfb8856606 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
1629*2d9fd380Sjfb8856606 struct fman_if *fman_intf)
16302bfe3f2eSlogwang {
16312bfe3f2eSlogwang struct rte_eth_fc_conf *fc_conf;
16322bfe3f2eSlogwang int ret;
16332bfe3f2eSlogwang
16342bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
16352bfe3f2eSlogwang
16362bfe3f2eSlogwang if (!(dpaa_intf->fc_conf)) {
16372bfe3f2eSlogwang dpaa_intf->fc_conf = rte_zmalloc(NULL,
16382bfe3f2eSlogwang sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
16392bfe3f2eSlogwang if (!dpaa_intf->fc_conf) {
16402bfe3f2eSlogwang DPAA_PMD_ERR("unable to save flow control info");
16412bfe3f2eSlogwang return -ENOMEM;
16422bfe3f2eSlogwang }
16432bfe3f2eSlogwang }
16442bfe3f2eSlogwang fc_conf = dpaa_intf->fc_conf;
1645*2d9fd380Sjfb8856606 ret = fman_if_get_fc_threshold(fman_intf);
16462bfe3f2eSlogwang if (ret) {
16472bfe3f2eSlogwang fc_conf->mode = RTE_FC_TX_PAUSE;
1648*2d9fd380Sjfb8856606 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
16492bfe3f2eSlogwang } else {
16502bfe3f2eSlogwang fc_conf->mode = RTE_FC_NONE;
16512bfe3f2eSlogwang }
16522bfe3f2eSlogwang
16532bfe3f2eSlogwang return 0;
16542bfe3f2eSlogwang }
16552bfe3f2eSlogwang
16562bfe3f2eSlogwang /* Initialise an Rx FQ */
/* Initialise an Rx frame queue.
 *
 * @fq:     QMAN frame queue object to create/initialise.
 * @cgr_rx: optional congestion group; when non-NULL, tail drop is armed
 *          on the queue via the CGR (best effort: on CGR failure the
 *          queue is still initialised, just without tail drop).
 * @fqid:   frame queue id to use; only reserved when FQIDs are fixed
 *          (default_q/fmc_q modes).
 *
 * Returns 0 on success, -EINVAL if the FQID cannot be reserved, or the
 * error from qman_create_fq()/qman_init_fq().
 */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
	uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
				QM_CGR_WE_CSTD_EN |
				QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	/* Fixed-FQID modes must reserve the id; FMC-less mode allocates
	 * its FQIDs dynamically elsewhere.
	 */
	if (fmc_q || default_q) {
		ret = qman_reserve_fqid(fqid);
		if (ret) {
			DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
				     fqid, ret);
			return -EINVAL;
		}
	}

	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
	ret = qman_create_fq(fqid, flags, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
			fqid, ret);
		return ret;
	}
	fq->is_static = false;

	dpaa_poll_queue_default_config(&opts);

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			/* Non-fatal: continue without tail drop. */
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
				fq->fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
	return ret;
}
17152bfe3f2eSlogwang
17162bfe3f2eSlogwang /* Initialise a Tx FQ */
dpaa_tx_queue_init(struct qman_fq * fq,struct fman_if * fman_intf,struct qman_cgr * cgr_tx)17172bfe3f2eSlogwang static int dpaa_tx_queue_init(struct qman_fq *fq,
1718*2d9fd380Sjfb8856606 struct fman_if *fman_intf,
1719*2d9fd380Sjfb8856606 struct qman_cgr *cgr_tx)
17202bfe3f2eSlogwang {
17212bfe3f2eSlogwang struct qm_mcc_initfq opts = {0};
1722*2d9fd380Sjfb8856606 struct qm_mcc_initcgr cgr_opts = {
1723*2d9fd380Sjfb8856606 .we_mask = QM_CGR_WE_CS_THRES |
1724*2d9fd380Sjfb8856606 QM_CGR_WE_CSTD_EN |
1725*2d9fd380Sjfb8856606 QM_CGR_WE_MODE,
1726*2d9fd380Sjfb8856606 .cgr = {
1727*2d9fd380Sjfb8856606 .cstd_en = QM_CGR_EN,
1728*2d9fd380Sjfb8856606 .mode = QMAN_CGR_MODE_FRAME
1729*2d9fd380Sjfb8856606 }
1730*2d9fd380Sjfb8856606 };
17312bfe3f2eSlogwang int ret;
17322bfe3f2eSlogwang
17332bfe3f2eSlogwang ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
17342bfe3f2eSlogwang QMAN_FQ_FLAG_TO_DCPORTAL, fq);
17352bfe3f2eSlogwang if (ret) {
17362bfe3f2eSlogwang DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
17372bfe3f2eSlogwang return ret;
17382bfe3f2eSlogwang }
17392bfe3f2eSlogwang opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
17402bfe3f2eSlogwang QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
17412bfe3f2eSlogwang opts.fqd.dest.channel = fman_intf->tx_channel_id;
17422bfe3f2eSlogwang opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
17432bfe3f2eSlogwang opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
17442bfe3f2eSlogwang opts.fqd.context_b = 0;
17452bfe3f2eSlogwang /* no tx-confirmation */
17462bfe3f2eSlogwang opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
17472bfe3f2eSlogwang opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1748d30ea906Sjfb8856606 DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1749*2d9fd380Sjfb8856606
1750*2d9fd380Sjfb8856606 if (cgr_tx) {
1751*2d9fd380Sjfb8856606 /* Enable tail drop with cgr on this queue */
1752*2d9fd380Sjfb8856606 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
1753*2d9fd380Sjfb8856606 td_tx_threshold, 0);
1754*2d9fd380Sjfb8856606 cgr_tx->cb = NULL;
1755*2d9fd380Sjfb8856606 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
1756*2d9fd380Sjfb8856606 &cgr_opts);
1757*2d9fd380Sjfb8856606 if (ret) {
1758*2d9fd380Sjfb8856606 DPAA_PMD_WARN(
1759*2d9fd380Sjfb8856606 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1760*2d9fd380Sjfb8856606 fq->fqid, ret);
1761*2d9fd380Sjfb8856606 goto without_cgr;
1762*2d9fd380Sjfb8856606 }
1763*2d9fd380Sjfb8856606 opts.we_mask |= QM_INITFQ_WE_CGID;
1764*2d9fd380Sjfb8856606 opts.fqd.cgid = cgr_tx->cgrid;
1765*2d9fd380Sjfb8856606 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1766*2d9fd380Sjfb8856606 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
1767*2d9fd380Sjfb8856606 td_tx_threshold);
1768*2d9fd380Sjfb8856606 }
1769*2d9fd380Sjfb8856606 without_cgr:
17702bfe3f2eSlogwang ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
17712bfe3f2eSlogwang if (ret)
1772d30ea906Sjfb8856606 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
17732bfe3f2eSlogwang return ret;
17742bfe3f2eSlogwang }
17752bfe3f2eSlogwang
17762bfe3f2eSlogwang #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
17772bfe3f2eSlogwang /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
/* Initialise a debug frame queue ([rt]x_error, rx_default) on @fqid.
 *
 * Reserves the fixed FQID, creates a no-enqueue FQ and schedules it on
 * the debug priority work queue.  Returns 0 on success, -EINVAL if the
 * FQID cannot be reserved, or the qman_create_fq()/qman_init_fq() error.
 * Only built with RTE_LIBRTE_DPAA_DEBUG_DRIVER.
 */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interfaces Tx FQID */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			fqid, ret);
	return ret;
}
18072bfe3f2eSlogwang #endif
18082bfe3f2eSlogwang
18092bfe3f2eSlogwang /* Initialise a network interface */
18102bfe3f2eSlogwang static int
/* Initialise a network interface in a secondary process.
 *
 * Only per-process state is set up: the FMAN interface handle is stored
 * as process_private and the burst function pointers are installed.
 * The shared dev_private (queues, CGRs) was already populated by the
 * primary process.
 */
static int
dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
{
	struct rte_dpaa_device *dpaa_device;
	struct fm_eth_port_cfg *cfg;
	struct dpaa_if *dpaa_intf;
	struct fman_if *fman_intf;
	int dev_id;

	PMD_INIT_FUNC_TRACE();

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	cfg = dpaa_get_eth_port_cfg(dev_id);
	fman_intf = cfg->fman_if;
	eth_dev->process_private = fman_intf;

	/* Plugging of UCODE burst API not supported in Secondary */
	dpaa_intf = eth_dev->data->dev_private;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	/* The slow Tx path is needed when a Tx CGR (tail drop) is in use. */
	if (dpaa_intf->cgr_tx)
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
	else
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	/* Reuse the FQ lookup table built by the primary process. */
	qman_set_fq_lookup_table(
		dpaa_intf->rx_queues->qman_fq_lookup_table);
#endif

	return 0;
}
1841*2d9fd380Sjfb8856606
1842*2d9fd380Sjfb8856606 /* Initialise a network interface */
1843*2d9fd380Sjfb8856606 static int
dpaa_dev_init(struct rte_eth_dev * eth_dev)18442bfe3f2eSlogwang dpaa_dev_init(struct rte_eth_dev *eth_dev)
18452bfe3f2eSlogwang {
18464418919fSjohnjiang int num_rx_fqs, fqid;
18472bfe3f2eSlogwang int loop, ret = 0;
18482bfe3f2eSlogwang int dev_id;
18492bfe3f2eSlogwang struct rte_dpaa_device *dpaa_device;
18502bfe3f2eSlogwang struct dpaa_if *dpaa_intf;
18512bfe3f2eSlogwang struct fm_eth_port_cfg *cfg;
18522bfe3f2eSlogwang struct fman_if *fman_intf;
18532bfe3f2eSlogwang struct fman_if_bpool *bp, *tmp_bp;
1854d30ea906Sjfb8856606 uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1855*2d9fd380Sjfb8856606 uint32_t cgrid_tx[MAX_DPAA_CORES];
1856*2d9fd380Sjfb8856606 uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
1857*2d9fd380Sjfb8856606 int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
1858*2d9fd380Sjfb8856606 int8_t vsp_id = -1;
18592bfe3f2eSlogwang
18602bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
18612bfe3f2eSlogwang
18622bfe3f2eSlogwang dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
18632bfe3f2eSlogwang dev_id = dpaa_device->id.dev_id;
18642bfe3f2eSlogwang dpaa_intf = eth_dev->data->dev_private;
1865*2d9fd380Sjfb8856606 cfg = dpaa_get_eth_port_cfg(dev_id);
18662bfe3f2eSlogwang fman_intf = cfg->fman_if;
18672bfe3f2eSlogwang
18682bfe3f2eSlogwang dpaa_intf->name = dpaa_device->name;
18692bfe3f2eSlogwang
18702bfe3f2eSlogwang /* save fman_if & cfg in the interface struture */
1871*2d9fd380Sjfb8856606 eth_dev->process_private = fman_intf;
18722bfe3f2eSlogwang dpaa_intf->ifid = dev_id;
18732bfe3f2eSlogwang dpaa_intf->cfg = cfg;
18742bfe3f2eSlogwang
1875*2d9fd380Sjfb8856606 memset((char *)dev_rx_fqids, 0,
1876*2d9fd380Sjfb8856606 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
1877*2d9fd380Sjfb8856606
1878*2d9fd380Sjfb8856606 memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
1879*2d9fd380Sjfb8856606
18802bfe3f2eSlogwang /* Initialize Rx FQ's */
1881d30ea906Sjfb8856606 if (default_q) {
1882d30ea906Sjfb8856606 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1883*2d9fd380Sjfb8856606 } else if (fmc_q) {
1884*2d9fd380Sjfb8856606 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
1885*2d9fd380Sjfb8856606 dev_vspids,
1886*2d9fd380Sjfb8856606 DPAA_MAX_NUM_PCD_QUEUES);
1887*2d9fd380Sjfb8856606 if (num_rx_fqs < 0) {
1888*2d9fd380Sjfb8856606 DPAA_PMD_ERR("%s FMC initializes failed!",
1889*2d9fd380Sjfb8856606 dpaa_intf->name);
1890*2d9fd380Sjfb8856606 goto free_rx;
1891d30ea906Sjfb8856606 }
1892*2d9fd380Sjfb8856606 if (!num_rx_fqs) {
1893*2d9fd380Sjfb8856606 DPAA_PMD_WARN("%s is not configured by FMC.",
1894*2d9fd380Sjfb8856606 dpaa_intf->name);
1895*2d9fd380Sjfb8856606 }
1896*2d9fd380Sjfb8856606 } else {
1897*2d9fd380Sjfb8856606 /* FMCLESS mode, load balance to multiple cores.*/
1898*2d9fd380Sjfb8856606 num_rx_fqs = rte_lcore_count();
1899*2d9fd380Sjfb8856606 }
1900d30ea906Sjfb8856606
1901d30ea906Sjfb8856606 /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
19022bfe3f2eSlogwang * queues.
19032bfe3f2eSlogwang */
1904*2d9fd380Sjfb8856606 if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
19052bfe3f2eSlogwang DPAA_PMD_ERR("Invalid number of RX queues\n");
19062bfe3f2eSlogwang return -EINVAL;
19072bfe3f2eSlogwang }
19082bfe3f2eSlogwang
1909*2d9fd380Sjfb8856606 if (num_rx_fqs > 0) {
19102bfe3f2eSlogwang dpaa_intf->rx_queues = rte_zmalloc(NULL,
19112bfe3f2eSlogwang sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
19122bfe3f2eSlogwang if (!dpaa_intf->rx_queues) {
19132bfe3f2eSlogwang DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
19142bfe3f2eSlogwang return -ENOMEM;
19152bfe3f2eSlogwang }
1916*2d9fd380Sjfb8856606 } else {
1917*2d9fd380Sjfb8856606 dpaa_intf->rx_queues = NULL;
1918*2d9fd380Sjfb8856606 }
1919*2d9fd380Sjfb8856606
1920*2d9fd380Sjfb8856606 memset(cgrid, 0, sizeof(cgrid));
1921*2d9fd380Sjfb8856606 memset(cgrid_tx, 0, sizeof(cgrid_tx));
1922*2d9fd380Sjfb8856606
1923*2d9fd380Sjfb8856606 /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
1924*2d9fd380Sjfb8856606 * Tx tail drop is disabled.
1925*2d9fd380Sjfb8856606 */
1926*2d9fd380Sjfb8856606 if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
1927*2d9fd380Sjfb8856606 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
1928*2d9fd380Sjfb8856606 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
1929*2d9fd380Sjfb8856606 td_tx_threshold);
1930*2d9fd380Sjfb8856606 /* if a very large value is being configured */
1931*2d9fd380Sjfb8856606 if (td_tx_threshold > UINT16_MAX)
1932*2d9fd380Sjfb8856606 td_tx_threshold = CGR_RX_PERFQ_THRESH;
1933*2d9fd380Sjfb8856606 }
19342bfe3f2eSlogwang
1935d30ea906Sjfb8856606 /* If congestion control is enabled globally*/
1936*2d9fd380Sjfb8856606 if (num_rx_fqs > 0 && td_threshold) {
1937d30ea906Sjfb8856606 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1938d30ea906Sjfb8856606 sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1939d30ea906Sjfb8856606 if (!dpaa_intf->cgr_rx) {
1940d30ea906Sjfb8856606 DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1941d30ea906Sjfb8856606 ret = -ENOMEM;
1942d30ea906Sjfb8856606 goto free_rx;
1943d30ea906Sjfb8856606 }
1944d30ea906Sjfb8856606
1945d30ea906Sjfb8856606 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1946d30ea906Sjfb8856606 if (ret != num_rx_fqs) {
1947d30ea906Sjfb8856606 DPAA_PMD_WARN("insufficient CGRIDs available");
1948d30ea906Sjfb8856606 ret = -EINVAL;
1949d30ea906Sjfb8856606 goto free_rx;
1950d30ea906Sjfb8856606 }
1951d30ea906Sjfb8856606 } else {
1952d30ea906Sjfb8856606 dpaa_intf->cgr_rx = NULL;
1953d30ea906Sjfb8856606 }
1954d30ea906Sjfb8856606
1955*2d9fd380Sjfb8856606 if (!fmc_q && !default_q) {
1956*2d9fd380Sjfb8856606 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
1957*2d9fd380Sjfb8856606 num_rx_fqs, 0);
1958*2d9fd380Sjfb8856606 if (ret < 0) {
1959*2d9fd380Sjfb8856606 DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
1960*2d9fd380Sjfb8856606 goto free_rx;
1961*2d9fd380Sjfb8856606 }
1962*2d9fd380Sjfb8856606 }
1963*2d9fd380Sjfb8856606
19642bfe3f2eSlogwang for (loop = 0; loop < num_rx_fqs; loop++) {
1965d30ea906Sjfb8856606 if (default_q)
1966d30ea906Sjfb8856606 fqid = cfg->rx_def;
1967d30ea906Sjfb8856606 else
1968*2d9fd380Sjfb8856606 fqid = dev_rx_fqids[loop];
1969*2d9fd380Sjfb8856606
1970*2d9fd380Sjfb8856606 vsp_id = dev_vspids[loop];
1971d30ea906Sjfb8856606
1972d30ea906Sjfb8856606 if (dpaa_intf->cgr_rx)
1973d30ea906Sjfb8856606 dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1974d30ea906Sjfb8856606
1975d30ea906Sjfb8856606 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1976d30ea906Sjfb8856606 dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1977d30ea906Sjfb8856606 fqid);
19782bfe3f2eSlogwang if (ret)
19792bfe3f2eSlogwang goto free_rx;
1980*2d9fd380Sjfb8856606 dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
19812bfe3f2eSlogwang dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
19822bfe3f2eSlogwang }
19832bfe3f2eSlogwang dpaa_intf->nb_rx_queues = num_rx_fqs;
19842bfe3f2eSlogwang
1985d30ea906Sjfb8856606 /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */
19862bfe3f2eSlogwang dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
19874418919fSjohnjiang MAX_DPAA_CORES, MAX_CACHELINE);
19882bfe3f2eSlogwang if (!dpaa_intf->tx_queues) {
19892bfe3f2eSlogwang DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
19902bfe3f2eSlogwang ret = -ENOMEM;
19912bfe3f2eSlogwang goto free_rx;
19922bfe3f2eSlogwang }
19932bfe3f2eSlogwang
1994*2d9fd380Sjfb8856606 /* If congestion control is enabled globally*/
1995*2d9fd380Sjfb8856606 if (td_tx_threshold) {
1996*2d9fd380Sjfb8856606 dpaa_intf->cgr_tx = rte_zmalloc(NULL,
1997*2d9fd380Sjfb8856606 sizeof(struct qman_cgr) * MAX_DPAA_CORES,
1998*2d9fd380Sjfb8856606 MAX_CACHELINE);
1999*2d9fd380Sjfb8856606 if (!dpaa_intf->cgr_tx) {
2000*2d9fd380Sjfb8856606 DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
2001*2d9fd380Sjfb8856606 ret = -ENOMEM;
2002*2d9fd380Sjfb8856606 goto free_rx;
2003*2d9fd380Sjfb8856606 }
2004*2d9fd380Sjfb8856606
2005*2d9fd380Sjfb8856606 ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
2006*2d9fd380Sjfb8856606 1, 0);
2007*2d9fd380Sjfb8856606 if (ret != MAX_DPAA_CORES) {
2008*2d9fd380Sjfb8856606 DPAA_PMD_WARN("insufficient CGRIDs available");
2009*2d9fd380Sjfb8856606 ret = -EINVAL;
2010*2d9fd380Sjfb8856606 goto free_rx;
2011*2d9fd380Sjfb8856606 }
2012*2d9fd380Sjfb8856606 } else {
2013*2d9fd380Sjfb8856606 dpaa_intf->cgr_tx = NULL;
2014*2d9fd380Sjfb8856606 }
2015*2d9fd380Sjfb8856606
2016*2d9fd380Sjfb8856606
20174418919fSjohnjiang for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
2018*2d9fd380Sjfb8856606 if (dpaa_intf->cgr_tx)
2019*2d9fd380Sjfb8856606 dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
2020*2d9fd380Sjfb8856606
20212bfe3f2eSlogwang ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
2022*2d9fd380Sjfb8856606 fman_intf,
2023*2d9fd380Sjfb8856606 dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
20242bfe3f2eSlogwang if (ret)
20252bfe3f2eSlogwang goto free_tx;
20262bfe3f2eSlogwang dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
20272bfe3f2eSlogwang }
20284418919fSjohnjiang dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
20292bfe3f2eSlogwang
20302bfe3f2eSlogwang #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2031*2d9fd380Sjfb8856606 ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2032*2d9fd380Sjfb8856606 [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
2033*2d9fd380Sjfb8856606 if (ret) {
2034*2d9fd380Sjfb8856606 DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
2035*2d9fd380Sjfb8856606 goto free_tx;
2036*2d9fd380Sjfb8856606 }
20372bfe3f2eSlogwang dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
2038*2d9fd380Sjfb8856606 ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2039*2d9fd380Sjfb8856606 [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
2040*2d9fd380Sjfb8856606 if (ret) {
2041*2d9fd380Sjfb8856606 DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
2042*2d9fd380Sjfb8856606 goto free_tx;
2043*2d9fd380Sjfb8856606 }
20442bfe3f2eSlogwang dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
20452bfe3f2eSlogwang #endif
20462bfe3f2eSlogwang
20472bfe3f2eSlogwang DPAA_PMD_DEBUG("All frame queues created");
20482bfe3f2eSlogwang
20492bfe3f2eSlogwang /* Get the initial configuration for flow control */
2050*2d9fd380Sjfb8856606 dpaa_fc_set_default(dpaa_intf, fman_intf);
20512bfe3f2eSlogwang
20522bfe3f2eSlogwang /* reset bpool list, initialize bpool dynamically */
20532bfe3f2eSlogwang list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
20542bfe3f2eSlogwang list_del(&bp->node);
20554418919fSjohnjiang rte_free(bp);
20562bfe3f2eSlogwang }
20572bfe3f2eSlogwang
20582bfe3f2eSlogwang /* Populate ethdev structure */
20592bfe3f2eSlogwang eth_dev->dev_ops = &dpaa_devops;
2060*2d9fd380Sjfb8856606 eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
20612bfe3f2eSlogwang eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
20622bfe3f2eSlogwang eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
20632bfe3f2eSlogwang
20642bfe3f2eSlogwang /* Allocate memory for storing MAC addresses */
20652bfe3f2eSlogwang eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
20664418919fSjohnjiang RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
20672bfe3f2eSlogwang if (eth_dev->data->mac_addrs == NULL) {
20682bfe3f2eSlogwang DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
20692bfe3f2eSlogwang "store MAC addresses",
20704418919fSjohnjiang RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
20712bfe3f2eSlogwang ret = -ENOMEM;
20722bfe3f2eSlogwang goto free_tx;
20732bfe3f2eSlogwang }
20742bfe3f2eSlogwang
20752bfe3f2eSlogwang /* copy the primary mac address */
20764418919fSjohnjiang rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
20772bfe3f2eSlogwang
2078*2d9fd380Sjfb8856606 RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
2079*2d9fd380Sjfb8856606 dpaa_device->name,
2080*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[0],
2081*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[1],
2082*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[2],
2083*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[3],
2084*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[4],
2085*2d9fd380Sjfb8856606 fman_intf->mac_addr.addr_bytes[5]);
20862bfe3f2eSlogwang
2087*2d9fd380Sjfb8856606 if (!fman_intf->is_shared_mac) {
2088*2d9fd380Sjfb8856606 /* Configure error packet handling */
2089*2d9fd380Sjfb8856606 fman_if_receive_rx_errors(fman_intf,
2090*2d9fd380Sjfb8856606 FM_FD_RX_STATUS_ERR_MASK);
20912bfe3f2eSlogwang /* Disable RX mode */
20922bfe3f2eSlogwang fman_if_disable_rx(fman_intf);
20932bfe3f2eSlogwang /* Disable promiscuous mode */
20942bfe3f2eSlogwang fman_if_promiscuous_disable(fman_intf);
20952bfe3f2eSlogwang /* Disable multicast */
20962bfe3f2eSlogwang fman_if_reset_mcast_filter_table(fman_intf);
20972bfe3f2eSlogwang /* Reset interface statistics */
20982bfe3f2eSlogwang fman_if_stats_reset(fman_intf);
2099d30ea906Sjfb8856606 /* Disable SG by default */
2100d30ea906Sjfb8856606 fman_if_set_sg(fman_intf, 0);
2101*2d9fd380Sjfb8856606 fman_if_set_maxfrm(fman_intf,
2102*2d9fd380Sjfb8856606 RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2103*2d9fd380Sjfb8856606 }
21042bfe3f2eSlogwang
21052bfe3f2eSlogwang return 0;
21062bfe3f2eSlogwang
21072bfe3f2eSlogwang free_tx:
21082bfe3f2eSlogwang rte_free(dpaa_intf->tx_queues);
21092bfe3f2eSlogwang dpaa_intf->tx_queues = NULL;
21102bfe3f2eSlogwang dpaa_intf->nb_tx_queues = 0;
21112bfe3f2eSlogwang
21122bfe3f2eSlogwang free_rx:
2113d30ea906Sjfb8856606 rte_free(dpaa_intf->cgr_rx);
2114*2d9fd380Sjfb8856606 rte_free(dpaa_intf->cgr_tx);
21152bfe3f2eSlogwang rte_free(dpaa_intf->rx_queues);
21162bfe3f2eSlogwang dpaa_intf->rx_queues = NULL;
21172bfe3f2eSlogwang dpaa_intf->nb_rx_queues = 0;
21182bfe3f2eSlogwang return ret;
21192bfe3f2eSlogwang }
21202bfe3f2eSlogwang
/*
 * Probe callback for the DPAA bus: create and initialise an ethdev for
 * the given DPAA device.
 *
 * Primary process: performs one-time global setup (FMC vs. default-queue
 * mode detection, optional FMLIB init, push-mode queue tuning), lazily
 * binds a QMAN portal to the calling thread, allocates the ethdev and its
 * private data, then runs the full device init.  Secondary process:
 * attaches to the already-configured port and only runs per-process init.
 *
 * Returns 0 on success, a negative value on failure.
 */
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* The DPAA HW annotation area must fit inside the mbuf headroom. */
	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
		RTE_PKTMBUF_HEADROOM) {
		DPAA_PMD_ERR(
		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
		RTE_PKTMBUF_HEADROOM,
		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

		return -1;
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		eth_dev->device = &dpaa_dev->device;
		eth_dev->dev_ops = &dpaa_devops;

		ret = dpaa_dev_init_secondary(eth_dev);
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "secondary dev init failed\n");
			return ret;
		}

		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	/* One-time global initialisation, done by the first probed device. */
	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
		/* No FMC configuration file => fall back to default
		 * (single RX queue) mode.
		 */
		if (access("/tmp/fmc.bin", F_OK) == -1) {
			DPAA_PMD_INFO("* FMC not configured.Enabling default mode");
			default_q = 1;
		}

		/* Neither default nor FMC mode: drive FMAN via FMLIB. */
		if (!(default_q || fmc_q)) {
			if (dpaa_fm_init()) {
				DPAA_PMD_ERR("FM init failed\n");
				return -1;
			}
		}

		/* disabling the default push mode for LS1043 */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
			dpaa_push_mode_max_queue = 0;

		/* if push mode queues to be enabled. Currenly we are allowing
		 * only one queue per thread.
		 */
		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
			dpaa_push_mode_max_queue =
					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
				dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
		}

		is_global_init = 1;
	}

	/* Bind a QMAN portal to this thread if not already done. */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (!eth_dev)
		return -ENOMEM;

	eth_dev->data->dev_private =
			rte_zmalloc("ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	dpaa_dev->eth_dev = eth_dev;

	/* Callback to reclaim mbufs rejected by QMAN on enqueue. */
	qman_ern_register_cb(dpaa_free_mbuf);

	if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	/* Init failed: release the port (also frees dev_private). */
	rte_eth_dev_release_port(eth_dev);
	return diag;
}
22342bfe3f2eSlogwang
22352bfe3f2eSlogwang static int
rte_dpaa_remove(struct rte_dpaa_device * dpaa_dev)22362bfe3f2eSlogwang rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
22372bfe3f2eSlogwang {
22382bfe3f2eSlogwang struct rte_eth_dev *eth_dev;
2239*2d9fd380Sjfb8856606 int ret;
22402bfe3f2eSlogwang
22412bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
22422bfe3f2eSlogwang
22432bfe3f2eSlogwang eth_dev = dpaa_dev->eth_dev;
2244*2d9fd380Sjfb8856606 dpaa_eth_dev_close(eth_dev);
2245*2d9fd380Sjfb8856606 ret = rte_eth_dev_release_port(eth_dev);
22462bfe3f2eSlogwang
2247*2d9fd380Sjfb8856606 return ret;
2248*2d9fd380Sjfb8856606 }
22492bfe3f2eSlogwang
dpaa_finish(void)2250*2d9fd380Sjfb8856606 static void __attribute__((destructor(102))) dpaa_finish(void)
2251*2d9fd380Sjfb8856606 {
2252*2d9fd380Sjfb8856606 /* For secondary, primary will do all the cleanup */
2253*2d9fd380Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2254*2d9fd380Sjfb8856606 return;
2255*2d9fd380Sjfb8856606
2256*2d9fd380Sjfb8856606 if (!(default_q || fmc_q)) {
2257*2d9fd380Sjfb8856606 unsigned int i;
2258*2d9fd380Sjfb8856606
2259*2d9fd380Sjfb8856606 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
2260*2d9fd380Sjfb8856606 if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
2261*2d9fd380Sjfb8856606 struct rte_eth_dev *dev = &rte_eth_devices[i];
2262*2d9fd380Sjfb8856606 struct dpaa_if *dpaa_intf =
2263*2d9fd380Sjfb8856606 dev->data->dev_private;
2264*2d9fd380Sjfb8856606 struct fman_if *fif =
2265*2d9fd380Sjfb8856606 dev->process_private;
2266*2d9fd380Sjfb8856606 if (dpaa_intf->port_handle)
2267*2d9fd380Sjfb8856606 if (dpaa_fm_deconfig(dpaa_intf, fif))
2268*2d9fd380Sjfb8856606 DPAA_PMD_WARN("DPAA FM "
2269*2d9fd380Sjfb8856606 "deconfig failed\n");
2270*2d9fd380Sjfb8856606 if (fif->num_profiles) {
2271*2d9fd380Sjfb8856606 if (dpaa_port_vsp_cleanup(dpaa_intf,
2272*2d9fd380Sjfb8856606 fif))
2273*2d9fd380Sjfb8856606 DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
2274*2d9fd380Sjfb8856606 }
2275*2d9fd380Sjfb8856606 }
2276*2d9fd380Sjfb8856606 }
2277*2d9fd380Sjfb8856606 if (is_global_init)
2278*2d9fd380Sjfb8856606 if (dpaa_fm_term())
2279*2d9fd380Sjfb8856606 DPAA_PMD_WARN("DPAA FM term failed\n");
2280*2d9fd380Sjfb8856606
2281*2d9fd380Sjfb8856606 is_global_init = 0;
2282*2d9fd380Sjfb8856606
2283*2d9fd380Sjfb8856606 DPAA_PMD_INFO("DPAA fman cleaned up");
2284*2d9fd380Sjfb8856606 }
22852bfe3f2eSlogwang }
22862bfe3f2eSlogwang
/* DPAA bus driver descriptor for this PMD: matches FSL_DPAA_ETH devices
 * and advertises link-status-change interrupt support.
 */
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_flags = RTE_DPAA_DRV_INTR_LSC,
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

/* Register the driver with the DPAA bus and create the PMD log type. */
RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);