1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606 * Copyright(c) 2017 Marvell International Ltd.
3d30ea906Sjfb8856606 * Copyright(c) 2017 Semihalf.
4d30ea906Sjfb8856606 * All rights reserved.
5d30ea906Sjfb8856606 */
6d30ea906Sjfb8856606
74418919fSjohnjiang #include <rte_string_fns.h>
8d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
9d30ea906Sjfb8856606 #include <rte_kvargs.h>
10d30ea906Sjfb8856606 #include <rte_log.h>
11d30ea906Sjfb8856606 #include <rte_malloc.h>
12d30ea906Sjfb8856606 #include <rte_bus_vdev.h>
13d30ea906Sjfb8856606
14d30ea906Sjfb8856606 #include <fcntl.h>
15d30ea906Sjfb8856606 #include <linux/ethtool.h>
16d30ea906Sjfb8856606 #include <linux/sockios.h>
17d30ea906Sjfb8856606 #include <net/if.h>
18d30ea906Sjfb8856606 #include <net/if_arp.h>
19d30ea906Sjfb8856606 #include <sys/ioctl.h>
20d30ea906Sjfb8856606 #include <sys/socket.h>
21d30ea906Sjfb8856606 #include <sys/stat.h>
22d30ea906Sjfb8856606 #include <sys/types.h>
23d30ea906Sjfb8856606
24d30ea906Sjfb8856606 #include <rte_mvep_common.h>
25d30ea906Sjfb8856606 #include "mrvl_ethdev.h"
26d30ea906Sjfb8856606 #include "mrvl_qos.h"
27d30ea906Sjfb8856606 #include "mrvl_flow.h"
28d30ea906Sjfb8856606 #include "mrvl_mtr.h"
29d30ea906Sjfb8856606 #include "mrvl_tm.h"
30d30ea906Sjfb8856606
31d30ea906Sjfb8856606 /* bitmask with reserved hifs */
32d30ea906Sjfb8856606 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
33d30ea906Sjfb8856606 /* bitmask with reserved bpools */
34d30ea906Sjfb8856606 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
35d30ea906Sjfb8856606 /* bitmask with reserved kernel RSS tables */
36d30ea906Sjfb8856606 #define MRVL_MUSDK_RSS_RESERVED 0x01
37d30ea906Sjfb8856606 /* maximum number of available hifs */
38d30ea906Sjfb8856606 #define MRVL_MUSDK_HIFS_MAX 9
39d30ea906Sjfb8856606
40d30ea906Sjfb8856606 /* prefetch shift */
41d30ea906Sjfb8856606 #define MRVL_MUSDK_PREFETCH_SHIFT 2
42d30ea906Sjfb8856606
43d30ea906Sjfb8856606 /* TCAM has 25 entries reserved for uc/mc filter entries */
44d30ea906Sjfb8856606 #define MRVL_MAC_ADDRS_MAX 25
45d30ea906Sjfb8856606 #define MRVL_MATCH_LEN 16
46d30ea906Sjfb8856606 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
47d30ea906Sjfb8856606 /* Maximum allowable packet size */
48d30ea906Sjfb8856606 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
49d30ea906Sjfb8856606
50d30ea906Sjfb8856606 #define MRVL_IFACE_NAME_ARG "iface"
51d30ea906Sjfb8856606 #define MRVL_CFG_ARG "cfg"
52d30ea906Sjfb8856606
53d30ea906Sjfb8856606 #define MRVL_BURST_SIZE 64
54d30ea906Sjfb8856606
55d30ea906Sjfb8856606 #define MRVL_ARP_LENGTH 28
56d30ea906Sjfb8856606
57d30ea906Sjfb8856606 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
58d30ea906Sjfb8856606 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
59d30ea906Sjfb8856606
60d30ea906Sjfb8856606 /** Port Rx offload capabilities */
61d30ea906Sjfb8856606 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
62d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME | \
63d30ea906Sjfb8856606 DEV_RX_OFFLOAD_CHECKSUM)
64d30ea906Sjfb8856606
65d30ea906Sjfb8856606 /** Port Tx offloads capabilities */
66d30ea906Sjfb8856606 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
67d30ea906Sjfb8856606 DEV_TX_OFFLOAD_UDP_CKSUM | \
68d30ea906Sjfb8856606 DEV_TX_OFFLOAD_TCP_CKSUM | \
69d30ea906Sjfb8856606 DEV_TX_OFFLOAD_MULTI_SEGS)
70d30ea906Sjfb8856606
71d30ea906Sjfb8856606 static const char * const valid_args[] = {
72d30ea906Sjfb8856606 MRVL_IFACE_NAME_ARG,
73d30ea906Sjfb8856606 MRVL_CFG_ARG,
74d30ea906Sjfb8856606 NULL
75d30ea906Sjfb8856606 };
76d30ea906Sjfb8856606
77d30ea906Sjfb8856606 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
78d30ea906Sjfb8856606 static struct pp2_hif *hifs[RTE_MAX_LCORE];
79d30ea906Sjfb8856606 static int used_bpools[PP2_NUM_PKT_PROC] = {
80d30ea906Sjfb8856606 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
81d30ea906Sjfb8856606 };
82d30ea906Sjfb8856606
83d30ea906Sjfb8856606 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
84d30ea906Sjfb8856606 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
85d30ea906Sjfb8856606 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
86d30ea906Sjfb8856606
87d30ea906Sjfb8856606 struct mrvl_ifnames {
88d30ea906Sjfb8856606 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
89d30ea906Sjfb8856606 int idx;
90d30ea906Sjfb8856606 };
91d30ea906Sjfb8856606
92d30ea906Sjfb8856606 /*
93d30ea906Sjfb8856606 * To use buffer harvesting based on loopback port shadow queue structure
94d30ea906Sjfb8856606 * was introduced for buffers information bookkeeping.
95d30ea906Sjfb8856606 *
96d30ea906Sjfb8856606 * Before sending the packet, related buffer information (pp2_buff_inf) is
97d30ea906Sjfb8856606 * stored in shadow queue. After packet is transmitted no longer used
98d30ea906Sjfb8856606 * packet buffer is released back to it's original hardware pool,
99d30ea906Sjfb8856606 * on condition it originated from interface.
100d30ea906Sjfb8856606 * In case it was generated by application itself i.e: mbuf->port field is
101d30ea906Sjfb8856606 * 0xff then its released to software mempool.
102d30ea906Sjfb8856606 */
/* Per-lcore shadow of a tx queue; see the comment block above. */
struct mrvl_shadow_txq {
	int head; /* write index - used when sending buffers */
	int tail; /* read index - used when releasing buffers */
	u16 size; /* queue occupied size */
	u16 num_to_release; /* number of descriptors sent, that can be
			     * released
			     */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
112d30ea906Sjfb8856606
/* Receive queue state, one instance per configured DPDK rx queue. */
struct mrvl_rxq {
	struct mrvl_priv *priv; /* owning port's private data */
	struct rte_mempool *mp; /* mempool backing this queue's buffers */
	int queue_id; /* rx queue index within the port */
	int port_id; /* DPDK port id this queue belongs to */
	int cksum_enabled; /* non-zero when rx checksum offload is enabled */
	uint64_t bytes_recv; /* byte counter for this queue */
	uint64_t drop_mac; /* drop counter; presumably MAC-level drops —
			    * confirm against the rx burst path
			    */
};
122d30ea906Sjfb8856606
/* Transmit queue state, one instance per configured DPDK tx queue. */
struct mrvl_txq {
	struct mrvl_priv *priv; /* owning port's private data */
	int queue_id; /* tx queue index within the port */
	int port_id; /* DPDK port id this queue belongs to */
	uint64_t bytes_sent; /* byte counter for this queue */
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; /* per-lcore
							    * bookkeeping of
							    * in-flight buffers
							    */
	int tx_deferred_start; /* non-zero: do not start queue in dev_start */
};
131d30ea906Sjfb8856606
132d30ea906Sjfb8856606 static int mrvl_lcore_first;
133d30ea906Sjfb8856606 static int mrvl_lcore_last;
134d30ea906Sjfb8856606 static int mrvl_dev_num;
135d30ea906Sjfb8856606
136d30ea906Sjfb8856606 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
137d30ea906Sjfb8856606 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
138d30ea906Sjfb8856606 struct pp2_hif *hif, unsigned int core_id,
139d30ea906Sjfb8856606 struct mrvl_shadow_txq *sq, int qid, int force);
140d30ea906Sjfb8856606
141d30ea906Sjfb8856606 static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
142d30ea906Sjfb8856606 uint16_t nb_pkts);
143d30ea906Sjfb8856606 static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
144d30ea906Sjfb8856606 uint16_t nb_pkts);
1454418919fSjohnjiang static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev);
1464418919fSjohnjiang static void mrvl_deinit_pp2(void);
1474418919fSjohnjiang static void mrvl_deinit_hifs(void);
148d30ea906Sjfb8856606
149d30ea906Sjfb8856606
/* Build one xstats table row: the stringified counter name plus the
 * offset and size of the matching field in struct pp2_ppio_statistics,
 * so counters can be read generically via offset/size.
 */
#define MRVL_XSTATS_TBL_ENTRY(name) { \
	#name, offsetof(struct pp2_ppio_statistics, name), \
	sizeof(((struct pp2_ppio_statistics *)0)->name) \
}

/* Table with xstats data */
static struct {
	const char *name; /* extended statistic name */
	unsigned int offset; /* field offset within pp2_ppio_statistics */
	unsigned int size; /* field size in bytes */
} mrvl_xstats_tbl[] = {
	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
	MRVL_XSTATS_TBL_ENTRY(rx_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_errors),
	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
	MRVL_XSTATS_TBL_ENTRY(tx_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_errors)
};
175d30ea906Sjfb8856606
176d30ea906Sjfb8856606 static inline void
mrvl_fill_shadowq(struct mrvl_shadow_txq * sq,struct rte_mbuf * buf)177d30ea906Sjfb8856606 mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
178d30ea906Sjfb8856606 {
179d30ea906Sjfb8856606 sq->ent[sq->head].buff.cookie = (uint64_t)buf;
180d30ea906Sjfb8856606 sq->ent[sq->head].buff.addr = buf ?
181d30ea906Sjfb8856606 rte_mbuf_data_iova_default(buf) : 0;
182d30ea906Sjfb8856606
183d30ea906Sjfb8856606 sq->ent[sq->head].bpool =
184d30ea906Sjfb8856606 (unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
185d30ea906Sjfb8856606 buf->refcnt > 1)) ? NULL :
186d30ea906Sjfb8856606 mrvl_port_to_bpool_lookup[buf->port];
187d30ea906Sjfb8856606
188d30ea906Sjfb8856606 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
189d30ea906Sjfb8856606 sq->size++;
190d30ea906Sjfb8856606 }
191d30ea906Sjfb8856606
/**
 * Fill an outgoing ppio descriptor from an mbuf (single-segment case).
 *
 * Resets the descriptor, then programs the buffer's IO address, a zero
 * packet offset and the mbuf's data length.
 *
 * @param desc
 *   Pointer to the outq descriptor to fill.
 * @param buf
 *   Pointer to the mbuf being transmitted.
 */
static inline void
mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
{
	pp2_ppio_outq_desc_reset(desc);
	pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
	pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
	pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
}
200d30ea906Sjfb8856606
201d30ea906Sjfb8856606 static inline int
mrvl_get_bpool_size(int pp2_id,int pool_id)202d30ea906Sjfb8856606 mrvl_get_bpool_size(int pp2_id, int pool_id)
203d30ea906Sjfb8856606 {
204d30ea906Sjfb8856606 int i;
205d30ea906Sjfb8856606 int size = 0;
206d30ea906Sjfb8856606
207d30ea906Sjfb8856606 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
208d30ea906Sjfb8856606 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
209d30ea906Sjfb8856606
210d30ea906Sjfb8856606 return size;
211d30ea906Sjfb8856606 }
212d30ea906Sjfb8856606
/**
 * Reserve the next free bit in a bitmap.
 *
 * Picks the bit just above the highest bit currently set, which is
 * correct because used bits always form a contiguous low-order range
 * (the reserved masks fill bits from 0 upward).
 *
 * @param bitmap
 *   Pointer to the bitmap of used entries; updated on success.
 * @param max
 *   Total number of available entries.
 *
 * @return
 *   Index of the reserved bit on success, -1 when all entries are used.
 */
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n;

	/* __builtin_clz() is undefined for a zero argument; an empty
	 * bitmap simply means bit 0 is the first free entry.
	 */
	if (*bitmap == 0)
		n = 0;
	else
		n = (int)sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}
225d30ea906Sjfb8856606
226d30ea906Sjfb8856606 static int
mrvl_init_hif(int core_id)227d30ea906Sjfb8856606 mrvl_init_hif(int core_id)
228d30ea906Sjfb8856606 {
229d30ea906Sjfb8856606 struct pp2_hif_params params;
230d30ea906Sjfb8856606 char match[MRVL_MATCH_LEN];
231d30ea906Sjfb8856606 int ret;
232d30ea906Sjfb8856606
233d30ea906Sjfb8856606 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
234d30ea906Sjfb8856606 if (ret < 0) {
235d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
236d30ea906Sjfb8856606 return ret;
237d30ea906Sjfb8856606 }
238d30ea906Sjfb8856606
239d30ea906Sjfb8856606 snprintf(match, sizeof(match), "hif-%d", ret);
240d30ea906Sjfb8856606 memset(¶ms, 0, sizeof(params));
241d30ea906Sjfb8856606 params.match = match;
242d30ea906Sjfb8856606 params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
243d30ea906Sjfb8856606 ret = pp2_hif_init(¶ms, &hifs[core_id]);
244d30ea906Sjfb8856606 if (ret) {
245d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
246d30ea906Sjfb8856606 return ret;
247d30ea906Sjfb8856606 }
248d30ea906Sjfb8856606
249d30ea906Sjfb8856606 return 0;
250d30ea906Sjfb8856606 }
251d30ea906Sjfb8856606
/**
 * Return the hif handle for a core, lazily creating it on first use.
 *
 * Fast path returns an already-initialized hif without locking; the
 * slow path takes the port's spinlock, initializes the hif and widens
 * the [mrvl_lcore_first, mrvl_lcore_last] range used by the per-lcore
 * bpool accounting loops.
 *
 * @param priv
 *   Pointer to the port's private structure (provides the lock).
 * @param core_id
 *   Lcore id to fetch/create the hif for.
 *
 * @return
 *   Pointer to the hif, or NULL when initialization failed.
 */
static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
{
	int ret;

	/* Lock-free fast path; presumably each lcore only ever requests
	 * its own slot, so a published entry is stable - confirm callers.
	 */
	if (likely(hifs[core_id] != NULL))
		return hifs[core_id];

	rte_spinlock_lock(&priv->lock);

	ret = mrvl_init_hif(core_id);
	if (ret < 0) {
		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
		goto out;
	}

	/* Track the active lcore range so bpool size loops only visit
	 * cores that actually own a hif.
	 */
	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
out:
	rte_spinlock_unlock(&priv->lock);

	/* On failure hifs[core_id] is still NULL; callers must check. */
	return hifs[core_id];
}
278d30ea906Sjfb8856606
279d30ea906Sjfb8856606 /**
280d30ea906Sjfb8856606 * Set tx burst function according to offload flag
281d30ea906Sjfb8856606 *
282d30ea906Sjfb8856606 * @param dev
283d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
284d30ea906Sjfb8856606 */
285d30ea906Sjfb8856606 static void
mrvl_set_tx_function(struct rte_eth_dev * dev)286d30ea906Sjfb8856606 mrvl_set_tx_function(struct rte_eth_dev *dev)
287d30ea906Sjfb8856606 {
288d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
289d30ea906Sjfb8856606
290d30ea906Sjfb8856606 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
291d30ea906Sjfb8856606 if (priv->multiseg) {
292d30ea906Sjfb8856606 RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n");
293d30ea906Sjfb8856606 dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
294d30ea906Sjfb8856606 } else {
295d30ea906Sjfb8856606 RTE_LOG(INFO, PMD, "Using single-segment tx callback\n");
296d30ea906Sjfb8856606 dev->tx_pkt_burst = mrvl_tx_pkt_burst;
297d30ea906Sjfb8856606 }
298d30ea906Sjfb8856606 }
299d30ea906Sjfb8856606
300d30ea906Sjfb8856606 /**
301d30ea906Sjfb8856606 * Configure rss based on dpdk rss configuration.
302d30ea906Sjfb8856606 *
303d30ea906Sjfb8856606 * @param priv
304d30ea906Sjfb8856606 * Pointer to private structure.
305d30ea906Sjfb8856606 * @param rss_conf
306d30ea906Sjfb8856606 * Pointer to RSS configuration.
307d30ea906Sjfb8856606 *
308d30ea906Sjfb8856606 * @return
309d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
310d30ea906Sjfb8856606 */
311d30ea906Sjfb8856606 static int
mrvl_configure_rss(struct mrvl_priv * priv,struct rte_eth_rss_conf * rss_conf)312d30ea906Sjfb8856606 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
313d30ea906Sjfb8856606 {
314d30ea906Sjfb8856606 if (rss_conf->rss_key)
315d30ea906Sjfb8856606 MRVL_LOG(WARNING, "Changing hash key is not supported");
316d30ea906Sjfb8856606
317d30ea906Sjfb8856606 if (rss_conf->rss_hf == 0) {
318d30ea906Sjfb8856606 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
319d30ea906Sjfb8856606 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
320d30ea906Sjfb8856606 priv->ppio_params.inqs_params.hash_type =
321d30ea906Sjfb8856606 PP2_PPIO_HASH_T_2_TUPLE;
322d30ea906Sjfb8856606 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
323d30ea906Sjfb8856606 priv->ppio_params.inqs_params.hash_type =
324d30ea906Sjfb8856606 PP2_PPIO_HASH_T_5_TUPLE;
325d30ea906Sjfb8856606 priv->rss_hf_tcp = 1;
326d30ea906Sjfb8856606 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
327d30ea906Sjfb8856606 priv->ppio_params.inqs_params.hash_type =
328d30ea906Sjfb8856606 PP2_PPIO_HASH_T_5_TUPLE;
329d30ea906Sjfb8856606 priv->rss_hf_tcp = 0;
330d30ea906Sjfb8856606 } else {
331d30ea906Sjfb8856606 return -EINVAL;
332d30ea906Sjfb8856606 }
333d30ea906Sjfb8856606
334d30ea906Sjfb8856606 return 0;
335d30ea906Sjfb8856606 }
336d30ea906Sjfb8856606
/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues and
 * configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	/* The ppio is created in mrvl_dev_start(); once it exists the
	 * port cannot be reconfigured.
	 */
	if (priv->ppio) {
		MRVL_LOG(INFO, "Device reconfiguration is not supported");
		return -EINVAL;
	}

	/* Only "none" and RSS rx multi-queue modes are supported. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		MRVL_LOG(INFO, "Split headers not supported");
		return -EINVAL;
	}

	/* With jumbo frames enabled, derive the mtu from the requested
	 * maximum rx frame length.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 MRVL_PP2_ETH_HDRS_LEN;

	/* Remember multi-segment tx request; picked up later by
	 * mrvl_set_tx_function().
	 */
	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		priv->multiseg = 1;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	ret = mrvl_configure_txqs(priv, dev->data->port_id,
				  dev->data->nb_tx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->ppio_params.maintain_stats = 1;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	ret = mrvl_tm_init(dev);
	if (ret < 0)
		return ret;

	/* RSS with a single rx queue is pointless; disable hashing. */
	if (dev->data->nb_rx_queues == 1 &&
	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;

		return 0;
	}

	return mrvl_configure_rss(priv,
				  &dev->data->dev_conf.rx_adv_conf.rss_conf);
}
408d30ea906Sjfb8856606
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint16_t mru;
	uint16_t mbuf_data_size = 0; /* SW buffer size */
	int ret;

	mru = MRVL_PP2_MTU_TO_MRU(mtu);
	/*
	 * min_rx_buf_size is equal to mbuf data size
	 * if pmd didn't set it differently
	 */
	/* NOTE(review): if no rx queue has been set up yet,
	 * min_rx_buf_size may be 0 and this uint16_t subtraction would
	 * wrap - confirm callers always configure rx queues first.
	 */
	mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	/* Prevent PMD from:
	 * - setting mru greater than the mbuf size resulting in
	 * hw and sw buffer size mismatch
	 * - setting mtu that requires the support of scattered packets
	 * when this feature has not been enabled/supported so far
	 * (TODO check scattered_rx flag here once scattered RX is supported).
	 */
	if (mru + MRVL_PKT_OFFS > mbuf_data_size) {
		/* Clamp mru to what fits in one mbuf and recompute the
		 * corresponding mtu.
		 */
		mru = mbuf_data_size - MRVL_PKT_OFFS;
		mtu = MRVL_PP2_MRU_TO_MTU(mru);
		MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
			"by current mbuf size: %u. Set MTU to %u, MRU to %u",
			mbuf_data_size, mtu, mru);
	}

	/* Reject values outside the valid [ETHER_MIN_MTU, PKT_SIZE_MAX]
	 * window after clamping.
	 */
	if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
		MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
		return -EINVAL;
	}

	dev->data->mtu = mtu;
	dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;

	/* Before the ppio exists only the software state is updated;
	 * hardware is programmed on mrvl_dev_start().
	 */
	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MRU");
		return ret;
	}

	ret = pp2_ppio_set_mtu(priv->ppio, mtu);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MTU");
		return ret;
	}

	return 0;
}
477d30ea906Sjfb8856606
478d30ea906Sjfb8856606 /**
479d30ea906Sjfb8856606 * DPDK callback to bring the link up.
480d30ea906Sjfb8856606 *
481d30ea906Sjfb8856606 * @param dev
482d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
483d30ea906Sjfb8856606 *
484d30ea906Sjfb8856606 * @return
485d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
486d30ea906Sjfb8856606 */
487d30ea906Sjfb8856606 static int
mrvl_dev_set_link_up(struct rte_eth_dev * dev)488d30ea906Sjfb8856606 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
489d30ea906Sjfb8856606 {
490d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
491d30ea906Sjfb8856606 int ret;
492d30ea906Sjfb8856606
493d30ea906Sjfb8856606 if (!priv->ppio)
494d30ea906Sjfb8856606 return -EPERM;
495d30ea906Sjfb8856606
496d30ea906Sjfb8856606 ret = pp2_ppio_enable(priv->ppio);
497d30ea906Sjfb8856606 if (ret)
498d30ea906Sjfb8856606 return ret;
499d30ea906Sjfb8856606
500d30ea906Sjfb8856606 /*
501d30ea906Sjfb8856606 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
502d30ea906Sjfb8856606 * as pp2_ppio_enable() changes port->t_mode from default 0 to
503d30ea906Sjfb8856606 * PP2_TRAFFIC_INGRESS_EGRESS.
504d30ea906Sjfb8856606 *
505d30ea906Sjfb8856606 * Set mtu to default DPDK value here.
506d30ea906Sjfb8856606 */
507d30ea906Sjfb8856606 ret = mrvl_mtu_set(dev, dev->data->mtu);
508d30ea906Sjfb8856606 if (ret)
509d30ea906Sjfb8856606 pp2_ppio_disable(priv->ppio);
510d30ea906Sjfb8856606
511d30ea906Sjfb8856606 return ret;
512d30ea906Sjfb8856606 }
513d30ea906Sjfb8856606
514d30ea906Sjfb8856606 /**
515d30ea906Sjfb8856606 * DPDK callback to bring the link down.
516d30ea906Sjfb8856606 *
517d30ea906Sjfb8856606 * @param dev
518d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
519d30ea906Sjfb8856606 *
520d30ea906Sjfb8856606 * @return
521d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
522d30ea906Sjfb8856606 */
523d30ea906Sjfb8856606 static int
mrvl_dev_set_link_down(struct rte_eth_dev * dev)524d30ea906Sjfb8856606 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
525d30ea906Sjfb8856606 {
526d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
527d30ea906Sjfb8856606
528d30ea906Sjfb8856606 if (!priv->ppio)
529d30ea906Sjfb8856606 return -EPERM;
530d30ea906Sjfb8856606
531d30ea906Sjfb8856606 return pp2_ppio_disable(priv->ppio);
532d30ea906Sjfb8856606 }
533d30ea906Sjfb8856606
534d30ea906Sjfb8856606 /**
535d30ea906Sjfb8856606 * DPDK callback to start tx queue.
536d30ea906Sjfb8856606 *
537d30ea906Sjfb8856606 * @param dev
538d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
539d30ea906Sjfb8856606 * @param queue_id
540d30ea906Sjfb8856606 * Transmit queue index.
541d30ea906Sjfb8856606 *
542d30ea906Sjfb8856606 * @return
543d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
544d30ea906Sjfb8856606 */
545d30ea906Sjfb8856606 static int
mrvl_tx_queue_start(struct rte_eth_dev * dev,uint16_t queue_id)546d30ea906Sjfb8856606 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
547d30ea906Sjfb8856606 {
548d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
549d30ea906Sjfb8856606 int ret;
550d30ea906Sjfb8856606
551d30ea906Sjfb8856606 if (!priv)
552d30ea906Sjfb8856606 return -EPERM;
553d30ea906Sjfb8856606
554d30ea906Sjfb8856606 /* passing 1 enables given tx queue */
555d30ea906Sjfb8856606 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
556d30ea906Sjfb8856606 if (ret) {
557d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
558d30ea906Sjfb8856606 return ret;
559d30ea906Sjfb8856606 }
560d30ea906Sjfb8856606
561d30ea906Sjfb8856606 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
562d30ea906Sjfb8856606
563d30ea906Sjfb8856606 return 0;
564d30ea906Sjfb8856606 }
565d30ea906Sjfb8856606
566d30ea906Sjfb8856606 /**
567d30ea906Sjfb8856606 * DPDK callback to stop tx queue.
568d30ea906Sjfb8856606 *
569d30ea906Sjfb8856606 * @param dev
570d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
571d30ea906Sjfb8856606 * @param queue_id
572d30ea906Sjfb8856606 * Transmit queue index.
573d30ea906Sjfb8856606 *
574d30ea906Sjfb8856606 * @return
575d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
576d30ea906Sjfb8856606 */
577d30ea906Sjfb8856606 static int
mrvl_tx_queue_stop(struct rte_eth_dev * dev,uint16_t queue_id)578d30ea906Sjfb8856606 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
579d30ea906Sjfb8856606 {
580d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
581d30ea906Sjfb8856606 int ret;
582d30ea906Sjfb8856606
583d30ea906Sjfb8856606 if (!priv->ppio)
584d30ea906Sjfb8856606 return -EPERM;
585d30ea906Sjfb8856606
586d30ea906Sjfb8856606 /* passing 0 disables given tx queue */
587d30ea906Sjfb8856606 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
588d30ea906Sjfb8856606 if (ret) {
589d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
590d30ea906Sjfb8856606 return ret;
591d30ea906Sjfb8856606 }
592d30ea906Sjfb8856606
593d30ea906Sjfb8856606 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
594d30ea906Sjfb8856606
595d30ea906Sjfb8856606 return 0;
596d30ea906Sjfb8856606 }
597d30ea906Sjfb8856606
598d30ea906Sjfb8856606 /**
599d30ea906Sjfb8856606 * DPDK callback to start the device.
600d30ea906Sjfb8856606 *
601d30ea906Sjfb8856606 * @param dev
602d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
603d30ea906Sjfb8856606 *
604d30ea906Sjfb8856606 * @return
605d30ea906Sjfb8856606 * 0 on success, negative errno value on failure.
606d30ea906Sjfb8856606 */
607d30ea906Sjfb8856606 static int
mrvl_dev_start(struct rte_eth_dev * dev)608d30ea906Sjfb8856606 mrvl_dev_start(struct rte_eth_dev *dev)
609d30ea906Sjfb8856606 {
610d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
611d30ea906Sjfb8856606 char match[MRVL_MATCH_LEN];
612d30ea906Sjfb8856606 int ret = 0, i, def_init_size;
613d30ea906Sjfb8856606
614d30ea906Sjfb8856606 if (priv->ppio)
615d30ea906Sjfb8856606 return mrvl_dev_set_link_up(dev);
616d30ea906Sjfb8856606
617d30ea906Sjfb8856606 snprintf(match, sizeof(match), "ppio-%d:%d",
618d30ea906Sjfb8856606 priv->pp_id, priv->ppio_id);
619d30ea906Sjfb8856606 priv->ppio_params.match = match;
620d30ea906Sjfb8856606
621d30ea906Sjfb8856606 /*
622d30ea906Sjfb8856606 * Calculate the minimum bpool size for refill feature as follows:
623d30ea906Sjfb8856606 * 2 default burst sizes multiply by number of rx queues.
624d30ea906Sjfb8856606 * If the bpool size will be below this value, new buffers will
625d30ea906Sjfb8856606 * be added to the pool.
626d30ea906Sjfb8856606 */
627d30ea906Sjfb8856606 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
628d30ea906Sjfb8856606
629d30ea906Sjfb8856606 /* In case initial bpool size configured in queues setup is
630d30ea906Sjfb8856606 * smaller than minimum size add more buffers
631d30ea906Sjfb8856606 */
632d30ea906Sjfb8856606 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
633d30ea906Sjfb8856606 if (priv->bpool_init_size < def_init_size) {
634d30ea906Sjfb8856606 int buffs_to_add = def_init_size - priv->bpool_init_size;
635d30ea906Sjfb8856606
636d30ea906Sjfb8856606 priv->bpool_init_size += buffs_to_add;
637d30ea906Sjfb8856606 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
638d30ea906Sjfb8856606 if (ret)
639d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to add buffers to bpool");
640d30ea906Sjfb8856606 }
641d30ea906Sjfb8856606
642d30ea906Sjfb8856606 /*
643d30ea906Sjfb8856606 * Calculate the maximum bpool size for refill feature as follows:
644d30ea906Sjfb8856606 * maximum number of descriptors in rx queue multiply by number
645d30ea906Sjfb8856606 * of rx queues plus minimum bpool size.
646d30ea906Sjfb8856606 * In case the bpool size will exceed this value, superfluous buffers
647d30ea906Sjfb8856606 * will be removed
648d30ea906Sjfb8856606 */
649d30ea906Sjfb8856606 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
650d30ea906Sjfb8856606 priv->bpool_min_size;
651d30ea906Sjfb8856606
652d30ea906Sjfb8856606 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
653d30ea906Sjfb8856606 if (ret) {
654d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to init ppio");
655d30ea906Sjfb8856606 return ret;
656d30ea906Sjfb8856606 }
657d30ea906Sjfb8856606
658d30ea906Sjfb8856606 /*
659d30ea906Sjfb8856606 * In case there are some some stale uc/mc mac addresses flush them
660d30ea906Sjfb8856606 * here. It cannot be done during mrvl_dev_close() as port information
661d30ea906Sjfb8856606 * is already gone at that point (due to pp2_ppio_deinit() in
662d30ea906Sjfb8856606 * mrvl_dev_stop()).
663d30ea906Sjfb8856606 */
664d30ea906Sjfb8856606 if (!priv->uc_mc_flushed) {
665d30ea906Sjfb8856606 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
666d30ea906Sjfb8856606 if (ret) {
667d30ea906Sjfb8856606 MRVL_LOG(ERR,
668d30ea906Sjfb8856606 "Failed to flush uc/mc filter list");
669d30ea906Sjfb8856606 goto out;
670d30ea906Sjfb8856606 }
671d30ea906Sjfb8856606 priv->uc_mc_flushed = 1;
672d30ea906Sjfb8856606 }
673d30ea906Sjfb8856606
674d30ea906Sjfb8856606 if (!priv->vlan_flushed) {
675d30ea906Sjfb8856606 ret = pp2_ppio_flush_vlan(priv->ppio);
676d30ea906Sjfb8856606 if (ret) {
677d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to flush vlan list");
678d30ea906Sjfb8856606 /*
679d30ea906Sjfb8856606 * TODO
680d30ea906Sjfb8856606 * once pp2_ppio_flush_vlan() is supported jump to out
681d30ea906Sjfb8856606 * goto out;
682d30ea906Sjfb8856606 */
683d30ea906Sjfb8856606 }
684d30ea906Sjfb8856606 priv->vlan_flushed = 1;
685d30ea906Sjfb8856606 }
686d30ea906Sjfb8856606 ret = mrvl_mtu_set(dev, dev->data->mtu);
687d30ea906Sjfb8856606 if (ret)
688d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);
689d30ea906Sjfb8856606
690d30ea906Sjfb8856606 /* For default QoS config, don't start classifier. */
691d30ea906Sjfb8856606 if (mrvl_qos_cfg &&
692d30ea906Sjfb8856606 mrvl_qos_cfg->port[dev->data->port_id].use_global_defaults == 0) {
693d30ea906Sjfb8856606 ret = mrvl_start_qos_mapping(priv);
694d30ea906Sjfb8856606 if (ret) {
695d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to setup QoS mapping");
696d30ea906Sjfb8856606 goto out;
697d30ea906Sjfb8856606 }
698d30ea906Sjfb8856606 }
699d30ea906Sjfb8856606
700d30ea906Sjfb8856606 ret = mrvl_dev_set_link_up(dev);
701d30ea906Sjfb8856606 if (ret) {
702d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to set link up");
703d30ea906Sjfb8856606 goto out;
704d30ea906Sjfb8856606 }
705d30ea906Sjfb8856606
706d30ea906Sjfb8856606 /* start tx queues */
707d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_tx_queues; i++) {
708d30ea906Sjfb8856606 struct mrvl_txq *txq = dev->data->tx_queues[i];
709d30ea906Sjfb8856606
710d30ea906Sjfb8856606 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
711d30ea906Sjfb8856606
712d30ea906Sjfb8856606 if (!txq->tx_deferred_start)
713d30ea906Sjfb8856606 continue;
714d30ea906Sjfb8856606
715d30ea906Sjfb8856606 /*
716d30ea906Sjfb8856606 * All txqs are started by default. Stop them
717d30ea906Sjfb8856606 * so that tx_deferred_start works as expected.
718d30ea906Sjfb8856606 */
719d30ea906Sjfb8856606 ret = mrvl_tx_queue_stop(dev, i);
720d30ea906Sjfb8856606 if (ret)
721d30ea906Sjfb8856606 goto out;
722d30ea906Sjfb8856606 }
723d30ea906Sjfb8856606
724d30ea906Sjfb8856606 mrvl_flow_init(dev);
725d30ea906Sjfb8856606 mrvl_mtr_init(dev);
726d30ea906Sjfb8856606 mrvl_set_tx_function(dev);
727d30ea906Sjfb8856606
728d30ea906Sjfb8856606 return 0;
729d30ea906Sjfb8856606 out:
730d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to start device");
731d30ea906Sjfb8856606 pp2_ppio_deinit(priv->ppio);
732d30ea906Sjfb8856606 return ret;
733d30ea906Sjfb8856606 }
734d30ea906Sjfb8856606
735d30ea906Sjfb8856606 /**
736d30ea906Sjfb8856606 * Flush receive queues.
737d30ea906Sjfb8856606 *
738d30ea906Sjfb8856606 * @param dev
739d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
740d30ea906Sjfb8856606 */
741d30ea906Sjfb8856606 static void
mrvl_flush_rx_queues(struct rte_eth_dev * dev)742d30ea906Sjfb8856606 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
743d30ea906Sjfb8856606 {
744d30ea906Sjfb8856606 int i;
745d30ea906Sjfb8856606
746d30ea906Sjfb8856606 MRVL_LOG(INFO, "Flushing rx queues");
747d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_rx_queues; i++) {
748d30ea906Sjfb8856606 int ret, num;
749d30ea906Sjfb8856606
750d30ea906Sjfb8856606 do {
751d30ea906Sjfb8856606 struct mrvl_rxq *q = dev->data->rx_queues[i];
752d30ea906Sjfb8856606 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
753d30ea906Sjfb8856606
754d30ea906Sjfb8856606 num = MRVL_PP2_RXD_MAX;
755d30ea906Sjfb8856606 ret = pp2_ppio_recv(q->priv->ppio,
756d30ea906Sjfb8856606 q->priv->rxq_map[q->queue_id].tc,
757d30ea906Sjfb8856606 q->priv->rxq_map[q->queue_id].inq,
758d30ea906Sjfb8856606 descs, (uint16_t *)&num);
759d30ea906Sjfb8856606 } while (ret == 0 && num);
760d30ea906Sjfb8856606 }
761d30ea906Sjfb8856606 }
762d30ea906Sjfb8856606
763d30ea906Sjfb8856606 /**
764d30ea906Sjfb8856606 * Flush transmit shadow queues.
765d30ea906Sjfb8856606 *
766d30ea906Sjfb8856606 * @param dev
767d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
768d30ea906Sjfb8856606 */
769d30ea906Sjfb8856606 static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev * dev)770d30ea906Sjfb8856606 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
771d30ea906Sjfb8856606 {
772d30ea906Sjfb8856606 int i, j;
773d30ea906Sjfb8856606 struct mrvl_txq *txq;
774d30ea906Sjfb8856606
775d30ea906Sjfb8856606 MRVL_LOG(INFO, "Flushing tx shadow queues");
776d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_tx_queues; i++) {
777d30ea906Sjfb8856606 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
778d30ea906Sjfb8856606
779d30ea906Sjfb8856606 for (j = 0; j < RTE_MAX_LCORE; j++) {
780d30ea906Sjfb8856606 struct mrvl_shadow_txq *sq;
781d30ea906Sjfb8856606
782d30ea906Sjfb8856606 if (!hifs[j])
783d30ea906Sjfb8856606 continue;
784d30ea906Sjfb8856606
785d30ea906Sjfb8856606 sq = &txq->shadow_txqs[j];
786d30ea906Sjfb8856606 mrvl_free_sent_buffers(txq->priv->ppio,
787d30ea906Sjfb8856606 hifs[j], j, sq, txq->queue_id, 1);
788d30ea906Sjfb8856606 while (sq->tail != sq->head) {
789d30ea906Sjfb8856606 uint64_t addr = cookie_addr_high |
790d30ea906Sjfb8856606 sq->ent[sq->tail].buff.cookie;
791d30ea906Sjfb8856606 rte_pktmbuf_free(
792d30ea906Sjfb8856606 (struct rte_mbuf *)addr);
793d30ea906Sjfb8856606 sq->tail = (sq->tail + 1) &
794d30ea906Sjfb8856606 MRVL_PP2_TX_SHADOWQ_MASK;
795d30ea906Sjfb8856606 }
796d30ea906Sjfb8856606 memset(sq, 0, sizeof(*sq));
797d30ea906Sjfb8856606 }
798d30ea906Sjfb8856606 }
799d30ea906Sjfb8856606 }
800d30ea906Sjfb8856606
801d30ea906Sjfb8856606 /**
802d30ea906Sjfb8856606 * Flush hardware bpool (buffer-pool).
803d30ea906Sjfb8856606 *
804d30ea906Sjfb8856606 * @param dev
805d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
806d30ea906Sjfb8856606 */
807d30ea906Sjfb8856606 static void
mrvl_flush_bpool(struct rte_eth_dev * dev)808d30ea906Sjfb8856606 mrvl_flush_bpool(struct rte_eth_dev *dev)
809d30ea906Sjfb8856606 {
810d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
811d30ea906Sjfb8856606 struct pp2_hif *hif;
812d30ea906Sjfb8856606 uint32_t num;
813d30ea906Sjfb8856606 int ret;
814d30ea906Sjfb8856606 unsigned int core_id = rte_lcore_id();
815d30ea906Sjfb8856606
816d30ea906Sjfb8856606 if (core_id == LCORE_ID_ANY)
817*2d9fd380Sjfb8856606 core_id = rte_get_main_lcore();
818d30ea906Sjfb8856606
819d30ea906Sjfb8856606 hif = mrvl_get_hif(priv, core_id);
820d30ea906Sjfb8856606
821d30ea906Sjfb8856606 ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
822d30ea906Sjfb8856606 if (ret) {
823d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to get bpool buffers number");
824d30ea906Sjfb8856606 return;
825d30ea906Sjfb8856606 }
826d30ea906Sjfb8856606
827d30ea906Sjfb8856606 while (num--) {
828d30ea906Sjfb8856606 struct pp2_buff_inf inf;
829d30ea906Sjfb8856606 uint64_t addr;
830d30ea906Sjfb8856606
831d30ea906Sjfb8856606 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
832d30ea906Sjfb8856606 if (ret)
833d30ea906Sjfb8856606 break;
834d30ea906Sjfb8856606
835d30ea906Sjfb8856606 addr = cookie_addr_high | inf.cookie;
836d30ea906Sjfb8856606 rte_pktmbuf_free((struct rte_mbuf *)addr);
837d30ea906Sjfb8856606 }
838d30ea906Sjfb8856606 }
839d30ea906Sjfb8856606
/**
 * DPDK callback to stop the device.
 *
 * Stopping the port amounts to taking the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	int ret = mrvl_dev_set_link_down(dev);

	return ret;
}
851d30ea906Sjfb8856606
/**
 * DPDK callback to close the device.
 *
 * Releases everything acquired for this port: drains rx queues and tx
 * shadow queues, tears down flow/meter/classifier/QoS state, deinits the
 * ppio, the policer and the bpool. The teardown order matters; see the
 * inline notes. When the last port is closed the shared MUSDK state is
 * deinitialized as well.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	/* Only the primary process owns and releases hardware resources. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	mrvl_flow_deinit(dev);
	mrvl_mtr_deinit(dev);

	/* Free the per-TC inq parameter arrays allocated at configure time. */
	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	if (priv->cls_tbl) {
		pp2_cls_tbl_deinit(priv->cls_tbl);
		priv->cls_tbl = NULL;
	}

	if (priv->qos_tbl) {
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
		priv->qos_tbl = NULL;
	}

	mrvl_flush_bpool(dev);
	mrvl_tm_deinit(dev);

	if (priv->ppio) {
		pp2_ppio_deinit(priv->ppio);
		priv->ppio = NULL;
	}

	/* policer must be released after ppio deinitialization */
	if (priv->default_policer) {
		pp2_cls_plcr_deinit(priv->default_policer);
		priv->default_policer = NULL;
	}


	if (priv->bpool) {
		pp2_bpool_deinit(priv->bpool);
		/* Give the bpool slot back so future ports can reuse it. */
		used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
		priv->bpool = NULL;
	}

	mrvl_dev_num--;

	/* Last port gone - tear down the shared MUSDK stack. */
	if (mrvl_dev_num == 0) {
		MRVL_LOG(INFO, "Perform MUSDK deinit");
		mrvl_deinit_hifs();
		mrvl_deinit_pp2();
		rte_mvep_deinit(MVEP_MOD_T_PP2);
	}

	return 0;
}
924d30ea906Sjfb8856606
925d30ea906Sjfb8856606 /**
926d30ea906Sjfb8856606 * DPDK callback to retrieve physical link information.
927d30ea906Sjfb8856606 *
928d30ea906Sjfb8856606 * @param dev
929d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
930d30ea906Sjfb8856606 * @param wait_to_complete
931d30ea906Sjfb8856606 * Wait for request completion (ignored).
932d30ea906Sjfb8856606 *
933d30ea906Sjfb8856606 * @return
934d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
935d30ea906Sjfb8856606 */
936d30ea906Sjfb8856606 static int
mrvl_link_update(struct rte_eth_dev * dev,int wait_to_complete __rte_unused)937d30ea906Sjfb8856606 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
938d30ea906Sjfb8856606 {
939d30ea906Sjfb8856606 /*
940d30ea906Sjfb8856606 * TODO
941d30ea906Sjfb8856606 * once MUSDK provides necessary API use it here
942d30ea906Sjfb8856606 */
943d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
944d30ea906Sjfb8856606 struct ethtool_cmd edata;
945d30ea906Sjfb8856606 struct ifreq req;
946d30ea906Sjfb8856606 int ret, fd, link_up;
947d30ea906Sjfb8856606
948d30ea906Sjfb8856606 if (!priv->ppio)
949d30ea906Sjfb8856606 return -EPERM;
950d30ea906Sjfb8856606
951d30ea906Sjfb8856606 edata.cmd = ETHTOOL_GSET;
952d30ea906Sjfb8856606
953d30ea906Sjfb8856606 strcpy(req.ifr_name, dev->data->name);
954d30ea906Sjfb8856606 req.ifr_data = (void *)&edata;
955d30ea906Sjfb8856606
956d30ea906Sjfb8856606 fd = socket(AF_INET, SOCK_DGRAM, 0);
957d30ea906Sjfb8856606 if (fd == -1)
958d30ea906Sjfb8856606 return -EFAULT;
959d30ea906Sjfb8856606
960d30ea906Sjfb8856606 ret = ioctl(fd, SIOCETHTOOL, &req);
961d30ea906Sjfb8856606 if (ret == -1) {
962d30ea906Sjfb8856606 close(fd);
963d30ea906Sjfb8856606 return -EFAULT;
964d30ea906Sjfb8856606 }
965d30ea906Sjfb8856606
966d30ea906Sjfb8856606 close(fd);
967d30ea906Sjfb8856606
968d30ea906Sjfb8856606 switch (ethtool_cmd_speed(&edata)) {
969d30ea906Sjfb8856606 case SPEED_10:
970d30ea906Sjfb8856606 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
971d30ea906Sjfb8856606 break;
972d30ea906Sjfb8856606 case SPEED_100:
973d30ea906Sjfb8856606 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
974d30ea906Sjfb8856606 break;
975d30ea906Sjfb8856606 case SPEED_1000:
976d30ea906Sjfb8856606 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
977d30ea906Sjfb8856606 break;
978d30ea906Sjfb8856606 case SPEED_10000:
979d30ea906Sjfb8856606 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
980d30ea906Sjfb8856606 break;
981d30ea906Sjfb8856606 default:
982d30ea906Sjfb8856606 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
983d30ea906Sjfb8856606 }
984d30ea906Sjfb8856606
985d30ea906Sjfb8856606 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
986d30ea906Sjfb8856606 ETH_LINK_HALF_DUPLEX;
987d30ea906Sjfb8856606 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
988d30ea906Sjfb8856606 ETH_LINK_FIXED;
989d30ea906Sjfb8856606 pp2_ppio_get_link_state(priv->ppio, &link_up);
990d30ea906Sjfb8856606 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
991d30ea906Sjfb8856606
992d30ea906Sjfb8856606 return 0;
993d30ea906Sjfb8856606 }
994d30ea906Sjfb8856606
995d30ea906Sjfb8856606 /**
996d30ea906Sjfb8856606 * DPDK callback to enable promiscuous mode.
997d30ea906Sjfb8856606 *
998d30ea906Sjfb8856606 * @param dev
999d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
10004418919fSjohnjiang *
10014418919fSjohnjiang * @return
10024418919fSjohnjiang * 0 on success, negative error value otherwise.
1003d30ea906Sjfb8856606 */
10044418919fSjohnjiang static int
mrvl_promiscuous_enable(struct rte_eth_dev * dev)1005d30ea906Sjfb8856606 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
1006d30ea906Sjfb8856606 {
1007d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1008d30ea906Sjfb8856606 int ret;
1009d30ea906Sjfb8856606
1010d30ea906Sjfb8856606 if (!priv->ppio)
10114418919fSjohnjiang return 0;
1012d30ea906Sjfb8856606
1013d30ea906Sjfb8856606 if (priv->isolated)
10144418919fSjohnjiang return 0;
1015d30ea906Sjfb8856606
1016d30ea906Sjfb8856606 ret = pp2_ppio_set_promisc(priv->ppio, 1);
10174418919fSjohnjiang if (ret) {
1018d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
10194418919fSjohnjiang return -EAGAIN;
10204418919fSjohnjiang }
10214418919fSjohnjiang
10224418919fSjohnjiang return 0;
1023d30ea906Sjfb8856606 }
1024d30ea906Sjfb8856606
1025d30ea906Sjfb8856606 /**
1026d30ea906Sjfb8856606 * DPDK callback to enable allmulti mode.
1027d30ea906Sjfb8856606 *
1028d30ea906Sjfb8856606 * @param dev
1029d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
10304418919fSjohnjiang *
10314418919fSjohnjiang * @return
10324418919fSjohnjiang * 0 on success, negative error value otherwise.
1033d30ea906Sjfb8856606 */
10344418919fSjohnjiang static int
mrvl_allmulticast_enable(struct rte_eth_dev * dev)1035d30ea906Sjfb8856606 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
1036d30ea906Sjfb8856606 {
1037d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1038d30ea906Sjfb8856606 int ret;
1039d30ea906Sjfb8856606
1040d30ea906Sjfb8856606 if (!priv->ppio)
10414418919fSjohnjiang return 0;
1042d30ea906Sjfb8856606
1043d30ea906Sjfb8856606 if (priv->isolated)
10444418919fSjohnjiang return 0;
1045d30ea906Sjfb8856606
1046d30ea906Sjfb8856606 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
10474418919fSjohnjiang if (ret) {
1048d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed enable all-multicast mode");
10494418919fSjohnjiang return -EAGAIN;
10504418919fSjohnjiang }
10514418919fSjohnjiang
10524418919fSjohnjiang return 0;
1053d30ea906Sjfb8856606 }
1054d30ea906Sjfb8856606
1055d30ea906Sjfb8856606 /**
1056d30ea906Sjfb8856606 * DPDK callback to disable promiscuous mode.
1057d30ea906Sjfb8856606 *
1058d30ea906Sjfb8856606 * @param dev
1059d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
10604418919fSjohnjiang *
10614418919fSjohnjiang * @return
10624418919fSjohnjiang * 0 on success, negative error value otherwise.
1063d30ea906Sjfb8856606 */
10644418919fSjohnjiang static int
mrvl_promiscuous_disable(struct rte_eth_dev * dev)1065d30ea906Sjfb8856606 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
1066d30ea906Sjfb8856606 {
1067d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1068d30ea906Sjfb8856606 int ret;
1069d30ea906Sjfb8856606
1070d30ea906Sjfb8856606 if (!priv->ppio)
10714418919fSjohnjiang return 0;
1072d30ea906Sjfb8856606
1073d30ea906Sjfb8856606 ret = pp2_ppio_set_promisc(priv->ppio, 0);
10744418919fSjohnjiang if (ret) {
1075d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
10764418919fSjohnjiang return -EAGAIN;
10774418919fSjohnjiang }
10784418919fSjohnjiang
10794418919fSjohnjiang return 0;
1080d30ea906Sjfb8856606 }
1081d30ea906Sjfb8856606
1082d30ea906Sjfb8856606 /**
1083d30ea906Sjfb8856606 * DPDK callback to disable allmulticast mode.
1084d30ea906Sjfb8856606 *
1085d30ea906Sjfb8856606 * @param dev
1086d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
10874418919fSjohnjiang *
10884418919fSjohnjiang * @return
10894418919fSjohnjiang * 0 on success, negative error value otherwise.
1090d30ea906Sjfb8856606 */
10914418919fSjohnjiang static int
mrvl_allmulticast_disable(struct rte_eth_dev * dev)1092d30ea906Sjfb8856606 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
1093d30ea906Sjfb8856606 {
1094d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1095d30ea906Sjfb8856606 int ret;
1096d30ea906Sjfb8856606
1097d30ea906Sjfb8856606 if (!priv->ppio)
10984418919fSjohnjiang return 0;
1099d30ea906Sjfb8856606
1100d30ea906Sjfb8856606 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
11014418919fSjohnjiang if (ret) {
1102d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
11034418919fSjohnjiang return -EAGAIN;
11044418919fSjohnjiang }
11054418919fSjohnjiang
11064418919fSjohnjiang return 0;
1107d30ea906Sjfb8856606 }
1108d30ea906Sjfb8856606
1109d30ea906Sjfb8856606 /**
1110d30ea906Sjfb8856606 * DPDK callback to remove a MAC address.
1111d30ea906Sjfb8856606 *
1112d30ea906Sjfb8856606 * @param dev
1113d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1114d30ea906Sjfb8856606 * @param index
1115d30ea906Sjfb8856606 * MAC address index.
1116d30ea906Sjfb8856606 */
1117d30ea906Sjfb8856606 static void
mrvl_mac_addr_remove(struct rte_eth_dev * dev,uint32_t index)1118d30ea906Sjfb8856606 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1119d30ea906Sjfb8856606 {
1120d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
11214418919fSjohnjiang char buf[RTE_ETHER_ADDR_FMT_SIZE];
1122d30ea906Sjfb8856606 int ret;
1123d30ea906Sjfb8856606
1124d30ea906Sjfb8856606 if (!priv->ppio)
1125d30ea906Sjfb8856606 return;
1126d30ea906Sjfb8856606
1127d30ea906Sjfb8856606 if (priv->isolated)
1128d30ea906Sjfb8856606 return;
1129d30ea906Sjfb8856606
1130d30ea906Sjfb8856606 ret = pp2_ppio_remove_mac_addr(priv->ppio,
1131d30ea906Sjfb8856606 dev->data->mac_addrs[index].addr_bytes);
1132d30ea906Sjfb8856606 if (ret) {
11334418919fSjohnjiang rte_ether_format_addr(buf, sizeof(buf),
1134d30ea906Sjfb8856606 &dev->data->mac_addrs[index]);
1135d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
1136d30ea906Sjfb8856606 }
1137d30ea906Sjfb8856606 }
1138d30ea906Sjfb8856606
1139d30ea906Sjfb8856606 /**
1140d30ea906Sjfb8856606 * DPDK callback to add a MAC address.
1141d30ea906Sjfb8856606 *
1142d30ea906Sjfb8856606 * @param dev
1143d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1144d30ea906Sjfb8856606 * @param mac_addr
1145d30ea906Sjfb8856606 * MAC address to register.
1146d30ea906Sjfb8856606 * @param index
1147d30ea906Sjfb8856606 * MAC address index.
1148d30ea906Sjfb8856606 * @param vmdq
1149d30ea906Sjfb8856606 * VMDq pool index to associate address with (unused).
1150d30ea906Sjfb8856606 *
1151d30ea906Sjfb8856606 * @return
1152d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1153d30ea906Sjfb8856606 */
1154d30ea906Sjfb8856606 static int
mrvl_mac_addr_add(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr,uint32_t index,uint32_t vmdq __rte_unused)11554418919fSjohnjiang mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1156d30ea906Sjfb8856606 uint32_t index, uint32_t vmdq __rte_unused)
1157d30ea906Sjfb8856606 {
1158d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
11594418919fSjohnjiang char buf[RTE_ETHER_ADDR_FMT_SIZE];
1160d30ea906Sjfb8856606 int ret;
1161d30ea906Sjfb8856606
1162d30ea906Sjfb8856606 if (priv->isolated)
1163d30ea906Sjfb8856606 return -ENOTSUP;
1164d30ea906Sjfb8856606
1165d30ea906Sjfb8856606 if (index == 0)
1166d30ea906Sjfb8856606 /* For setting index 0, mrvl_mac_addr_set() should be used.*/
1167d30ea906Sjfb8856606 return -1;
1168d30ea906Sjfb8856606
1169d30ea906Sjfb8856606 if (!priv->ppio)
1170d30ea906Sjfb8856606 return 0;
1171d30ea906Sjfb8856606
1172d30ea906Sjfb8856606 /*
1173d30ea906Sjfb8856606 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
1174d30ea906Sjfb8856606 * parameter uc_filter_max. Maximum number of mc addresses is then
1175d30ea906Sjfb8856606 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
1176d30ea906Sjfb8856606 * 21 respectively.
1177d30ea906Sjfb8856606 *
1178d30ea906Sjfb8856606 * If more than uc_filter_max uc addresses were added to filter list
1179d30ea906Sjfb8856606 * then NIC will switch to promiscuous mode automatically.
1180d30ea906Sjfb8856606 *
1181d30ea906Sjfb8856606 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses
1182d30ea906Sjfb8856606 * were added to filter list then NIC will switch to all-multicast mode
1183d30ea906Sjfb8856606 * automatically.
1184d30ea906Sjfb8856606 */
1185d30ea906Sjfb8856606 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1186d30ea906Sjfb8856606 if (ret) {
11874418919fSjohnjiang rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1188d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1189d30ea906Sjfb8856606 return -1;
1190d30ea906Sjfb8856606 }
1191d30ea906Sjfb8856606
1192d30ea906Sjfb8856606 return 0;
1193d30ea906Sjfb8856606 }
1194d30ea906Sjfb8856606
1195d30ea906Sjfb8856606 /**
1196d30ea906Sjfb8856606 * DPDK callback to set the primary MAC address.
1197d30ea906Sjfb8856606 *
1198d30ea906Sjfb8856606 * @param dev
1199d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1200d30ea906Sjfb8856606 * @param mac_addr
1201d30ea906Sjfb8856606 * MAC address to register.
1202d30ea906Sjfb8856606 *
1203d30ea906Sjfb8856606 * @return
1204d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1205d30ea906Sjfb8856606 */
1206d30ea906Sjfb8856606 static int
mrvl_mac_addr_set(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr)12074418919fSjohnjiang mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1208d30ea906Sjfb8856606 {
1209d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1210d30ea906Sjfb8856606 int ret;
1211d30ea906Sjfb8856606
1212d30ea906Sjfb8856606 if (!priv->ppio)
1213d30ea906Sjfb8856606 return 0;
1214d30ea906Sjfb8856606
1215d30ea906Sjfb8856606 if (priv->isolated)
1216d30ea906Sjfb8856606 return -ENOTSUP;
1217d30ea906Sjfb8856606
1218d30ea906Sjfb8856606 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1219d30ea906Sjfb8856606 if (ret) {
12204418919fSjohnjiang char buf[RTE_ETHER_ADDR_FMT_SIZE];
12214418919fSjohnjiang rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1222d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1223d30ea906Sjfb8856606 }
1224d30ea906Sjfb8856606
1225d30ea906Sjfb8856606 return ret;
1226d30ea906Sjfb8856606 }
1227d30ea906Sjfb8856606
1228d30ea906Sjfb8856606 /**
1229d30ea906Sjfb8856606 * DPDK callback to get device statistics.
1230d30ea906Sjfb8856606 *
1231d30ea906Sjfb8856606 * @param dev
1232d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1233d30ea906Sjfb8856606 * @param stats
1234d30ea906Sjfb8856606 * Stats structure output buffer.
1235d30ea906Sjfb8856606 *
1236d30ea906Sjfb8856606 * @return
1237d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1238d30ea906Sjfb8856606 */
1239d30ea906Sjfb8856606 static int
mrvl_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * stats)1240d30ea906Sjfb8856606 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1241d30ea906Sjfb8856606 {
1242d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1243d30ea906Sjfb8856606 struct pp2_ppio_statistics ppio_stats;
1244d30ea906Sjfb8856606 uint64_t drop_mac = 0;
1245d30ea906Sjfb8856606 unsigned int i, idx, ret;
1246d30ea906Sjfb8856606
1247d30ea906Sjfb8856606 if (!priv->ppio)
1248d30ea906Sjfb8856606 return -EPERM;
1249d30ea906Sjfb8856606
1250d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1251d30ea906Sjfb8856606 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1252d30ea906Sjfb8856606 struct pp2_ppio_inq_statistics rx_stats;
1253d30ea906Sjfb8856606
1254d30ea906Sjfb8856606 if (!rxq)
1255d30ea906Sjfb8856606 continue;
1256d30ea906Sjfb8856606
1257d30ea906Sjfb8856606 idx = rxq->queue_id;
1258d30ea906Sjfb8856606 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1259d30ea906Sjfb8856606 MRVL_LOG(ERR,
1260d30ea906Sjfb8856606 "rx queue %d stats out of range (0 - %d)",
1261d30ea906Sjfb8856606 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1262d30ea906Sjfb8856606 continue;
1263d30ea906Sjfb8856606 }
1264d30ea906Sjfb8856606
1265d30ea906Sjfb8856606 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1266d30ea906Sjfb8856606 priv->rxq_map[idx].tc,
1267d30ea906Sjfb8856606 priv->rxq_map[idx].inq,
1268d30ea906Sjfb8856606 &rx_stats, 0);
1269d30ea906Sjfb8856606 if (unlikely(ret)) {
1270d30ea906Sjfb8856606 MRVL_LOG(ERR,
1271d30ea906Sjfb8856606 "Failed to update rx queue %d stats", idx);
1272d30ea906Sjfb8856606 break;
1273d30ea906Sjfb8856606 }
1274d30ea906Sjfb8856606
1275d30ea906Sjfb8856606 stats->q_ibytes[idx] = rxq->bytes_recv;
1276d30ea906Sjfb8856606 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1277d30ea906Sjfb8856606 stats->q_errors[idx] = rx_stats.drop_early +
1278d30ea906Sjfb8856606 rx_stats.drop_fullq +
1279d30ea906Sjfb8856606 rx_stats.drop_bm +
1280d30ea906Sjfb8856606 rxq->drop_mac;
1281d30ea906Sjfb8856606 stats->ibytes += rxq->bytes_recv;
1282d30ea906Sjfb8856606 drop_mac += rxq->drop_mac;
1283d30ea906Sjfb8856606 }
1284d30ea906Sjfb8856606
1285d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1286d30ea906Sjfb8856606 struct mrvl_txq *txq = dev->data->tx_queues[i];
1287d30ea906Sjfb8856606 struct pp2_ppio_outq_statistics tx_stats;
1288d30ea906Sjfb8856606
1289d30ea906Sjfb8856606 if (!txq)
1290d30ea906Sjfb8856606 continue;
1291d30ea906Sjfb8856606
1292d30ea906Sjfb8856606 idx = txq->queue_id;
1293d30ea906Sjfb8856606 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1294d30ea906Sjfb8856606 MRVL_LOG(ERR,
1295d30ea906Sjfb8856606 "tx queue %d stats out of range (0 - %d)",
1296d30ea906Sjfb8856606 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1297d30ea906Sjfb8856606 }
1298d30ea906Sjfb8856606
1299d30ea906Sjfb8856606 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1300d30ea906Sjfb8856606 &tx_stats, 0);
1301d30ea906Sjfb8856606 if (unlikely(ret)) {
1302d30ea906Sjfb8856606 MRVL_LOG(ERR,
1303d30ea906Sjfb8856606 "Failed to update tx queue %d stats", idx);
1304d30ea906Sjfb8856606 break;
1305d30ea906Sjfb8856606 }
1306d30ea906Sjfb8856606
1307d30ea906Sjfb8856606 stats->q_opackets[idx] = tx_stats.deq_desc;
1308d30ea906Sjfb8856606 stats->q_obytes[idx] = txq->bytes_sent;
1309d30ea906Sjfb8856606 stats->obytes += txq->bytes_sent;
1310d30ea906Sjfb8856606 }
1311d30ea906Sjfb8856606
1312d30ea906Sjfb8856606 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1313d30ea906Sjfb8856606 if (unlikely(ret)) {
1314d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to update port statistics");
1315d30ea906Sjfb8856606 return ret;
1316d30ea906Sjfb8856606 }
1317d30ea906Sjfb8856606
1318d30ea906Sjfb8856606 stats->ipackets += ppio_stats.rx_packets - drop_mac;
1319d30ea906Sjfb8856606 stats->opackets += ppio_stats.tx_packets;
1320d30ea906Sjfb8856606 stats->imissed += ppio_stats.rx_fullq_dropped +
1321d30ea906Sjfb8856606 ppio_stats.rx_bm_dropped +
1322d30ea906Sjfb8856606 ppio_stats.rx_early_dropped +
1323d30ea906Sjfb8856606 ppio_stats.rx_fifo_dropped +
1324d30ea906Sjfb8856606 ppio_stats.rx_cls_dropped;
1325d30ea906Sjfb8856606 stats->ierrors = drop_mac;
1326d30ea906Sjfb8856606
1327d30ea906Sjfb8856606 return 0;
1328d30ea906Sjfb8856606 }
1329d30ea906Sjfb8856606
1330d30ea906Sjfb8856606 /**
1331d30ea906Sjfb8856606 * DPDK callback to clear device statistics.
1332d30ea906Sjfb8856606 *
1333d30ea906Sjfb8856606 * @param dev
1334d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
13354418919fSjohnjiang *
13364418919fSjohnjiang * @return
13374418919fSjohnjiang * 0 on success, negative error value otherwise.
1338d30ea906Sjfb8856606 */
13394418919fSjohnjiang static int
mrvl_stats_reset(struct rte_eth_dev * dev)1340d30ea906Sjfb8856606 mrvl_stats_reset(struct rte_eth_dev *dev)
1341d30ea906Sjfb8856606 {
1342d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1343d30ea906Sjfb8856606 int i;
1344d30ea906Sjfb8856606
1345d30ea906Sjfb8856606 if (!priv->ppio)
13464418919fSjohnjiang return 0;
1347d30ea906Sjfb8856606
1348d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1349d30ea906Sjfb8856606 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1350d30ea906Sjfb8856606
1351d30ea906Sjfb8856606 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1352d30ea906Sjfb8856606 priv->rxq_map[i].inq, NULL, 1);
1353d30ea906Sjfb8856606 rxq->bytes_recv = 0;
1354d30ea906Sjfb8856606 rxq->drop_mac = 0;
1355d30ea906Sjfb8856606 }
1356d30ea906Sjfb8856606
1357d30ea906Sjfb8856606 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1358d30ea906Sjfb8856606 struct mrvl_txq *txq = dev->data->tx_queues[i];
1359d30ea906Sjfb8856606
1360d30ea906Sjfb8856606 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1361d30ea906Sjfb8856606 txq->bytes_sent = 0;
1362d30ea906Sjfb8856606 }
1363d30ea906Sjfb8856606
13644418919fSjohnjiang return pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1365d30ea906Sjfb8856606 }
1366d30ea906Sjfb8856606
1367d30ea906Sjfb8856606 /**
1368d30ea906Sjfb8856606 * DPDK callback to get extended statistics.
1369d30ea906Sjfb8856606 *
1370d30ea906Sjfb8856606 * @param dev
1371d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1372d30ea906Sjfb8856606 * @param stats
1373d30ea906Sjfb8856606 * Pointer to xstats table.
1374d30ea906Sjfb8856606 * @param n
1375d30ea906Sjfb8856606 * Number of entries in xstats table.
1376d30ea906Sjfb8856606 * @return
1377d30ea906Sjfb8856606 * Negative value on error, number of read xstats otherwise.
1378d30ea906Sjfb8856606 */
1379d30ea906Sjfb8856606 static int
mrvl_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * stats,unsigned int n)1380d30ea906Sjfb8856606 mrvl_xstats_get(struct rte_eth_dev *dev,
1381d30ea906Sjfb8856606 struct rte_eth_xstat *stats, unsigned int n)
1382d30ea906Sjfb8856606 {
1383d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1384d30ea906Sjfb8856606 struct pp2_ppio_statistics ppio_stats;
1385d30ea906Sjfb8856606 unsigned int i;
1386d30ea906Sjfb8856606
1387d30ea906Sjfb8856606 if (!stats)
1388d30ea906Sjfb8856606 return 0;
1389d30ea906Sjfb8856606
1390d30ea906Sjfb8856606 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1391d30ea906Sjfb8856606 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1392d30ea906Sjfb8856606 uint64_t val;
1393d30ea906Sjfb8856606
1394d30ea906Sjfb8856606 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1395d30ea906Sjfb8856606 val = *(uint32_t *)((uint8_t *)&ppio_stats +
1396d30ea906Sjfb8856606 mrvl_xstats_tbl[i].offset);
1397d30ea906Sjfb8856606 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1398d30ea906Sjfb8856606 val = *(uint64_t *)((uint8_t *)&ppio_stats +
1399d30ea906Sjfb8856606 mrvl_xstats_tbl[i].offset);
1400d30ea906Sjfb8856606 else
1401d30ea906Sjfb8856606 return -EINVAL;
1402d30ea906Sjfb8856606
1403d30ea906Sjfb8856606 stats[i].id = i;
1404d30ea906Sjfb8856606 stats[i].value = val;
1405d30ea906Sjfb8856606 }
1406d30ea906Sjfb8856606
1407d30ea906Sjfb8856606 return n;
1408d30ea906Sjfb8856606 }
1409d30ea906Sjfb8856606
1410d30ea906Sjfb8856606 /**
1411d30ea906Sjfb8856606 * DPDK callback to reset extended statistics.
1412d30ea906Sjfb8856606 *
1413d30ea906Sjfb8856606 * @param dev
1414d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
14154418919fSjohnjiang *
14164418919fSjohnjiang * @return
14174418919fSjohnjiang * 0 on success, negative error value otherwise.
1418d30ea906Sjfb8856606 */
static int
mrvl_xstats_reset(struct rte_eth_dev *dev)
{
	/* Extended stats read the same hardware counters as the basic
	 * stats, so clearing them is a single shared operation.
	 */
	return mrvl_stats_reset(dev);
}
1424d30ea906Sjfb8856606
1425d30ea906Sjfb8856606 /**
1426d30ea906Sjfb8856606 * DPDK callback to get extended statistics names.
1427d30ea906Sjfb8856606 *
1428d30ea906Sjfb8856606 * @param dev (unused)
1429d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1430d30ea906Sjfb8856606 * @param xstats_names
1431d30ea906Sjfb8856606 * Pointer to xstats names table.
1432d30ea906Sjfb8856606 * @param size
1433d30ea906Sjfb8856606 * Size of the xstats names table.
1434d30ea906Sjfb8856606 * @return
1435d30ea906Sjfb8856606 * Number of read names.
1436d30ea906Sjfb8856606 */
1437d30ea906Sjfb8856606 static int
mrvl_xstats_get_names(struct rte_eth_dev * dev __rte_unused,struct rte_eth_xstat_name * xstats_names,unsigned int size)1438d30ea906Sjfb8856606 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1439d30ea906Sjfb8856606 struct rte_eth_xstat_name *xstats_names,
1440d30ea906Sjfb8856606 unsigned int size)
1441d30ea906Sjfb8856606 {
1442d30ea906Sjfb8856606 unsigned int i;
1443d30ea906Sjfb8856606
1444d30ea906Sjfb8856606 if (!xstats_names)
1445d30ea906Sjfb8856606 return RTE_DIM(mrvl_xstats_tbl);
1446d30ea906Sjfb8856606
1447d30ea906Sjfb8856606 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
14484418919fSjohnjiang strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name,
14494418919fSjohnjiang RTE_ETH_XSTATS_NAME_SIZE);
1450d30ea906Sjfb8856606
1451d30ea906Sjfb8856606 return size;
1452d30ea906Sjfb8856606 }
1453d30ea906Sjfb8856606
1454d30ea906Sjfb8856606 /**
1455d30ea906Sjfb8856606 * DPDK callback to get information about the device.
1456d30ea906Sjfb8856606 *
1457d30ea906Sjfb8856606 * @param dev
1458d30ea906Sjfb8856606 * Pointer to Ethernet device structure (unused).
1459d30ea906Sjfb8856606 * @param info
1460d30ea906Sjfb8856606 * Info structure output buffer.
1461d30ea906Sjfb8856606 */
14624418919fSjohnjiang static int
mrvl_dev_infos_get(struct rte_eth_dev * dev __rte_unused,struct rte_eth_dev_info * info)1463d30ea906Sjfb8856606 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
1464d30ea906Sjfb8856606 struct rte_eth_dev_info *info)
1465d30ea906Sjfb8856606 {
1466d30ea906Sjfb8856606 info->speed_capa = ETH_LINK_SPEED_10M |
1467d30ea906Sjfb8856606 ETH_LINK_SPEED_100M |
1468d30ea906Sjfb8856606 ETH_LINK_SPEED_1G |
1469d30ea906Sjfb8856606 ETH_LINK_SPEED_10G;
1470d30ea906Sjfb8856606
1471d30ea906Sjfb8856606 info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1472d30ea906Sjfb8856606 info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1473d30ea906Sjfb8856606 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1474d30ea906Sjfb8856606
1475d30ea906Sjfb8856606 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1476d30ea906Sjfb8856606 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1477d30ea906Sjfb8856606 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1478d30ea906Sjfb8856606
1479d30ea906Sjfb8856606 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1480d30ea906Sjfb8856606 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1481d30ea906Sjfb8856606 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1482d30ea906Sjfb8856606
1483d30ea906Sjfb8856606 info->rx_offload_capa = MRVL_RX_OFFLOADS;
1484d30ea906Sjfb8856606 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1485d30ea906Sjfb8856606
1486d30ea906Sjfb8856606 info->tx_offload_capa = MRVL_TX_OFFLOADS;
1487d30ea906Sjfb8856606 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1488d30ea906Sjfb8856606
1489d30ea906Sjfb8856606 info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1490d30ea906Sjfb8856606 ETH_RSS_NONFRAG_IPV4_TCP |
1491d30ea906Sjfb8856606 ETH_RSS_NONFRAG_IPV4_UDP;
1492d30ea906Sjfb8856606
1493d30ea906Sjfb8856606 /* By default packets are dropped if no descriptors are available */
1494d30ea906Sjfb8856606 info->default_rxconf.rx_drop_en = 1;
1495d30ea906Sjfb8856606
1496d30ea906Sjfb8856606 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
14974418919fSjohnjiang
14984418919fSjohnjiang return 0;
1499d30ea906Sjfb8856606 }
1500d30ea906Sjfb8856606
1501d30ea906Sjfb8856606 /**
1502d30ea906Sjfb8856606 * Return supported packet types.
1503d30ea906Sjfb8856606 *
1504d30ea906Sjfb8856606 * @param dev
1505d30ea906Sjfb8856606 * Pointer to Ethernet device structure (unused).
1506d30ea906Sjfb8856606 *
1507d30ea906Sjfb8856606 * @return
1508d30ea906Sjfb8856606 * Const pointer to the table with supported packet types.
1509d30ea906Sjfb8856606 */
1510d30ea906Sjfb8856606 static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev * dev __rte_unused)1511d30ea906Sjfb8856606 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1512d30ea906Sjfb8856606 {
1513d30ea906Sjfb8856606 static const uint32_t ptypes[] = {
1514d30ea906Sjfb8856606 RTE_PTYPE_L2_ETHER,
1515d30ea906Sjfb8856606 RTE_PTYPE_L2_ETHER_VLAN,
1516d30ea906Sjfb8856606 RTE_PTYPE_L2_ETHER_QINQ,
1517d30ea906Sjfb8856606 RTE_PTYPE_L3_IPV4,
1518d30ea906Sjfb8856606 RTE_PTYPE_L3_IPV4_EXT,
1519d30ea906Sjfb8856606 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1520d30ea906Sjfb8856606 RTE_PTYPE_L3_IPV6,
1521d30ea906Sjfb8856606 RTE_PTYPE_L3_IPV6_EXT,
1522d30ea906Sjfb8856606 RTE_PTYPE_L2_ETHER_ARP,
1523d30ea906Sjfb8856606 RTE_PTYPE_L4_TCP,
1524d30ea906Sjfb8856606 RTE_PTYPE_L4_UDP
1525d30ea906Sjfb8856606 };
1526d30ea906Sjfb8856606
1527d30ea906Sjfb8856606 return ptypes;
1528d30ea906Sjfb8856606 }
1529d30ea906Sjfb8856606
1530d30ea906Sjfb8856606 /**
1531d30ea906Sjfb8856606 * DPDK callback to get information about specific receive queue.
1532d30ea906Sjfb8856606 *
1533d30ea906Sjfb8856606 * @param dev
1534d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1535d30ea906Sjfb8856606 * @param rx_queue_id
1536d30ea906Sjfb8856606 * Receive queue index.
1537d30ea906Sjfb8856606 * @param qinfo
1538d30ea906Sjfb8856606 * Receive queue information structure.
1539d30ea906Sjfb8856606 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq = dev->data->rx_queues[rx_queue_id];
	/* Map the ethdev queue id onto its traffic class / in-queue pair. */
	int tc = priv->rxq_map[rx_queue_id].tc;
	int inq = priv->rxq_map[rx_queue_id].inq;
	struct pp2_ppio_tc_params *tcp =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	/* Report the mempool and the ring size chosen at queue setup. */
	qinfo->mp = rxq->mp;
	qinfo->nb_desc = tcp->inqs_params[inq].size;
}
1553d30ea906Sjfb8856606
1554d30ea906Sjfb8856606 /**
1555d30ea906Sjfb8856606 * DPDK callback to get information about specific transmit queue.
1556d30ea906Sjfb8856606 *
1557d30ea906Sjfb8856606 * @param dev
1558d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1559d30ea906Sjfb8856606 * @param tx_queue_id
1560d30ea906Sjfb8856606 * Transmit queue index.
1561d30ea906Sjfb8856606 * @param qinfo
1562d30ea906Sjfb8856606 * Transmit queue information structure.
1563d30ea906Sjfb8856606 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *queue = dev->data->tx_queues[tx_queue_id];
	struct pp2_ppio_outqs_params *outqs = &priv->ppio_params.outqs_params;

	/* Ring size comes from the ppio parameters captured at setup time;
	 * the deferred-start flag lives on the queue object itself.
	 */
	qinfo->nb_desc = outqs->outqs_params[tx_queue_id].size;
	qinfo->conf.tx_deferred_start = queue->tx_deferred_start;
}
1574d30ea906Sjfb8856606
1575d30ea906Sjfb8856606 /**
1576d30ea906Sjfb8856606 * DPDK callback to Configure a VLAN filter.
1577d30ea906Sjfb8856606 *
1578d30ea906Sjfb8856606 * @param dev
1579d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1580d30ea906Sjfb8856606 * @param vlan_id
1581d30ea906Sjfb8856606 * VLAN ID to filter.
1582d30ea906Sjfb8856606 * @param on
1583d30ea906Sjfb8856606 * Toggle filter.
1584d30ea906Sjfb8856606 *
1585d30ea906Sjfb8856606 * @return
1586d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1587d30ea906Sjfb8856606 */
1588d30ea906Sjfb8856606 static int
mrvl_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)1589d30ea906Sjfb8856606 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1590d30ea906Sjfb8856606 {
1591d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1592d30ea906Sjfb8856606
1593d30ea906Sjfb8856606 if (!priv->ppio)
1594d30ea906Sjfb8856606 return -EPERM;
1595d30ea906Sjfb8856606
1596d30ea906Sjfb8856606 if (priv->isolated)
1597d30ea906Sjfb8856606 return -ENOTSUP;
1598d30ea906Sjfb8856606
1599d30ea906Sjfb8856606 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1600d30ea906Sjfb8856606 pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1601d30ea906Sjfb8856606 }
1602d30ea906Sjfb8856606
1603d30ea906Sjfb8856606 /**
1604d30ea906Sjfb8856606 * Release buffers to hardware bpool (buffer-pool)
1605d30ea906Sjfb8856606 *
1606d30ea906Sjfb8856606 * @param rxq
1607d30ea906Sjfb8856606 * Receive queue pointer.
1608d30ea906Sjfb8856606 * @param num
1609d30ea906Sjfb8856606 * Number of buffers to release to bpool.
1610d30ea906Sjfb8856606 *
1611d30ea906Sjfb8856606 * @return
1612d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1613d30ea906Sjfb8856606 */
1614d30ea906Sjfb8856606 static int
mrvl_fill_bpool(struct mrvl_rxq * rxq,int num)1615d30ea906Sjfb8856606 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1616d30ea906Sjfb8856606 {
1617d30ea906Sjfb8856606 struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
1618d30ea906Sjfb8856606 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
1619d30ea906Sjfb8856606 int i, ret;
1620d30ea906Sjfb8856606 unsigned int core_id;
1621d30ea906Sjfb8856606 struct pp2_hif *hif;
1622d30ea906Sjfb8856606 struct pp2_bpool *bpool;
1623d30ea906Sjfb8856606
1624d30ea906Sjfb8856606 core_id = rte_lcore_id();
1625d30ea906Sjfb8856606 if (core_id == LCORE_ID_ANY)
1626*2d9fd380Sjfb8856606 core_id = rte_get_main_lcore();
1627d30ea906Sjfb8856606
1628d30ea906Sjfb8856606 hif = mrvl_get_hif(rxq->priv, core_id);
1629d30ea906Sjfb8856606 if (!hif)
1630d30ea906Sjfb8856606 return -1;
1631d30ea906Sjfb8856606
1632d30ea906Sjfb8856606 bpool = rxq->priv->bpool;
1633d30ea906Sjfb8856606
1634d30ea906Sjfb8856606 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1635d30ea906Sjfb8856606 if (ret)
1636d30ea906Sjfb8856606 return ret;
1637d30ea906Sjfb8856606
1638d30ea906Sjfb8856606 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1639d30ea906Sjfb8856606 cookie_addr_high =
1640d30ea906Sjfb8856606 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1641d30ea906Sjfb8856606
1642d30ea906Sjfb8856606 for (i = 0; i < num; i++) {
1643d30ea906Sjfb8856606 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1644d30ea906Sjfb8856606 != cookie_addr_high) {
1645d30ea906Sjfb8856606 MRVL_LOG(ERR,
1646d30ea906Sjfb8856606 "mbuf virtual addr high 0x%lx out of range",
1647d30ea906Sjfb8856606 (uint64_t)mbufs[i] >> 32);
1648d30ea906Sjfb8856606 goto out;
1649d30ea906Sjfb8856606 }
1650d30ea906Sjfb8856606
1651d30ea906Sjfb8856606 entries[i].buff.addr =
1652d30ea906Sjfb8856606 rte_mbuf_data_iova_default(mbufs[i]);
1653d30ea906Sjfb8856606 entries[i].buff.cookie = (uint64_t)mbufs[i];
1654d30ea906Sjfb8856606 entries[i].bpool = bpool;
1655d30ea906Sjfb8856606 }
1656d30ea906Sjfb8856606
1657d30ea906Sjfb8856606 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1658d30ea906Sjfb8856606 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1659d30ea906Sjfb8856606
1660d30ea906Sjfb8856606 if (i != num)
1661d30ea906Sjfb8856606 goto out;
1662d30ea906Sjfb8856606
1663d30ea906Sjfb8856606 return 0;
1664d30ea906Sjfb8856606 out:
1665d30ea906Sjfb8856606 for (; i < num; i++)
1666d30ea906Sjfb8856606 rte_pktmbuf_free(mbufs[i]);
1667d30ea906Sjfb8856606
1668d30ea906Sjfb8856606 return -1;
1669d30ea906Sjfb8856606 }
1670d30ea906Sjfb8856606
1671d30ea906Sjfb8856606 /**
1672d30ea906Sjfb8856606 * DPDK callback to configure the receive queue.
1673d30ea906Sjfb8856606 *
1674d30ea906Sjfb8856606 * @param dev
1675d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1676d30ea906Sjfb8856606 * @param idx
1677d30ea906Sjfb8856606 * RX queue index.
1678d30ea906Sjfb8856606 * @param desc
1679d30ea906Sjfb8856606 * Number of descriptors to configure in queue.
1680d30ea906Sjfb8856606 * @param socket
1681d30ea906Sjfb8856606 * NUMA socket on which memory must be allocated.
1682d30ea906Sjfb8856606 * @param conf
1683d30ea906Sjfb8856606 * Thresholds parameters.
1684d30ea906Sjfb8856606 * @param mp
1685d30ea906Sjfb8856606 * Memory pool for buffer allocations.
1686d30ea906Sjfb8856606 *
1687d30ea906Sjfb8856606 * @return
1688d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1689d30ea906Sjfb8856606 */
1690d30ea906Sjfb8856606 static int
mrvl_rx_queue_setup(struct rte_eth_dev * dev,uint16_t idx,uint16_t desc,unsigned int socket,const struct rte_eth_rxconf * conf,struct rte_mempool * mp)1691d30ea906Sjfb8856606 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1692d30ea906Sjfb8856606 unsigned int socket,
1693d30ea906Sjfb8856606 const struct rte_eth_rxconf *conf,
1694d30ea906Sjfb8856606 struct rte_mempool *mp)
1695d30ea906Sjfb8856606 {
1696d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1697d30ea906Sjfb8856606 struct mrvl_rxq *rxq;
1698d30ea906Sjfb8856606 uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
1699d30ea906Sjfb8856606 uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1700d30ea906Sjfb8856606 int ret, tc, inq;
1701d30ea906Sjfb8856606 uint64_t offloads;
1702d30ea906Sjfb8856606
1703d30ea906Sjfb8856606 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1704d30ea906Sjfb8856606
1705d30ea906Sjfb8856606 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1706d30ea906Sjfb8856606 /*
1707d30ea906Sjfb8856606 * Unknown TC mapping, mapping will not have a correct queue.
1708d30ea906Sjfb8856606 */
1709d30ea906Sjfb8856606 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1710d30ea906Sjfb8856606 idx, priv->ppio_id);
1711d30ea906Sjfb8856606 return -EFAULT;
1712d30ea906Sjfb8856606 }
1713d30ea906Sjfb8856606
1714d30ea906Sjfb8856606 frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
1715d30ea906Sjfb8856606 if (frame_size < max_rx_pkt_len) {
1716d30ea906Sjfb8856606 MRVL_LOG(WARNING,
1717d30ea906Sjfb8856606 "Mbuf size must be increased to %u bytes to hold up "
1718d30ea906Sjfb8856606 "to %u bytes of data.",
1719d30ea906Sjfb8856606 buf_size + max_rx_pkt_len - frame_size,
1720d30ea906Sjfb8856606 max_rx_pkt_len);
1721d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1722d30ea906Sjfb8856606 MRVL_LOG(INFO, "Setting max rx pkt len to %u",
1723d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1724d30ea906Sjfb8856606 }
1725d30ea906Sjfb8856606
1726d30ea906Sjfb8856606 if (dev->data->rx_queues[idx]) {
1727d30ea906Sjfb8856606 rte_free(dev->data->rx_queues[idx]);
1728d30ea906Sjfb8856606 dev->data->rx_queues[idx] = NULL;
1729d30ea906Sjfb8856606 }
1730d30ea906Sjfb8856606
1731d30ea906Sjfb8856606 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
1732d30ea906Sjfb8856606 if (!rxq)
1733d30ea906Sjfb8856606 return -ENOMEM;
1734d30ea906Sjfb8856606
1735d30ea906Sjfb8856606 rxq->priv = priv;
1736d30ea906Sjfb8856606 rxq->mp = mp;
1737d30ea906Sjfb8856606 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
1738d30ea906Sjfb8856606 rxq->queue_id = idx;
1739d30ea906Sjfb8856606 rxq->port_id = dev->data->port_id;
1740d30ea906Sjfb8856606 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
1741d30ea906Sjfb8856606
1742d30ea906Sjfb8856606 tc = priv->rxq_map[rxq->queue_id].tc,
1743d30ea906Sjfb8856606 inq = priv->rxq_map[rxq->queue_id].inq;
1744d30ea906Sjfb8856606 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
1745d30ea906Sjfb8856606 desc;
1746d30ea906Sjfb8856606
1747d30ea906Sjfb8856606 ret = mrvl_fill_bpool(rxq, desc);
1748d30ea906Sjfb8856606 if (ret) {
1749d30ea906Sjfb8856606 rte_free(rxq);
1750d30ea906Sjfb8856606 return ret;
1751d30ea906Sjfb8856606 }
1752d30ea906Sjfb8856606
1753d30ea906Sjfb8856606 priv->bpool_init_size += desc;
1754d30ea906Sjfb8856606
1755d30ea906Sjfb8856606 dev->data->rx_queues[idx] = rxq;
1756d30ea906Sjfb8856606
1757d30ea906Sjfb8856606 return 0;
1758d30ea906Sjfb8856606 }
1759d30ea906Sjfb8856606
1760d30ea906Sjfb8856606 /**
1761d30ea906Sjfb8856606 * DPDK callback to release the receive queue.
1762d30ea906Sjfb8856606 *
1763d30ea906Sjfb8856606 * @param rxq
1764d30ea906Sjfb8856606 * Generic receive queue pointer.
1765d30ea906Sjfb8856606 */
1766d30ea906Sjfb8856606 static void
mrvl_rx_queue_release(void * rxq)1767d30ea906Sjfb8856606 mrvl_rx_queue_release(void *rxq)
1768d30ea906Sjfb8856606 {
1769d30ea906Sjfb8856606 struct mrvl_rxq *q = rxq;
1770d30ea906Sjfb8856606 struct pp2_ppio_tc_params *tc_params;
1771d30ea906Sjfb8856606 int i, num, tc, inq;
1772d30ea906Sjfb8856606 struct pp2_hif *hif;
1773d30ea906Sjfb8856606 unsigned int core_id = rte_lcore_id();
1774d30ea906Sjfb8856606
1775d30ea906Sjfb8856606 if (core_id == LCORE_ID_ANY)
1776*2d9fd380Sjfb8856606 core_id = rte_get_main_lcore();
1777d30ea906Sjfb8856606
1778d30ea906Sjfb8856606 if (!q)
1779d30ea906Sjfb8856606 return;
1780d30ea906Sjfb8856606
1781d30ea906Sjfb8856606 hif = mrvl_get_hif(q->priv, core_id);
1782d30ea906Sjfb8856606
1783d30ea906Sjfb8856606 if (!hif)
1784d30ea906Sjfb8856606 return;
1785d30ea906Sjfb8856606
1786d30ea906Sjfb8856606 tc = q->priv->rxq_map[q->queue_id].tc;
1787d30ea906Sjfb8856606 inq = q->priv->rxq_map[q->queue_id].inq;
1788d30ea906Sjfb8856606 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
1789d30ea906Sjfb8856606 num = tc_params->inqs_params[inq].size;
1790d30ea906Sjfb8856606 for (i = 0; i < num; i++) {
1791d30ea906Sjfb8856606 struct pp2_buff_inf inf;
1792d30ea906Sjfb8856606 uint64_t addr;
1793d30ea906Sjfb8856606
1794d30ea906Sjfb8856606 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
1795d30ea906Sjfb8856606 addr = cookie_addr_high | inf.cookie;
1796d30ea906Sjfb8856606 rte_pktmbuf_free((struct rte_mbuf *)addr);
1797d30ea906Sjfb8856606 }
1798d30ea906Sjfb8856606
1799d30ea906Sjfb8856606 rte_free(q);
1800d30ea906Sjfb8856606 }
1801d30ea906Sjfb8856606
1802d30ea906Sjfb8856606 /**
1803d30ea906Sjfb8856606 * DPDK callback to configure the transmit queue.
1804d30ea906Sjfb8856606 *
1805d30ea906Sjfb8856606 * @param dev
1806d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1807d30ea906Sjfb8856606 * @param idx
1808d30ea906Sjfb8856606 * Transmit queue index.
1809d30ea906Sjfb8856606 * @param desc
1810d30ea906Sjfb8856606 * Number of descriptors to configure in the queue.
1811d30ea906Sjfb8856606 * @param socket
1812d30ea906Sjfb8856606 * NUMA socket on which memory must be allocated.
1813d30ea906Sjfb8856606 * @param conf
1814d30ea906Sjfb8856606 * Tx queue configuration parameters.
1815d30ea906Sjfb8856606 *
1816d30ea906Sjfb8856606 * @return
1817d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1818d30ea906Sjfb8856606 */
1819d30ea906Sjfb8856606 static int
mrvl_tx_queue_setup(struct rte_eth_dev * dev,uint16_t idx,uint16_t desc,unsigned int socket,const struct rte_eth_txconf * conf)1820d30ea906Sjfb8856606 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1821d30ea906Sjfb8856606 unsigned int socket,
1822d30ea906Sjfb8856606 const struct rte_eth_txconf *conf)
1823d30ea906Sjfb8856606 {
1824d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1825d30ea906Sjfb8856606 struct mrvl_txq *txq;
1826d30ea906Sjfb8856606
1827d30ea906Sjfb8856606 if (dev->data->tx_queues[idx]) {
1828d30ea906Sjfb8856606 rte_free(dev->data->tx_queues[idx]);
1829d30ea906Sjfb8856606 dev->data->tx_queues[idx] = NULL;
1830d30ea906Sjfb8856606 }
1831d30ea906Sjfb8856606
1832d30ea906Sjfb8856606 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
1833d30ea906Sjfb8856606 if (!txq)
1834d30ea906Sjfb8856606 return -ENOMEM;
1835d30ea906Sjfb8856606
1836d30ea906Sjfb8856606 txq->priv = priv;
1837d30ea906Sjfb8856606 txq->queue_id = idx;
1838d30ea906Sjfb8856606 txq->port_id = dev->data->port_id;
1839d30ea906Sjfb8856606 txq->tx_deferred_start = conf->tx_deferred_start;
1840d30ea906Sjfb8856606 dev->data->tx_queues[idx] = txq;
1841d30ea906Sjfb8856606
1842d30ea906Sjfb8856606 priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
1843d30ea906Sjfb8856606
1844d30ea906Sjfb8856606 return 0;
1845d30ea906Sjfb8856606 }
1846d30ea906Sjfb8856606
1847d30ea906Sjfb8856606 /**
1848d30ea906Sjfb8856606 * DPDK callback to release the transmit queue.
1849d30ea906Sjfb8856606 *
1850d30ea906Sjfb8856606 * @param txq
1851d30ea906Sjfb8856606 * Generic transmit queue pointer.
1852d30ea906Sjfb8856606 */
1853d30ea906Sjfb8856606 static void
mrvl_tx_queue_release(void * txq)1854d30ea906Sjfb8856606 mrvl_tx_queue_release(void *txq)
1855d30ea906Sjfb8856606 {
1856d30ea906Sjfb8856606 struct mrvl_txq *q = txq;
1857d30ea906Sjfb8856606
1858d30ea906Sjfb8856606 if (!q)
1859d30ea906Sjfb8856606 return;
1860d30ea906Sjfb8856606
1861d30ea906Sjfb8856606 rte_free(q);
1862d30ea906Sjfb8856606 }
1863d30ea906Sjfb8856606
1864d30ea906Sjfb8856606 /**
1865d30ea906Sjfb8856606 * DPDK callback to get flow control configuration.
1866d30ea906Sjfb8856606 *
1867d30ea906Sjfb8856606 * @param dev
1868d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1869d30ea906Sjfb8856606 * @param fc_conf
1870d30ea906Sjfb8856606 * Pointer to the flow control configuration.
1871d30ea906Sjfb8856606 *
1872d30ea906Sjfb8856606 * @return
1873d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1874d30ea906Sjfb8856606 */
1875d30ea906Sjfb8856606 static int
mrvl_flow_ctrl_get(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)1876d30ea906Sjfb8856606 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1877d30ea906Sjfb8856606 {
1878d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1879d30ea906Sjfb8856606 int ret, en;
1880d30ea906Sjfb8856606
1881d30ea906Sjfb8856606 if (!priv)
1882d30ea906Sjfb8856606 return -EPERM;
1883d30ea906Sjfb8856606
1884d30ea906Sjfb8856606 ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
1885d30ea906Sjfb8856606 if (ret) {
1886d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to read rx pause state");
1887d30ea906Sjfb8856606 return ret;
1888d30ea906Sjfb8856606 }
1889d30ea906Sjfb8856606
1890d30ea906Sjfb8856606 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
1891d30ea906Sjfb8856606
1892d30ea906Sjfb8856606 return 0;
1893d30ea906Sjfb8856606 }
1894d30ea906Sjfb8856606
1895d30ea906Sjfb8856606 /**
1896d30ea906Sjfb8856606 * DPDK callback to set flow control configuration.
1897d30ea906Sjfb8856606 *
1898d30ea906Sjfb8856606 * @param dev
1899d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1900d30ea906Sjfb8856606 * @param fc_conf
1901d30ea906Sjfb8856606 * Pointer to the flow control configuration.
1902d30ea906Sjfb8856606 *
1903d30ea906Sjfb8856606 * @return
1904d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1905d30ea906Sjfb8856606 */
1906d30ea906Sjfb8856606 static int
mrvl_flow_ctrl_set(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)1907d30ea906Sjfb8856606 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1908d30ea906Sjfb8856606 {
1909d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1910d30ea906Sjfb8856606
1911d30ea906Sjfb8856606 if (!priv)
1912d30ea906Sjfb8856606 return -EPERM;
1913d30ea906Sjfb8856606
1914d30ea906Sjfb8856606 if (fc_conf->high_water ||
1915d30ea906Sjfb8856606 fc_conf->low_water ||
1916d30ea906Sjfb8856606 fc_conf->pause_time ||
1917d30ea906Sjfb8856606 fc_conf->mac_ctrl_frame_fwd ||
1918d30ea906Sjfb8856606 fc_conf->autoneg) {
1919d30ea906Sjfb8856606 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
1920d30ea906Sjfb8856606
1921d30ea906Sjfb8856606 return -EINVAL;
1922d30ea906Sjfb8856606 }
1923d30ea906Sjfb8856606
1924d30ea906Sjfb8856606 if (fc_conf->mode == RTE_FC_NONE ||
1925d30ea906Sjfb8856606 fc_conf->mode == RTE_FC_RX_PAUSE) {
1926d30ea906Sjfb8856606 int ret, en;
1927d30ea906Sjfb8856606
1928d30ea906Sjfb8856606 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
1929d30ea906Sjfb8856606 ret = pp2_ppio_set_rx_pause(priv->ppio, en);
1930d30ea906Sjfb8856606 if (ret)
1931d30ea906Sjfb8856606 MRVL_LOG(ERR,
1932d30ea906Sjfb8856606 "Failed to change flowctrl on RX side");
1933d30ea906Sjfb8856606
1934d30ea906Sjfb8856606 return ret;
1935d30ea906Sjfb8856606 }
1936d30ea906Sjfb8856606
1937d30ea906Sjfb8856606 return 0;
1938d30ea906Sjfb8856606 }
1939d30ea906Sjfb8856606
1940d30ea906Sjfb8856606 /**
1941d30ea906Sjfb8856606 * Update RSS hash configuration
1942d30ea906Sjfb8856606 *
1943d30ea906Sjfb8856606 * @param dev
1944d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1945d30ea906Sjfb8856606 * @param rss_conf
1946d30ea906Sjfb8856606 * Pointer to RSS configuration.
1947d30ea906Sjfb8856606 *
1948d30ea906Sjfb8856606 * @return
1949d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
1950d30ea906Sjfb8856606 */
1951d30ea906Sjfb8856606 static int
mrvl_rss_hash_update(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)1952d30ea906Sjfb8856606 mrvl_rss_hash_update(struct rte_eth_dev *dev,
1953d30ea906Sjfb8856606 struct rte_eth_rss_conf *rss_conf)
1954d30ea906Sjfb8856606 {
1955d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1956d30ea906Sjfb8856606
1957d30ea906Sjfb8856606 if (priv->isolated)
1958d30ea906Sjfb8856606 return -ENOTSUP;
1959d30ea906Sjfb8856606
1960d30ea906Sjfb8856606 return mrvl_configure_rss(priv, rss_conf);
1961d30ea906Sjfb8856606 }
1962d30ea906Sjfb8856606
1963d30ea906Sjfb8856606 /**
1964d30ea906Sjfb8856606 * DPDK callback to get RSS hash configuration.
1965d30ea906Sjfb8856606 *
1966d30ea906Sjfb8856606 * @param dev
1967d30ea906Sjfb8856606 * Pointer to Ethernet device structure.
1968d30ea906Sjfb8856606 * @rss_conf
1969d30ea906Sjfb8856606 * Pointer to RSS configuration.
1970d30ea906Sjfb8856606 *
1971d30ea906Sjfb8856606 * @return
1972d30ea906Sjfb8856606 * Always 0.
1973d30ea906Sjfb8856606 */
1974d30ea906Sjfb8856606 static int
mrvl_rss_hash_conf_get(struct rte_eth_dev * dev,struct rte_eth_rss_conf * rss_conf)1975d30ea906Sjfb8856606 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
1976d30ea906Sjfb8856606 struct rte_eth_rss_conf *rss_conf)
1977d30ea906Sjfb8856606 {
1978d30ea906Sjfb8856606 struct mrvl_priv *priv = dev->data->dev_private;
1979d30ea906Sjfb8856606 enum pp2_ppio_hash_type hash_type =
1980d30ea906Sjfb8856606 priv->ppio_params.inqs_params.hash_type;
1981d30ea906Sjfb8856606
1982d30ea906Sjfb8856606 rss_conf->rss_key = NULL;
1983d30ea906Sjfb8856606
1984d30ea906Sjfb8856606 if (hash_type == PP2_PPIO_HASH_T_NONE)
1985d30ea906Sjfb8856606 rss_conf->rss_hf = 0;
1986d30ea906Sjfb8856606 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
1987d30ea906Sjfb8856606 rss_conf->rss_hf = ETH_RSS_IPV4;
1988d30ea906Sjfb8856606 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
1989d30ea906Sjfb8856606 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
1990d30ea906Sjfb8856606 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
1991d30ea906Sjfb8856606 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
1992d30ea906Sjfb8856606
1993d30ea906Sjfb8856606 return 0;
1994d30ea906Sjfb8856606 }
1995d30ea906Sjfb8856606
1996d30ea906Sjfb8856606 /**
1997d30ea906Sjfb8856606 * DPDK callback to get rte_flow callbacks.
1998d30ea906Sjfb8856606 *
1999d30ea906Sjfb8856606 * @param dev
2000d30ea906Sjfb8856606 * Pointer to the device structure.
2001d30ea906Sjfb8856606 * @param filer_type
2002d30ea906Sjfb8856606 * Flow filter type.
2003d30ea906Sjfb8856606 * @param filter_op
2004d30ea906Sjfb8856606 * Flow filter operation.
2005d30ea906Sjfb8856606 * @param arg
2006d30ea906Sjfb8856606 * Pointer to pass the flow ops.
2007d30ea906Sjfb8856606 *
2008d30ea906Sjfb8856606 * @return
2009d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
2010d30ea906Sjfb8856606 */
2011d30ea906Sjfb8856606 static int
mrvl_eth_filter_ctrl(struct rte_eth_dev * dev __rte_unused,enum rte_filter_type filter_type,enum rte_filter_op filter_op,void * arg)2012d30ea906Sjfb8856606 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
2013d30ea906Sjfb8856606 enum rte_filter_type filter_type,
2014d30ea906Sjfb8856606 enum rte_filter_op filter_op, void *arg)
2015d30ea906Sjfb8856606 {
2016d30ea906Sjfb8856606 switch (filter_type) {
2017d30ea906Sjfb8856606 case RTE_ETH_FILTER_GENERIC:
2018d30ea906Sjfb8856606 if (filter_op != RTE_ETH_FILTER_GET)
2019d30ea906Sjfb8856606 return -EINVAL;
2020d30ea906Sjfb8856606 *(const void **)arg = &mrvl_flow_ops;
2021d30ea906Sjfb8856606 return 0;
2022d30ea906Sjfb8856606 default:
2023d30ea906Sjfb8856606 MRVL_LOG(WARNING, "Filter type (%d) not supported",
2024d30ea906Sjfb8856606 filter_type);
2025d30ea906Sjfb8856606 return -EINVAL;
2026d30ea906Sjfb8856606 }
2027d30ea906Sjfb8856606 }
2028d30ea906Sjfb8856606
2029d30ea906Sjfb8856606 /**
2030d30ea906Sjfb8856606 * DPDK callback to get rte_mtr callbacks.
2031d30ea906Sjfb8856606 *
2032d30ea906Sjfb8856606 * @param dev
2033d30ea906Sjfb8856606 * Pointer to the device structure.
2034d30ea906Sjfb8856606 * @param ops
2035d30ea906Sjfb8856606 * Pointer to pass the mtr ops.
2036d30ea906Sjfb8856606 *
2037d30ea906Sjfb8856606 * @return
2038d30ea906Sjfb8856606 * Always 0.
2039d30ea906Sjfb8856606 */
static int
mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
{
	/* Hand back the driver's rte_mtr (metering) callback table. */
	*(const void **)ops = &mrvl_mtr_ops;

	return 0;
}
2047d30ea906Sjfb8856606
2048d30ea906Sjfb8856606 /**
2049d30ea906Sjfb8856606 * DPDK callback to get rte_tm callbacks.
2050d30ea906Sjfb8856606 *
2051d30ea906Sjfb8856606 * @param dev
2052d30ea906Sjfb8856606 * Pointer to the device structure.
2053d30ea906Sjfb8856606 * @param ops
2054d30ea906Sjfb8856606 * Pointer to pass the tm ops.
2055d30ea906Sjfb8856606 *
2056d30ea906Sjfb8856606 * @return
2057d30ea906Sjfb8856606 * Always 0.
2058d30ea906Sjfb8856606 */
2059d30ea906Sjfb8856606 static int
mrvl_tm_ops_get(struct rte_eth_dev * dev __rte_unused,void * ops)2060d30ea906Sjfb8856606 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2061d30ea906Sjfb8856606 {
2062d30ea906Sjfb8856606 *(const void **)ops = &mrvl_tm_ops;
2063d30ea906Sjfb8856606
2064d30ea906Sjfb8856606 return 0;
2065d30ea906Sjfb8856606 }
2066d30ea906Sjfb8856606
/* Ethdev operations exposed by this PMD to the rte_ethdev layer. */
static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.stats_get = mrvl_stats_get,
	.stats_reset = mrvl_stats_reset,
	.xstats_get = mrvl_xstats_get,
	.xstats_reset = mrvl_xstats_reset,
	.xstats_get_names = mrvl_xstats_get_names,
	.dev_infos_get = mrvl_dev_infos_get,
	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.vlan_filter_set = mrvl_vlan_filter_set,
	.tx_queue_start = mrvl_tx_queue_start,
	.tx_queue_stop = mrvl_tx_queue_stop,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
	.flow_ctrl_get = mrvl_flow_ctrl_get,
	.flow_ctrl_set = mrvl_flow_ctrl_set,
	.rss_hash_update = mrvl_rss_hash_update,
	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
	.filter_ctrl = mrvl_eth_filter_ctrl,
	.mtr_ops_get = mrvl_mtr_ops_get,
	.tm_ops_get = mrvl_tm_ops_get,
};
2107d30ea906Sjfb8856606
2108d30ea906Sjfb8856606 /**
2109d30ea906Sjfb8856606 * Return packet type information and l3/l4 offsets.
2110d30ea906Sjfb8856606 *
2111d30ea906Sjfb8856606 * @param desc
2112d30ea906Sjfb8856606 * Pointer to the received packet descriptor.
2113d30ea906Sjfb8856606 * @param l3_offset
2114d30ea906Sjfb8856606 * l3 packet offset.
2115d30ea906Sjfb8856606 * @param l4_offset
2116d30ea906Sjfb8856606 * l4 packet offset.
2117d30ea906Sjfb8856606 *
2118d30ea906Sjfb8856606 * @return
2119d30ea906Sjfb8856606 * Packet type information.
2120d30ea906Sjfb8856606 */
2121d30ea906Sjfb8856606 static inline uint64_t
mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc * desc,uint8_t * l3_offset,uint8_t * l4_offset)2122d30ea906Sjfb8856606 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
2123d30ea906Sjfb8856606 uint8_t *l3_offset, uint8_t *l4_offset)
2124d30ea906Sjfb8856606 {
2125d30ea906Sjfb8856606 enum pp2_inq_l3_type l3_type;
2126d30ea906Sjfb8856606 enum pp2_inq_l4_type l4_type;
2127d30ea906Sjfb8856606 enum pp2_inq_vlan_tag vlan_tag;
2128d30ea906Sjfb8856606 uint64_t packet_type;
2129d30ea906Sjfb8856606
2130d30ea906Sjfb8856606 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
2131d30ea906Sjfb8856606 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
2132d30ea906Sjfb8856606 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
2133d30ea906Sjfb8856606
2134d30ea906Sjfb8856606 packet_type = RTE_PTYPE_L2_ETHER;
2135d30ea906Sjfb8856606
2136d30ea906Sjfb8856606 switch (vlan_tag) {
2137d30ea906Sjfb8856606 case PP2_INQ_VLAN_TAG_SINGLE:
2138d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
2139d30ea906Sjfb8856606 break;
2140d30ea906Sjfb8856606 case PP2_INQ_VLAN_TAG_DOUBLE:
2141d30ea906Sjfb8856606 case PP2_INQ_VLAN_TAG_TRIPLE:
2142d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
2143d30ea906Sjfb8856606 break;
2144d30ea906Sjfb8856606 default:
2145d30ea906Sjfb8856606 break;
2146d30ea906Sjfb8856606 }
2147d30ea906Sjfb8856606
2148d30ea906Sjfb8856606 switch (l3_type) {
2149d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
2150d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L3_IPV4;
2151d30ea906Sjfb8856606 break;
2152d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_IPV4_OK:
2153d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2154d30ea906Sjfb8856606 break;
2155d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2156d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2157d30ea906Sjfb8856606 break;
2158d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2159d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L3_IPV6;
2160d30ea906Sjfb8856606 break;
2161d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_IPV6_EXT:
2162d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2163d30ea906Sjfb8856606 break;
2164d30ea906Sjfb8856606 case PP2_INQ_L3_TYPE_ARP:
2165d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2166d30ea906Sjfb8856606 /*
2167d30ea906Sjfb8856606 * In case of ARP l4_offset is set to wrong value.
2168d30ea906Sjfb8856606 * Set it to proper one so that later on mbuf->l3_len can be
2169d30ea906Sjfb8856606 * calculated subtracting l4_offset and l3_offset.
2170d30ea906Sjfb8856606 */
2171d30ea906Sjfb8856606 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2172d30ea906Sjfb8856606 break;
2173d30ea906Sjfb8856606 default:
2174d30ea906Sjfb8856606 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
2175d30ea906Sjfb8856606 break;
2176d30ea906Sjfb8856606 }
2177d30ea906Sjfb8856606
2178d30ea906Sjfb8856606 switch (l4_type) {
2179d30ea906Sjfb8856606 case PP2_INQ_L4_TYPE_TCP:
2180d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L4_TCP;
2181d30ea906Sjfb8856606 break;
2182d30ea906Sjfb8856606 case PP2_INQ_L4_TYPE_UDP:
2183d30ea906Sjfb8856606 packet_type |= RTE_PTYPE_L4_UDP;
2184d30ea906Sjfb8856606 break;
2185d30ea906Sjfb8856606 default:
2186d30ea906Sjfb8856606 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
2187d30ea906Sjfb8856606 break;
2188d30ea906Sjfb8856606 }
2189d30ea906Sjfb8856606
2190d30ea906Sjfb8856606 return packet_type;
2191d30ea906Sjfb8856606 }
2192d30ea906Sjfb8856606
2193d30ea906Sjfb8856606 /**
2194d30ea906Sjfb8856606 * Get offload information from the received packet descriptor.
2195d30ea906Sjfb8856606 *
2196d30ea906Sjfb8856606 * @param desc
2197d30ea906Sjfb8856606 * Pointer to the received packet descriptor.
2198d30ea906Sjfb8856606 *
2199d30ea906Sjfb8856606 * @return
2200d30ea906Sjfb8856606 * Mbuf offload flags.
2201d30ea906Sjfb8856606 */
2202d30ea906Sjfb8856606 static inline uint64_t
mrvl_desc_to_ol_flags(struct pp2_ppio_desc * desc)2203d30ea906Sjfb8856606 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2204d30ea906Sjfb8856606 {
2205d30ea906Sjfb8856606 uint64_t flags;
2206d30ea906Sjfb8856606 enum pp2_inq_desc_status status;
2207d30ea906Sjfb8856606
2208d30ea906Sjfb8856606 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2209d30ea906Sjfb8856606 if (unlikely(status != PP2_DESC_ERR_OK))
2210d30ea906Sjfb8856606 flags = PKT_RX_IP_CKSUM_BAD;
2211d30ea906Sjfb8856606 else
2212d30ea906Sjfb8856606 flags = PKT_RX_IP_CKSUM_GOOD;
2213d30ea906Sjfb8856606
2214d30ea906Sjfb8856606 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2215d30ea906Sjfb8856606 if (unlikely(status != PP2_DESC_ERR_OK))
2216d30ea906Sjfb8856606 flags |= PKT_RX_L4_CKSUM_BAD;
2217d30ea906Sjfb8856606 else
2218d30ea906Sjfb8856606 flags |= PKT_RX_L4_CKSUM_GOOD;
2219d30ea906Sjfb8856606
2220d30ea906Sjfb8856606 return flags;
2221d30ea906Sjfb8856606 }
2222d30ea906Sjfb8856606
/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	int num;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();

	hif = mrvl_get_hif(q->priv, core_id);

	/* Port not started or no hif available for this core - nothing to do. */
	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	bpool = q->priv->bpool;

	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0)) {
		MRVL_LOG(ERR, "Failed to receive packets");
		return 0;
	}
	/* Received buffers left the pool; update per-core accounting. */
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		uint8_t l3_offset, l4_offset;
		enum pp2_inq_desc_status status;
		uint64_t addr;

		/* Prefetch the mbuf MRVL_MUSDK_PREFETCH_SHIFT descs ahead. */
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			u64 pref_addr;

			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		}

		/*
		 * The descriptor cookie carries the low bits of the mbuf
		 * virtual address; OR with cookie_addr_high to reconstruct
		 * the full pointer.
		 */
		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);

		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != PP2_DESC_ERR_OK)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_iova_default(mbuf),
				.cookie = (uint64_t)mbuf,
			};

			/* Return the buffer straight to the pool. */
			pp2_bpool_put_buff(hif, bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
			q->drop_mac++;
			continue;
		}

		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;
		mbuf->packet_type =
			mrvl_desc_to_packet_type_and_offset(&descs[i],
							    &l3_offset,
							    &l4_offset);
		mbuf->l2_len = l3_offset;
		mbuf->l3_len = l4_offset - l3_offset;

		if (likely(q->cksum_enabled))
			mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);

		rx_pkts[rx_done++] = mbuf;
		q->bytes_recv += mbuf->pkt_len;
	}

	/* Rebalance the bpool opportunistically; skip if the lock is busy. */
	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

		/* Refill when the pool shrank below its minimum. */
		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
			if (ret)
				MRVL_LOG(ERR, "Failed to fill bpool");
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			/* Shrink the pool back towards its initial size. */
			int i;
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;

			MRVL_LOG(DEBUG,
				"port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
				bpool->pp2_id, q->priv->ppio->port_id,
				bpool->id, pkt_to_remove, num,
				q->priv->bpool_init_size);

			for (i = 0; i < pkt_to_remove; i++) {
				ret = pp2_bpool_get_buff(hif, bpool, &buff);
				if (ret)
					break;
				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			}
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -= i;
		}
		rte_spinlock_unlock(&q->priv->lock);
	}

	return rx_done;
}
2353d30ea906Sjfb8856606
2354d30ea906Sjfb8856606 /**
2355d30ea906Sjfb8856606 * Prepare offload information.
2356d30ea906Sjfb8856606 *
2357d30ea906Sjfb8856606 * @param ol_flags
2358d30ea906Sjfb8856606 * Offload flags.
2359d30ea906Sjfb8856606 * @param packet_type
2360d30ea906Sjfb8856606 * Packet type bitfield.
2361d30ea906Sjfb8856606 * @param l3_type
2362d30ea906Sjfb8856606 * Pointer to the pp2_ouq_l3_type structure.
2363d30ea906Sjfb8856606 * @param l4_type
2364d30ea906Sjfb8856606 * Pointer to the pp2_outq_l4_type structure.
2365d30ea906Sjfb8856606 * @param gen_l3_cksum
2366d30ea906Sjfb8856606 * Will be set to 1 in case l3 checksum is computed.
2367d30ea906Sjfb8856606 * @param l4_cksum
2368d30ea906Sjfb8856606 * Will be set to 1 in case l4 checksum is computed.
2369d30ea906Sjfb8856606 *
2370d30ea906Sjfb8856606 * @return
2371d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
2372d30ea906Sjfb8856606 */
2373d30ea906Sjfb8856606 static inline int
mrvl_prepare_proto_info(uint64_t ol_flags,uint32_t packet_type,enum pp2_outq_l3_type * l3_type,enum pp2_outq_l4_type * l4_type,int * gen_l3_cksum,int * gen_l4_cksum)2374d30ea906Sjfb8856606 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2375d30ea906Sjfb8856606 enum pp2_outq_l3_type *l3_type,
2376d30ea906Sjfb8856606 enum pp2_outq_l4_type *l4_type,
2377d30ea906Sjfb8856606 int *gen_l3_cksum,
2378d30ea906Sjfb8856606 int *gen_l4_cksum)
2379d30ea906Sjfb8856606 {
2380d30ea906Sjfb8856606 /*
2381d30ea906Sjfb8856606 * Based on ol_flags prepare information
2382d30ea906Sjfb8856606 * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor
2383d30ea906Sjfb8856606 * for offloading.
2384d30ea906Sjfb8856606 */
2385d30ea906Sjfb8856606 if (ol_flags & PKT_TX_IPV4) {
2386d30ea906Sjfb8856606 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2387d30ea906Sjfb8856606 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2388d30ea906Sjfb8856606 } else if (ol_flags & PKT_TX_IPV6) {
2389d30ea906Sjfb8856606 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2390d30ea906Sjfb8856606 /* no checksum for ipv6 header */
2391d30ea906Sjfb8856606 *gen_l3_cksum = 0;
2392d30ea906Sjfb8856606 } else {
2393d30ea906Sjfb8856606 /* if something different then stop processing */
2394d30ea906Sjfb8856606 return -1;
2395d30ea906Sjfb8856606 }
2396d30ea906Sjfb8856606
2397d30ea906Sjfb8856606 ol_flags &= PKT_TX_L4_MASK;
2398d30ea906Sjfb8856606 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2399d30ea906Sjfb8856606 ol_flags == PKT_TX_TCP_CKSUM) {
2400d30ea906Sjfb8856606 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2401d30ea906Sjfb8856606 *gen_l4_cksum = 1;
2402d30ea906Sjfb8856606 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2403d30ea906Sjfb8856606 ol_flags == PKT_TX_UDP_CKSUM) {
2404d30ea906Sjfb8856606 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2405d30ea906Sjfb8856606 *gen_l4_cksum = 1;
2406d30ea906Sjfb8856606 } else {
2407d30ea906Sjfb8856606 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2408d30ea906Sjfb8856606 /* no checksum for other type */
2409d30ea906Sjfb8856606 *gen_l4_cksum = 0;
2410d30ea906Sjfb8856606 }
2411d30ea906Sjfb8856606
2412d30ea906Sjfb8856606 return 0;
2413d30ea906Sjfb8856606 }
2414d30ea906Sjfb8856606
2415d30ea906Sjfb8856606 /**
2416d30ea906Sjfb8856606 * Release already sent buffers to bpool (buffer-pool).
2417d30ea906Sjfb8856606 *
2418d30ea906Sjfb8856606 * @param ppio
2419d30ea906Sjfb8856606 * Pointer to the port structure.
2420d30ea906Sjfb8856606 * @param hif
2421d30ea906Sjfb8856606 * Pointer to the MUSDK hardware interface.
2422d30ea906Sjfb8856606 * @param sq
2423d30ea906Sjfb8856606 * Pointer to the shadow queue.
2424d30ea906Sjfb8856606 * @param qid
2425d30ea906Sjfb8856606 * Queue id number.
2426d30ea906Sjfb8856606 * @param force
2427d30ea906Sjfb8856606 * Force releasing packets.
2428d30ea906Sjfb8856606 */
2429d30ea906Sjfb8856606 static inline void
mrvl_free_sent_buffers(struct pp2_ppio * ppio,struct pp2_hif * hif,unsigned int core_id,struct mrvl_shadow_txq * sq,int qid,int force)2430d30ea906Sjfb8856606 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2431d30ea906Sjfb8856606 unsigned int core_id, struct mrvl_shadow_txq *sq,
2432d30ea906Sjfb8856606 int qid, int force)
2433d30ea906Sjfb8856606 {
2434d30ea906Sjfb8856606 struct buff_release_entry *entry;
2435d30ea906Sjfb8856606 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2436d30ea906Sjfb8856606 int i;
2437d30ea906Sjfb8856606
2438d30ea906Sjfb8856606 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2439d30ea906Sjfb8856606
2440d30ea906Sjfb8856606 sq->num_to_release += nb_done;
2441d30ea906Sjfb8856606
2442d30ea906Sjfb8856606 if (likely(!force &&
2443d30ea906Sjfb8856606 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2444d30ea906Sjfb8856606 return;
2445d30ea906Sjfb8856606
2446d30ea906Sjfb8856606 nb_done = sq->num_to_release;
2447d30ea906Sjfb8856606 sq->num_to_release = 0;
2448d30ea906Sjfb8856606
2449d30ea906Sjfb8856606 for (i = 0; i < nb_done; i++) {
2450d30ea906Sjfb8856606 entry = &sq->ent[sq->tail + num];
2451d30ea906Sjfb8856606 if (unlikely(!entry->buff.addr)) {
2452d30ea906Sjfb8856606 MRVL_LOG(ERR,
2453d30ea906Sjfb8856606 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2454d30ea906Sjfb8856606 sq->tail, (u64)entry->buff.cookie,
2455d30ea906Sjfb8856606 (u64)entry->buff.addr);
2456d30ea906Sjfb8856606 skip_bufs = 1;
2457d30ea906Sjfb8856606 goto skip;
2458d30ea906Sjfb8856606 }
2459d30ea906Sjfb8856606
2460d30ea906Sjfb8856606 if (unlikely(!entry->bpool)) {
2461d30ea906Sjfb8856606 struct rte_mbuf *mbuf;
2462d30ea906Sjfb8856606
2463d30ea906Sjfb8856606 mbuf = (struct rte_mbuf *)
2464d30ea906Sjfb8856606 (cookie_addr_high | entry->buff.cookie);
2465d30ea906Sjfb8856606 rte_pktmbuf_free(mbuf);
2466d30ea906Sjfb8856606 skip_bufs = 1;
2467d30ea906Sjfb8856606 goto skip;
2468d30ea906Sjfb8856606 }
2469d30ea906Sjfb8856606
2470d30ea906Sjfb8856606 mrvl_port_bpool_size
2471d30ea906Sjfb8856606 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2472d30ea906Sjfb8856606 num++;
2473d30ea906Sjfb8856606 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2474d30ea906Sjfb8856606 goto skip;
2475d30ea906Sjfb8856606 continue;
2476d30ea906Sjfb8856606 skip:
2477d30ea906Sjfb8856606 if (likely(num))
2478d30ea906Sjfb8856606 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2479d30ea906Sjfb8856606 num += skip_bufs;
2480d30ea906Sjfb8856606 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2481d30ea906Sjfb8856606 sq->size -= num;
2482d30ea906Sjfb8856606 num = 0;
2483d30ea906Sjfb8856606 skip_bufs = 0;
2484d30ea906Sjfb8856606 }
2485d30ea906Sjfb8856606
2486d30ea906Sjfb8856606 if (likely(num)) {
2487d30ea906Sjfb8856606 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2488d30ea906Sjfb8856606 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2489d30ea906Sjfb8856606 sq->size -= num;
2490d30ea906Sjfb8856606 }
2491d30ea906Sjfb8856606 }
2492d30ea906Sjfb8856606
/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	struct pp2_ppio_desc descs[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, ret, bytes_sent = 0;
	uint16_t num, sq_free_size;
	uint64_t addr;

	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];

	/* Port not started or no hif available for this core. */
	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	/* Reclaim completed tx buffers before queuing new ones. */
	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
				       sq, q->queue_id, 0);

	/* Cap the burst to the shadow queue free space (one slot reserved). */
	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
		MRVL_LOG(DEBUG,
			"No room in shadow queue for %d packets! %d packets will be sent.",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		/* Prefetch the mbuf MRVL_MUSDK_PREFETCH_SHIFT packets ahead. */
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		/* Track the mbuf in the shadow queue and build its tx desc. */
		mrvl_fill_shadowq(sq, mbuf);
		mrvl_fill_desc(&descs[i], mbuf);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/*
		 * in case unsupported ol_flags were passed
		 * do not update descriptor offload information
		 */
		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
					      &l3_type, &l4_type, &gen_l3_cksum,
					      &gen_l4_cksum);
		if (unlikely(ret))
			continue;

		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						  mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = nb_pkts;
	/* pp2_ppio_send updates nb_pkts to the number actually sent. */
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/* number of packets that were not sent */
	if (unlikely(num > nb_pkts)) {
		/* Roll the unsent entries back out of the shadow queue. */
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
			addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
			bytes_sent -=
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		}
		sq->size -= num - nb_pkts;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}
2588d30ea906Sjfb8856606
/** DPDK callback for S/G transmit.
 *
 * @param txq
 *   Generic pointer transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	/* One descriptor per segment, worst case. */
	struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS];
	struct pp2_ppio_sg_pkts pkts;
	uint8_t frags[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, j, ret, bytes_sent = 0;
	int tail, tail_first;
	uint16_t num, sq_free_size;
	uint16_t nb_segs, total_descs = 0;
	uint64_t addr;

	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];
	pkts.frags = frags;
	pkts.num = 0;

	/* Port not started or no hif available for this core. */
	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	/* Reclaim completed tx buffers before queuing new ones. */
	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
				       sq, q->queue_id, 0);

	/* Save shadow queue free size */
	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;

	tail = 0;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		struct rte_mbuf *seg = NULL;
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		nb_segs = mbuf->nb_segs;
		tail_first = tail;
		total_descs += nb_segs;

		/*
		 * Check if total_descs does not exceed
		 * shadow queue free size
		 */
		if (unlikely(total_descs > sq_free_size)) {
			total_descs -= nb_segs;
			RTE_LOG(DEBUG, PMD,
				"No room in shadow queue for %d packets! "
				"%d packets will be sent.\n",
				nb_pkts, i);
			break;
		}

		/* Check if nb_segs does not exceed the max nb of desc per
		 * fragmented packet
		 */
		if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) {
			total_descs -= nb_segs;
			RTE_LOG(ERR, PMD,
				"Too many segments. Packet won't be sent.\n");
			break;
		}

		/* Prefetch the mbuf MRVL_MUSDK_PREFETCH_SHIFT packets ahead. */
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		pkts.frags[pkts.num] = nb_segs;
		pkts.num++;

		seg = mbuf;
		for (j = 0; j < nb_segs - 1; j++) {
			/* For the subsequent segments, set shadow queue
			 * buffer to NULL
			 */
			mrvl_fill_shadowq(sq, NULL);
			mrvl_fill_desc(&descs[tail], seg);

			tail++;
			seg = seg->next;
		}
		/* Put first mbuf info in last shadow queue entry */
		mrvl_fill_shadowq(sq, mbuf);
		/* Update descriptor with last segment */
		mrvl_fill_desc(&descs[tail++], seg);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/* In case unsupported ol_flags were passed
		 * do not update descriptor offload information
		 */
		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
					      &l3_type, &l4_type, &gen_l3_cksum,
					      &gen_l4_cksum);
		if (unlikely(ret))
			continue;

		/* Offload info goes on the packet's first descriptor. */
		pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type,
						  l4_type, mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = total_descs;
	/* pp2_ppio_send_sg updates total_descs to the number actually sent. */
	pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs,
			 &total_descs, &pkts);
	/* number of packets that were not sent */
	if (unlikely(num > total_descs)) {
		/* Roll the unsent entries back out of the shadow queue. */
		for (i = total_descs; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;

			/* NULL cookies mark non-first segments; skip those. */
			addr = sq->ent[sq->head].buff.cookie;
			if (addr)
				bytes_sent -=
					rte_pktmbuf_pkt_len((struct rte_mbuf *)
						(cookie_addr_high | addr));
		}
		sq->size -= num - total_descs;
		nb_pkts = pkts.num;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}
2734d30ea906Sjfb8856606
2735d30ea906Sjfb8856606 /**
2736d30ea906Sjfb8856606 * Initialize packet processor.
2737d30ea906Sjfb8856606 *
2738d30ea906Sjfb8856606 * @return
2739d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
2740d30ea906Sjfb8856606 */
2741d30ea906Sjfb8856606 static int
mrvl_init_pp2(void)2742d30ea906Sjfb8856606 mrvl_init_pp2(void)
2743d30ea906Sjfb8856606 {
2744d30ea906Sjfb8856606 struct pp2_init_params init_params;
2745d30ea906Sjfb8856606
2746d30ea906Sjfb8856606 memset(&init_params, 0, sizeof(init_params));
2747d30ea906Sjfb8856606 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2748d30ea906Sjfb8856606 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2749d30ea906Sjfb8856606 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2750d30ea906Sjfb8856606
2751d30ea906Sjfb8856606 return pp2_init(&init_params);
2752d30ea906Sjfb8856606 }
2753d30ea906Sjfb8856606
/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	/* Tear down the MUSDK PP2 state set up by mrvl_init_pp2(). */
	pp2_deinit();
}
2765d30ea906Sjfb8856606
2766d30ea906Sjfb8856606 /**
2767d30ea906Sjfb8856606 * Create private device structure.
2768d30ea906Sjfb8856606 *
2769d30ea906Sjfb8856606 * @param dev_name
2770d30ea906Sjfb8856606 * Pointer to the port name passed in the initialization parameters.
2771d30ea906Sjfb8856606 *
2772d30ea906Sjfb8856606 * @return
2773d30ea906Sjfb8856606 * Pointer to the newly allocated private device structure.
2774d30ea906Sjfb8856606 */
2775d30ea906Sjfb8856606 static struct mrvl_priv *
mrvl_priv_create(const char * dev_name)2776d30ea906Sjfb8856606 mrvl_priv_create(const char *dev_name)
2777d30ea906Sjfb8856606 {
2778d30ea906Sjfb8856606 struct pp2_bpool_params bpool_params;
2779d30ea906Sjfb8856606 char match[MRVL_MATCH_LEN];
2780d30ea906Sjfb8856606 struct mrvl_priv *priv;
2781d30ea906Sjfb8856606 int ret, bpool_bit;
2782d30ea906Sjfb8856606
2783d30ea906Sjfb8856606 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2784d30ea906Sjfb8856606 if (!priv)
2785d30ea906Sjfb8856606 return NULL;
2786d30ea906Sjfb8856606
2787d30ea906Sjfb8856606 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2788d30ea906Sjfb8856606 &priv->pp_id, &priv->ppio_id);
2789d30ea906Sjfb8856606 if (ret)
2790d30ea906Sjfb8856606 goto out_free_priv;
2791d30ea906Sjfb8856606
2792d30ea906Sjfb8856606 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2793d30ea906Sjfb8856606 PP2_BPOOL_NUM_POOLS);
2794d30ea906Sjfb8856606 if (bpool_bit < 0)
2795d30ea906Sjfb8856606 goto out_free_priv;
2796d30ea906Sjfb8856606 priv->bpool_bit = bpool_bit;
2797d30ea906Sjfb8856606
2798d30ea906Sjfb8856606 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2799d30ea906Sjfb8856606 priv->bpool_bit);
2800d30ea906Sjfb8856606 memset(&bpool_params, 0, sizeof(bpool_params));
2801d30ea906Sjfb8856606 bpool_params.match = match;
2802d30ea906Sjfb8856606 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2803d30ea906Sjfb8856606 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2804d30ea906Sjfb8856606 if (ret)
2805d30ea906Sjfb8856606 goto out_clear_bpool_bit;
2806d30ea906Sjfb8856606
2807d30ea906Sjfb8856606 priv->ppio_params.type = PP2_PPIO_T_NIC;
2808d30ea906Sjfb8856606 rte_spinlock_init(&priv->lock);
2809d30ea906Sjfb8856606
2810d30ea906Sjfb8856606 return priv;
2811d30ea906Sjfb8856606 out_clear_bpool_bit:
2812d30ea906Sjfb8856606 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2813d30ea906Sjfb8856606 out_free_priv:
2814d30ea906Sjfb8856606 rte_free(priv);
2815d30ea906Sjfb8856606 return NULL;
2816d30ea906Sjfb8856606 }
2817d30ea906Sjfb8856606
2818d30ea906Sjfb8856606 /**
2819d30ea906Sjfb8856606 * Create device representing Ethernet port.
2820d30ea906Sjfb8856606 *
2821d30ea906Sjfb8856606 * @param name
2822d30ea906Sjfb8856606 * Pointer to the port's name.
2823d30ea906Sjfb8856606 *
2824d30ea906Sjfb8856606 * @return
2825d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
2826d30ea906Sjfb8856606 */
2827d30ea906Sjfb8856606 static int
mrvl_eth_dev_create(struct rte_vdev_device * vdev,const char * name)2828d30ea906Sjfb8856606 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2829d30ea906Sjfb8856606 {
2830d30ea906Sjfb8856606 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2831d30ea906Sjfb8856606 struct rte_eth_dev *eth_dev;
2832d30ea906Sjfb8856606 struct mrvl_priv *priv;
2833d30ea906Sjfb8856606 struct ifreq req;
2834d30ea906Sjfb8856606
2835d30ea906Sjfb8856606 eth_dev = rte_eth_dev_allocate(name);
2836d30ea906Sjfb8856606 if (!eth_dev)
2837d30ea906Sjfb8856606 return -ENOMEM;
2838d30ea906Sjfb8856606
2839d30ea906Sjfb8856606 priv = mrvl_priv_create(name);
2840d30ea906Sjfb8856606 if (!priv) {
2841d30ea906Sjfb8856606 ret = -ENOMEM;
2842d30ea906Sjfb8856606 goto out_free;
2843d30ea906Sjfb8856606 }
2844d30ea906Sjfb8856606 eth_dev->data->dev_private = priv;
2845d30ea906Sjfb8856606
2846d30ea906Sjfb8856606 eth_dev->data->mac_addrs =
2847d30ea906Sjfb8856606 rte_zmalloc("mac_addrs",
28484418919fSjohnjiang RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2849d30ea906Sjfb8856606 if (!eth_dev->data->mac_addrs) {
2850d30ea906Sjfb8856606 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
2851d30ea906Sjfb8856606 ret = -ENOMEM;
2852d30ea906Sjfb8856606 goto out_free;
2853d30ea906Sjfb8856606 }
2854d30ea906Sjfb8856606
2855d30ea906Sjfb8856606 memset(&req, 0, sizeof(req));
2856d30ea906Sjfb8856606 strcpy(req.ifr_name, name);
2857d30ea906Sjfb8856606 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2858d30ea906Sjfb8856606 if (ret)
2859d30ea906Sjfb8856606 goto out_free;
2860d30ea906Sjfb8856606
2861d30ea906Sjfb8856606 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
28624418919fSjohnjiang req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);
2863d30ea906Sjfb8856606
2864d30ea906Sjfb8856606 eth_dev->device = &vdev->device;
2865d30ea906Sjfb8856606 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2866d30ea906Sjfb8856606 mrvl_set_tx_function(eth_dev);
2867d30ea906Sjfb8856606 eth_dev->dev_ops = &mrvl_ops;
2868*2d9fd380Sjfb8856606 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
28694418919fSjohnjiang
2870d30ea906Sjfb8856606 rte_eth_dev_probing_finish(eth_dev);
2871d30ea906Sjfb8856606 return 0;
2872d30ea906Sjfb8856606 out_free:
2873d30ea906Sjfb8856606 rte_eth_dev_release_port(eth_dev);
2874d30ea906Sjfb8856606
2875d30ea906Sjfb8856606 return ret;
2876d30ea906Sjfb8856606 }
2877d30ea906Sjfb8856606
2878d30ea906Sjfb8856606 /**
2879d30ea906Sjfb8856606 * Callback used by rte_kvargs_process() during argument parsing.
2880d30ea906Sjfb8856606 *
2881d30ea906Sjfb8856606 * @param key
2882d30ea906Sjfb8856606 * Pointer to the parsed key (unused).
2883d30ea906Sjfb8856606 * @param value
2884d30ea906Sjfb8856606 * Pointer to the parsed value.
2885d30ea906Sjfb8856606 * @param extra_args
2886d30ea906Sjfb8856606 * Pointer to the extra arguments which contains address of the
2887d30ea906Sjfb8856606 * table of pointers to parsed interface names.
2888d30ea906Sjfb8856606 *
2889d30ea906Sjfb8856606 * @return
2890d30ea906Sjfb8856606 * Always 0.
2891d30ea906Sjfb8856606 */
2892d30ea906Sjfb8856606 static int
mrvl_get_ifnames(const char * key __rte_unused,const char * value,void * extra_args)2893d30ea906Sjfb8856606 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2894d30ea906Sjfb8856606 void *extra_args)
2895d30ea906Sjfb8856606 {
2896d30ea906Sjfb8856606 struct mrvl_ifnames *ifnames = extra_args;
2897d30ea906Sjfb8856606
2898d30ea906Sjfb8856606 ifnames->names[ifnames->idx++] = value;
2899d30ea906Sjfb8856606
2900d30ea906Sjfb8856606 return 0;
2901d30ea906Sjfb8856606 }
2902d30ea906Sjfb8856606
2903d30ea906Sjfb8856606 /**
2904d30ea906Sjfb8856606 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2905d30ea906Sjfb8856606 */
2906d30ea906Sjfb8856606 static void
mrvl_deinit_hifs(void)2907d30ea906Sjfb8856606 mrvl_deinit_hifs(void)
2908d30ea906Sjfb8856606 {
2909d30ea906Sjfb8856606 int i;
2910d30ea906Sjfb8856606
2911d30ea906Sjfb8856606 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2912d30ea906Sjfb8856606 if (hifs[i])
2913d30ea906Sjfb8856606 pp2_hif_deinit(hifs[i]);
2914d30ea906Sjfb8856606 }
2915d30ea906Sjfb8856606 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2916d30ea906Sjfb8856606 memset(hifs, 0, sizeof(hifs));
2917d30ea906Sjfb8856606 }
2918d30ea906Sjfb8856606
2919d30ea906Sjfb8856606 /**
2920d30ea906Sjfb8856606 * DPDK callback to register the virtual device.
2921d30ea906Sjfb8856606 *
2922d30ea906Sjfb8856606 * @param vdev
2923d30ea906Sjfb8856606 * Pointer to the virtual device.
2924d30ea906Sjfb8856606 *
2925d30ea906Sjfb8856606 * @return
2926d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
2927d30ea906Sjfb8856606 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	struct mrvl_ifnames ifnames;
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	/* Reject the args early if more "iface" entries were passed than
	 * the fixed-size names[] table can hold.
	 */
	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames.names))
		goto out_free_kvlist;

	/* Collect the interface names; values point into kvlist, which
	 * stays alive until the frees at the bottom of this function.
	 */
	ifnames.idx = 0;
	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);


	/*
	 * The below system initialization should be done only once,
	 * on the first provided configuration file
	 */
	if (!mrvl_qos_cfg) {
		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
		MRVL_LOG(INFO, "Parsing config file!");
		if (cfgnum > 1) {
			MRVL_LOG(ERR, "Cannot handle more than one config file!");
			goto out_free_kvlist;
		} else if (cfgnum == 1) {
			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
					   mrvl_get_qoscfg, &mrvl_qos_cfg);
		}
	}

	/* MUSDK global init below runs only for the very first probed
	 * device; later probes jump straight to per-port creation.
	 */
	if (mrvl_dev_num)
		goto init_devices;

	MRVL_LOG(INFO, "Perform MUSDK initializations");

	ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
	if (ret)
		goto out_free_kvlist;

	ret = mrvl_init_pp2();
	if (ret) {
		MRVL_LOG(ERR, "Failed to init PP!");
		/* Undo rte_mvep_init() before bailing out. */
		rte_mvep_deinit(MVEP_MOD_T_PP2);
		goto out_free_kvlist;
	}

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));

	/* Seed the lcore range so per-port setup can widen it later. */
	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

init_devices:
	for (i = 0; i < ifnum; i++) {
		MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
		if (ret)
			goto out_cleanup;
		mrvl_dev_num++;
	}

	rte_kvargs_free(kvlist);

	return 0;
out_cleanup:
	/* Close any ports created earlier in this probe before failing. */
	rte_pmd_mrvl_remove(vdev);

out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}
3012d30ea906Sjfb8856606
3013d30ea906Sjfb8856606 /**
3014d30ea906Sjfb8856606 * DPDK callback to remove virtual device.
3015d30ea906Sjfb8856606 *
3016d30ea906Sjfb8856606 * @param vdev
3017d30ea906Sjfb8856606 * Pointer to the removed virtual device.
3018d30ea906Sjfb8856606 *
3019d30ea906Sjfb8856606 * @return
3020d30ea906Sjfb8856606 * 0 on success, negative error value otherwise.
3021d30ea906Sjfb8856606 */
3022d30ea906Sjfb8856606 static int
rte_pmd_mrvl_remove(struct rte_vdev_device * vdev)3023d30ea906Sjfb8856606 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
3024d30ea906Sjfb8856606 {
30254418919fSjohnjiang uint16_t port_id;
3026*2d9fd380Sjfb8856606 int ret = 0;
3027d30ea906Sjfb8856606
30284418919fSjohnjiang RTE_ETH_FOREACH_DEV(port_id) {
30294418919fSjohnjiang if (rte_eth_devices[port_id].device != &vdev->device)
30304418919fSjohnjiang continue;
3031*2d9fd380Sjfb8856606 ret |= rte_eth_dev_close(port_id);
3032d30ea906Sjfb8856606 }
3033d30ea906Sjfb8856606
3034*2d9fd380Sjfb8856606 return ret == 0 ? 0 : -EIO;
3035d30ea906Sjfb8856606 }
3036d30ea906Sjfb8856606
/* Virtual-device driver hooks for the mvpp2 PMD. */
static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

/* Register as "net_mvpp2" and keep the legacy "eth_mvpp2" alias. */
RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
/* Driver-wide log type, default verbosity NOTICE. */
RTE_LOG_REGISTER(mrvl_logtype, pmd.net.mvpp2, NOTICE);
3045