/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

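/* Report device limits, offload capabilities and default queue/descriptor
 * configuration to the ethdev layer.
 */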
int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int max_rx_pktlen;

	max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
			 CNXK_NIX_MAX_VTAG_ACT_SIZE);

	devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
	devinfo->max_rx_pktlen = max_rx_pktlen;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen -
				(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->nix.reta_sz;
	devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
	devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf){
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf){
		.offloads = 0,
	};

	devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
		.ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = CNXK_NIX_RX_MIN_DESC,
		.nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
	};
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    CNXK_NIX_RX_MIN_DESC_ALIGN);

	devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = 1,
		.nb_align = 1,
		.nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
	};

	devinfo->speed_capa = dev->speed_capa;
	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	return 0;
}

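/* Compose a human-readable Rx burst mode string: vector vs. scalar datapath
 * followed by the list of enabled Rx offloads.
 */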
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
		{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
						 "Scalar, Rx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (dev->rx_offloads & rx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 rx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}

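/* Compose the equivalent Tx burst mode string: datapath flavour followed by
 * the list of enabled Tx offloads.
 */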
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
						 "Scalar, Tx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (dev->tx_offloads & tx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 tx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}

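/* Read the current flow control mode from ROC; mode_map translates the
 * ROC_NIX_FC_* ordering into the corresponding RTE_ETH_FC_* values.
 */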
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum rte_eth_fc_mode mode_map[] = {
					   RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
					   RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
					  };
	struct roc_nix *nix = &dev->nix;
	int mode;

	mode = roc_nix_fc_mode_get(nix);
	if (mode < 0)
		return mode;

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	fc_conf->mode = mode_map[mode];
	return 0;
}

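/* Enable or disable backpressure on the CQ backing the given Rx queue,
 * reusing the CQ drop threshold already programmed for it.
 */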
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_fc_cfg fc_cfg;
	struct roc_nix_cq *cq;

	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	cq = &dev->cqs[qid];
	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
	fc_cfg.cq_cfg.enable = enable;
	/* Map all CQs to last channel */
	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
	fc_cfg.cq_cfg.rq = qid;
	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

	return roc_nix_fc_config_set(nix, &fc_cfg);
}

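/* Only the flow control mode can be changed here; pause thresholds and the
 * other rte_eth_fc_conf fields are rejected. Tx pause is applied per CQ on
 * the Rx queues, while Rx pause is applied on the traffic manager side per
 * SQ on the Tx queues.
 */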
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum roc_nix_fc_mode mode_map[] = {
					   ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL
					  };
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct roc_nix *nix = &dev->nix;
	struct cnxk_eth_rxq_sp *rxq;
	struct cnxk_eth_txq_sp *txq;
	uint8_t rx_pause, tx_pause;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
		plt_err("Flow control configuration is not allowed on VFs");
		return -ENOTSUP;
	}

	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
		plt_info("Only MODE configuration is supported");
		return -EINVAL;
	}

	if (fc_conf->mode == fc->mode)
		return 0;

	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
		    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
		    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);

	/* Check if TX pause frame is already enabled or not */
	if (fc->tx_pause ^ tx_pause) {
		if (roc_model_is_cn96_ax() && data->dev_started) {
			/* On Ax, CQ should be in disabled state
			 * while setting flow control configuration.
			 */
			plt_info("Stop the port=%d for setting flow control",
				 data->port_id);
			return 0;
		}

		for (i = 0; i < data->nb_rx_queues; i++) {
			struct roc_nix_fc_cfg fc_cfg;

			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
			      1;
			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
			if (rc)
				return rc;
		}
	}

	/* Check if RX pause frame is enabled or not */
	if (fc->rx_pause ^ rx_pause) {
		for (i = 0; i < data->nb_tx_queues; i++) {
			struct roc_nix_fc_cfg fc_cfg;

			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
			      1;
			fc_cfg.type = ROC_NIX_FC_TM_CFG;
			fc_cfg.tm_cfg.sq = txq->qid;
			fc_cfg.tm_cfg.enable = !!rx_pause;
			rc = roc_nix_fc_config_set(nix, &fc_cfg);
			if (rc)
				return rc;
		}
	}

	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
	if (rc)
		return rc;

	fc->rx_pause = rx_pause;
	fc->tx_pause = tx_pause;
	fc->mode = fc_conf->mode;

	return rc;
}

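/* Report PFC capabilities: one traffic class per NIX channel and support for
 * both Rx and Tx pause (RTE_ETH_FC_FULL).
 */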
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
					 struct rte_eth_pfc_queue_info *pfc_info)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
	pfc_info->mode_capa = RTE_ETH_FC_FULL;
	return 0;
}

int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
					 struct rte_eth_pfc_queue_conf *pfc_conf)
{
	struct cnxk_pfc_cfg conf;
	int rc;

	memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));

	conf.fc_cfg.mode = pfc_conf->mode;

	conf.pause_time = pfc_conf->tx_pause.pause_time;
	conf.rx_tc = pfc_conf->tx_pause.tc;
	conf.rx_qid = pfc_conf->tx_pause.rx_qid;

	conf.tx_tc = pfc_conf->rx_pause.tc;
	conf.tx_qid = pfc_conf->rx_pause.tx_qid;

	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
	if (rc)
		return rc;

	return rc;
}

int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
		      const struct rte_flow_ops **ops)
{
	RTE_SET_USED(eth_dev);

	*ops = &cnxk_flow_ops;
	return 0;
}

int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Update mac address at NPC */
	rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
	if (rc)
		goto exit;

	/* Update mac address at CGX for PFs only */
	if (!roc_nix_is_vf_or_sdp(nix)) {
		rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
		if (rc) {
			/* Rollback to previous mac address */
			roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
			goto exit;
		}
	}

	/* Update mac address to cnxk ethernet device */
	rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
	return rc;
}

int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
		      uint32_t index, uint32_t pool)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	PLT_SET_USED(index);
	PLT_SET_USED(pool);

	rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
	if (rc < 0) {
		plt_err("Failed to add mac address, rc=%d", rc);
		return rc;
	}

	/* Enable promiscuous mode at NIX level */
	roc_nix_npc_promisc_ena_dis(nix, true);
	dev->dmac_filter_enable = true;
	eth_dev->data->promiscuous = false;
	dev->dmac_filter_count++;

	return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_mac_addr_del(nix, index);
	if (rc)
		plt_err("Failed to delete mac address, rc=%d", rc);

	dev->dmac_filter_count--;
}

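/* Apply a new MTU: validate it against hardware limits and the Rx buffer or
 * scatter configuration, then program the frame size on both the Tx (MAC MTU)
 * and Rx (max receive length) sides, rolling back on failure.
 */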
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;
	uint32_t buffsz;

	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

	/* Check if MTU is within the allowed range */
	if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
		plt_err("MTU is less than minimum");
		goto exit;
	}

	if ((frame_size - RTE_ETHER_CRC_LEN) >
	    ((uint32_t)roc_nix_max_pkt_len(nix))) {
		plt_err("MTU is greater than maximum");
		goto exit;
	}

	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

	/* Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (data->dev_started && frame_size > buffsz &&
	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
		plt_err("Scatter offload is not enabled for mtu");
		goto exit;
	}

	/* Check <seg size> * <max_seg>  >= max_frame */
	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
		plt_err("Greater than maximum supported packet length");
		goto exit;
	}

	frame_size -= RTE_ETHER_CRC_LEN;

	/* Update mtu on Tx */
	rc = roc_nix_mac_mtu_set(nix, frame_size);
	if (rc) {
		plt_err("Failed to set MTU, rc=%d", rc);
		goto exit;
	}

	/* Sync same frame size on Rx */
	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
	if (rc) {
		/* Rollback to older mtu */
		roc_nix_mac_mtu_set(nix,
				    old_frame_size - RTE_ETHER_CRC_LEN);
		plt_err("Failed to set max Rx frame length, rc=%d", rc);
		goto exit;
	}
exit:
	return rc;
}

int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, false);
		return rc;
	}

	return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, false);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
		return rc;
	}

	dev->dmac_filter_enable = false;
	return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, true,
					eth_dev->data->promiscuous);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, false,
					eth_dev->data->promiscuous);
}

int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	rc = roc_nix_mac_link_state_set(nix, true);
	if (rc)
		goto exit;

	/* Start tx queues  */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			goto exit;
	}

exit:
	return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	/* Stop tx queues  */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_stop(eth_dev, i);
		if (rc)
			goto exit;
	}

	rc = roc_nix_mac_link_state_set(nix, false);
exit:
	return rc;
}

int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	modinfo->type = eeprom_info.sff_id;
	modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
	return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	if (!info->data || !info->length ||
	    (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
		return rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
	return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
			       uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}

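/* Expose the NPC profile name as the "firmware version" string. If the
 * caller's buffer is too small, return the required size as per the ethdev
 * fw_version_get() contract.
 */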
int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			size_t fw_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const char *str = roc_npc_profile_name_get(&dev->npc);
	uint32_t size = strlen(str) + 1;

	if (fw_size > size)
		fw_size = size;

	rte_strlcpy(fw_version, str, fw_size);

	if (fw_size < size)
		return size;

	return 0;
}

void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_rxq_info *qinfo)
{
	void *rxq = eth_dev->data->rx_queues[qid];
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->mp = rxq_sp->qconf.mp;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_txq_info *qinfo)
{
	void *txq = eth_dev->data->tx_queues[qid];
	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->nb_desc = txq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

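/* Number of used Rx descriptors is the distance from CQ head to tail,
 * modulo the ring size.
 */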
uint32_t
cnxk_nix_rx_queue_count(void *rxq)
{
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	struct roc_nix *nix = &rxq_sp->dev->nix;
	uint32_t head, tail;

	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
	return (tail - head) % (rxq_sp->qconf.nb_desc);
}

static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
{
	/* Check whether the given offset (queue index) holds a packet that
	 * HW has filled (Rx) or transmitted (Tx), accounting for the
	 * wrap-around case.
	 */
	return ((tail > head && offset <= tail && offset >= head) ||
		(head > tail && (offset >= head || offset <= tail))) ?
		       is_rx :
		       !is_rx;
}

int
cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
{
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	struct roc_nix *nix = &rxq_sp->dev->nix;
	uint32_t head, tail;

	if (rxq_sp->qconf.nb_desc <= offset)
		return -EINVAL;

	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);

	if (nix_offset_has_packet(head, tail, offset, 1))
		return RTE_ETH_RX_DESC_DONE;
	else
		return RTE_ETH_RX_DESC_AVAIL;
}

int
cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
{
	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
	struct roc_nix *nix = &txq_sp->dev->nix;
	uint32_t head = 0, tail = 0;

	if (txq_sp->qconf.nb_desc <= offset)
		return -EINVAL;

	roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);

	if (nix_offset_has_packet(head, tail, offset, 0))
		return RTE_ETH_TX_DESC_DONE;
	else
		return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(free_cnt);

	return 0;
}

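/* Register dump: with a NULL data pointer only report the register count and
 * width; otherwise dump all NIX LF registers into the caller's buffer.
 */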
int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint64_t *data = regs->data;
	int rc = -ENOTSUP;

	if (data == NULL) {
		rc = roc_nix_lf_get_reg_count(nix);
		if (rc > 0) {
			regs->length = rc;
			regs->width = 8;
			rc = 0;
		}
		return rc;
	}

	if (!regs->length ||
	    regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
		return roc_nix_lf_reg_dump(nix, data);

	return rc;
}

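/* Update the RSS redirection table. The current table is read back first so
 * that entries not selected by the per-group mask keep their current value.
 */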
int
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	struct roc_nix *nix = &dev->nix;
	int i, j, rc = -EINVAL, idx = 0;

	if (reta_size != dev->nix.reta_sz) {
		plt_err("Size of hash lookup table configured (%d) does not "
			"match the number hardware can support (%d)",
			reta_size, dev->nix.reta_sz);
		goto fail;
	}

	roc_nix_rss_reta_get(nix, 0, reta);

	/* Copy RETA table */
	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask >> j) & 0x01)
				reta[idx] = reta_conf[i].reta[j];
			idx++;
		}
	}

	return roc_nix_rss_reta_set(nix, 0, reta);

fail:
	return rc;
}

int
cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL, i, j, idx = 0;

	if (reta_size != dev->nix.reta_sz) {
		plt_err("Size of hash lookup table configured (%d) does not "
			"match the number hardware can support (%d)",
			reta_size, dev->nix.reta_sz);
		goto fail;
	}

	rc = roc_nix_rss_reta_get(nix, 0, reta);
	if (rc)
		goto fail;

	/* Copy RETA table */
	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = reta[idx];
			idx++;
		}
	}

	return 0;

fail:
	return rc;
}

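/* Update the RSS hash key and/or hash functions. The RSS level encoded in
 * rss_hf (RTE_ETH_RSS_LEVEL) is translated into the NIX flowkey config before
 * programming the default RSS group.
 */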
int
cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	int rc = -EINVAL;
	uint8_t alg_idx;

	if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
		plt_err("Hash key size mismatch %d vs %d",
			rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
		goto fail;
	}

	if (rss_conf->rss_key)
		roc_nix_rss_key_set(nix, rss_conf->rss_key);

	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
	if (rss_hash_level)
		rss_hash_level -= 1;
	flowkey_cfg =
		cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);

	rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
				     ROC_NIX_RSS_GROUP_DEFAULT,
				     ROC_NIX_RSS_MCAM_IDX_DEFAULT);
	if (rc) {
		plt_err("Failed to set RSS hash function rc=%d", rc);
		return rc;
	}

fail:
	return rc;
}

int
cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (rss_conf->rss_key)
		roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);

	rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
	rss_conf->rss_hf = dev->ethdev_rss_hf;

	return 0;
}

int
cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mc_addr_set,
				uint32_t nb_mc_addr)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_ether_addr null_mac_addr;
	struct roc_nix *nix = &dev->nix;
	int rc, index;
	uint32_t i;

	memset(&null_mac_addr, 0, sizeof(null_mac_addr));

	/* All configured multicast filters should be flushed first */
	for (i = 0; i < dev->max_mac_entries; i++) {
		if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
			rc = roc_nix_mac_addr_del(nix, i);
			if (rc) {
				plt_err("Failed to flush mcast address, rc=%d",
					rc);
				return rc;
			}

			dev->dmac_filter_count--;
			/* Update address in NIC data structure */
			rte_ether_addr_copy(&null_mac_addr,
					    &data->mac_addrs[i]);
		}
	}

	if (!mc_addr_set || !nb_mc_addr)
		return 0;

	/* Check for available space */
	if (nb_mc_addr >
	    ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
		plt_err("No space is available to add multicast filters");
		return -ENOSPC;
	}

	/* Multicast addresses are to be installed */
	for (i = 0; i < nb_mc_addr; i++) {
		index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
		if (index < 0) {
			plt_err("Failed to add mcast mac address, rc=%d",
				index);
			return index;
		}

		dev->dmac_filter_count++;
		/* Update address in NIC data structure */
		rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
	}

	roc_nix_npc_promisc_ena_dis(nix, true);
	dev->dmac_filter_enable = true;
	eth_dev->data->promiscuous = false;

	return 0;
}

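/* Per-queue priority flow control setup: map the requested traffic classes to
 * the CQ of the Rx queue (Tx pause) and the SQ of the Tx queue (Rx pause),
 * switching the TM hierarchy to the PFC tree when needed, and finally program
 * the PFC mode on the MAC block.
 */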
int
nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
				 struct cnxk_pfc_cfg *conf)
{
	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_pfc_cfg pfc_cfg;
	struct roc_nix_fc_cfg fc_cfg;
	struct cnxk_eth_rxq_sp *rxq;
	struct cnxk_eth_txq_sp *txq;
	uint8_t rx_pause, tx_pause;
	enum rte_eth_fc_mode mode;
	struct roc_nix_cq *cq;
	struct roc_nix_sq *sq;
	int rc;

	if (roc_nix_is_vf_or_sdp(nix)) {
		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
		return -ENOTSUP;
	}

	if (roc_model_is_cn96_ax() && data->dev_started) {
		/* On Ax, CQ should be in disabled state
		 * while setting flow control configuration.
		 */
		plt_info("Stop the port=%d for setting flow control",
			 data->port_id);
		return 0;
	}

	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
		plt_err("Same TC can not be configured on multiple SQs");
		return -ENOTSUP;
	}

	mode = conf->fc_cfg.mode;
	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);

	if (data->rx_queues == NULL || data->tx_queues == NULL) {
		rc = 0;
		goto exit;
	}

	/* Configure CQs */
	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
	cq = &dev->cqs[rxq->qid];
	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
	fc_cfg.cq_cfg.tc = conf->rx_tc;
	fc_cfg.cq_cfg.enable = !!tx_pause;
	fc_cfg.cq_cfg.rq = cq->qid;
	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		goto exit;

	/* Check if RX pause frame is enabled or not */
	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
			goto exit;

		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
		    eth_dev->data->nb_tx_queues > 1) {
			/*
			 * Disabled xmit will be enabled when
			 * new topology is available.
			 */
			rc = roc_nix_tm_hierarchy_disable(nix);
			if (rc)
				goto exit;

			rc = roc_nix_tm_pfc_prepare_tree(nix);
			if (rc)
				goto exit;

			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
							 true);
			if (rc)
				goto exit;
		}
	}

	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
	sq = &dev->sqs[txq->qid];
	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.sq = sq->qid;
	fc_cfg.tm_cfg.tc = conf->tx_tc;
	fc_cfg.tm_cfg.enable = !!rx_pause;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		return rc;

	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;

	/* Configure MAC block */
	if (tx_pause)
		pfc->class_en |= BIT(conf->rx_tc);
	else
		pfc->class_en &= ~BIT(conf->rx_tc);

	if (pfc->class_en)
		mode = RTE_ETH_FC_FULL;

	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
	pfc_cfg.mode = mode_map[mode];
	pfc_cfg.tc = pfc->class_en;
	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
	if (rc)
		return rc;

	pfc->fc_cfg.rx_pause = rx_pause;
	pfc->fc_cfg.tx_pause = tx_pause;
	pfc->fc_cfg.mode = mode;

exit:
	return rc;
}