xref: /f-stack/dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision ebf5cedb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 #include <stdbool.h>
8 
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 #include <rte_alarm.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_irq.h"
20 #include "bnxt_ring.h"
21 #include "bnxt_rxq.h"
22 #include "bnxt_rxr.h"
23 #include "bnxt_stats.h"
24 #include "bnxt_txq.h"
25 #include "bnxt_txr.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29 
30 #define DRV_MODULE_NAME		"bnxt"
31 static const char bnxt_version[] =
32 	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
33 int bnxt_logtype_driver;
34 
35 /*
36  * The set of PCI devices this driver supports
37  */
38 static const struct rte_pci_id bnxt_pci_id_map[] = {
39 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
40 			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
41 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
42 			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
43 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
44 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
45 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
46 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
47 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
48 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
49 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
50 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
51 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
52 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
53 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
54 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
55 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
56 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
57 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
58 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
59 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
60 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
61 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
62 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
63 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
64 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
65 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
66 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
67 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
68 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
69 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
70 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
71 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
72 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
73 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
74 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
75 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
76 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
77 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
78 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
79 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
80 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
81 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
82 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
83 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
84 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
85 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
86 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
87 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
88 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
89 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
90 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
91 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
92 	{ .vendor_id = 0, /* sentinel */ },
93 };
94 
95 #define BNXT_ETH_RSS_SUPPORT (	\
96 	ETH_RSS_IPV4 |		\
97 	ETH_RSS_NONFRAG_IPV4_TCP |	\
98 	ETH_RSS_NONFRAG_IPV4_UDP |	\
99 	ETH_RSS_IPV6 |		\
100 	ETH_RSS_NONFRAG_IPV6_TCP |	\
101 	ETH_RSS_NONFRAG_IPV6_UDP)
102 
103 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
104 				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
105 				     DEV_TX_OFFLOAD_TCP_CKSUM | \
106 				     DEV_TX_OFFLOAD_UDP_CKSUM | \
107 				     DEV_TX_OFFLOAD_TCP_TSO | \
108 				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
109 				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
110 				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
111 				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
112 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
113 				     DEV_TX_OFFLOAD_QINQ_INSERT | \
114 				     DEV_TX_OFFLOAD_MULTI_SEGS)
115 
116 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
117 				     DEV_RX_OFFLOAD_VLAN_STRIP | \
118 				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
119 				     DEV_RX_OFFLOAD_UDP_CKSUM | \
120 				     DEV_RX_OFFLOAD_TCP_CKSUM | \
121 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
122 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
123 				     DEV_RX_OFFLOAD_KEEP_CRC | \
124 				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
125 				     DEV_RX_OFFLOAD_TCP_LRO | \
126 				     DEV_RX_OFFLOAD_SCATTER | \
127 				     DEV_RX_OFFLOAD_RSS_HASH)
128 
129 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
130 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
131 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
132 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
133 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
134 static void bnxt_cancel_fw_health_check(struct bnxt *bp);
135 static int bnxt_restore_vlan_filters(struct bnxt *bp);
136 static void bnxt_dev_recover(void *arg);
137 
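/*
 * Map the device error state to an errno: -EIO once a fatal error has
 * been flagged, -EBUSY while a firmware reset is still in progress.
 */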
138 int is_bnxt_in_error(struct bnxt *bp)
139 {
140 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
141 		return -EIO;
142 	if (bp->flags & BNXT_FLAG_FW_RESET)
143 		return -EBUSY;
144 
145 	return 0;
146 }
147 
148 /***********************/
149 
150 /*
151  * High level utility functions
152  */
153 
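/*
 * Number of RSS contexts needed for the current Rx ring count.
 * Thor provides BNXT_RSS_ENTRIES_PER_CTX_THOR table entries per
 * context, so round the ring count up to a whole number of contexts;
 * older chips always use a single context.
 */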
154 uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
155 {
156 	if (!BNXT_CHIP_THOR(bp))
157 		return 1;
158 
159 	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
160 				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
161 				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
162 }
163 
164 static uint16_t  bnxt_rss_hash_tbl_size(const struct bnxt *bp)
165 {
166 	if (!BNXT_CHIP_THOR(bp))
167 		return HW_HASH_INDEX_SIZE;
168 
169 	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
170 }
171 
172 static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
173 {
174 	bnxt_free_filter_mem(bp);
175 	bnxt_free_vnic_attributes(bp);
176 	bnxt_free_vnic_mem(bp);
177 
178 	/* tx/rx rings are configured as part of *_queue_setup callbacks.
179 	 * If the number of rings changes across a fw update,
180 	 * we don't have much choice except to warn the user.
181 	 */
182 	if (!reconfig) {
183 		bnxt_free_stats(bp);
184 		bnxt_free_tx_rings(bp);
185 		bnxt_free_rx_rings(bp);
186 	}
187 	bnxt_free_async_cp_ring(bp);
188 	bnxt_free_rxtx_nq_ring(bp);
189 
190 	rte_free(bp->grp_info);
191 	bp->grp_info = NULL;
192 }
193 
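/*
 * Allocate the driver-side state needed before datapath bring-up:
 * ring groups, the async ring struct, VNIC memory and attributes,
 * filter memory, the async completion ring and the Rx/Tx NQ ring.
 * On any failure, everything allocated so far is released again.
 */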
194 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
195 {
196 	int rc;
197 
198 	rc = bnxt_alloc_ring_grps(bp);
199 	if (rc)
200 		goto alloc_mem_err;
201 
202 	rc = bnxt_alloc_async_ring_struct(bp);
203 	if (rc)
204 		goto alloc_mem_err;
205 
206 	rc = bnxt_alloc_vnic_mem(bp);
207 	if (rc)
208 		goto alloc_mem_err;
209 
210 	rc = bnxt_alloc_vnic_attributes(bp);
211 	if (rc)
212 		goto alloc_mem_err;
213 
214 	rc = bnxt_alloc_filter_mem(bp);
215 	if (rc)
216 		goto alloc_mem_err;
217 
218 	rc = bnxt_alloc_async_cp_ring(bp);
219 	if (rc)
220 		goto alloc_mem_err;
221 
222 	rc = bnxt_alloc_rxtx_nq_ring(bp);
223 	if (rc)
224 		goto alloc_mem_err;
225 
226 	return 0;
227 
228 alloc_mem_err:
229 	bnxt_free_mem(bp, reconfig);
230 	return rc;
231 }
232 
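/*
 * Bring up a single VNIC: allocate its ring group and firmware VNIC,
 * allocate RSS contexts when RSS is enabled, apply the VLAN strip
 * setting, program the L2 filters, the RSS table, the buffer
 * placement mode and the LRO (TPA) setting.
 */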
233 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
234 {
235 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
236 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
237 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
238 	struct bnxt_rx_queue *rxq;
239 	unsigned int j;
240 	int rc;
241 
242 	rc = bnxt_vnic_grp_alloc(bp, vnic);
243 	if (rc)
244 		goto err_out;
245 
246 	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
247 		    vnic_id, vnic, vnic->fw_grp_ids);
248 
249 	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
250 	if (rc)
251 		goto err_out;
252 
253 	/* Alloc RSS context only if RSS mode is enabled */
254 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
255 		int j, nr_ctxs = bnxt_rss_ctxts(bp);
256 
257 		rc = 0;
258 		for (j = 0; j < nr_ctxs; j++) {
259 			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
260 			if (rc)
261 				break;
262 		}
263 		if (rc) {
264 			PMD_DRV_LOG(ERR,
265 				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
266 				    vnic_id, j, rc);
267 			goto err_out;
268 		}
269 		vnic->num_lb_ctxts = nr_ctxs;
270 	}
271 
272 	/*
273 	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
274 	 * setting is not available at this time, it will not be
275 	 * configured correctly in the CFA.
276 	 */
277 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
278 		vnic->vlan_strip = true;
279 	else
280 		vnic->vlan_strip = false;
281 
282 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
283 	if (rc)
284 		goto err_out;
285 
286 	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
287 	if (rc)
288 		goto err_out;
289 
290 	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
291 		rxq = bp->eth_dev->data->rx_queues[j];
292 
293 		PMD_DRV_LOG(DEBUG,
294 			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
295 			    j, rxq->vnic, rxq->vnic->fw_grp_ids);
296 
297 		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
298 			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
299 	}
300 
301 	rc = bnxt_vnic_rss_configure(bp, vnic);
302 	if (rc)
303 		goto err_out;
304 
305 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
306 
307 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
308 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
309 	else
310 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
311 
312 	return 0;
313 err_out:
314 	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
315 		    vnic_id, rc);
316 	return rc;
317 }
318 
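/*
 * Datapath bring-up: derive the jumbo-frame setting, allocate firmware
 * stat contexts, rings and ring groups, configure CoS queues and all
 * VNICs, set the Rx mask, wire up the Rx queue interrupt vectors and
 * apply the link configuration.
 */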
319 static int bnxt_init_chip(struct bnxt *bp)
320 {
321 	struct rte_eth_link new;
322 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
323 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
324 	uint32_t intr_vector = 0;
325 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
326 	uint32_t vec = BNXT_MISC_VEC_ID;
327 	unsigned int i, j;
328 	int rc;
329 
330 	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
331 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
332 			DEV_RX_OFFLOAD_JUMBO_FRAME;
333 		bp->flags |= BNXT_FLAG_JUMBO;
334 	} else {
335 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
336 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
337 		bp->flags &= ~BNXT_FLAG_JUMBO;
338 	}
339 
340 	/* THOR does not support ring groups.
341 	 * But we will use the array to save RSS context IDs.
342 	 */
343 	if (BNXT_CHIP_THOR(bp))
344 		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
345 
346 	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
347 	if (rc) {
348 		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
349 		goto err_out;
350 	}
351 
352 	rc = bnxt_alloc_hwrm_rings(bp);
353 	if (rc) {
354 		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
355 		goto err_out;
356 	}
357 
358 	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
359 	if (rc) {
360 		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
361 		goto err_out;
362 	}
363 
364 	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
365 		goto skip_cosq_cfg;
366 
367 	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
368 		if (bp->rx_cos_queue[i].id != 0xff) {
369 			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
370 
371 			if (!vnic) {
372 				PMD_DRV_LOG(ERR,
373 					    "Num pools more than FW profile\n");
374 				rc = -EINVAL;
375 				goto err_out;
376 			}
377 			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
378 			bp->rx_cosq_cnt++;
379 		}
380 	}
381 
382 skip_cosq_cfg:
383 	rc = bnxt_mq_rx_configure(bp);
384 	if (rc) {
385 		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
386 		goto err_out;
387 	}
388 
389 	/* VNIC configuration */
390 	for (i = 0; i < bp->nr_vnics; i++) {
391 		rc = bnxt_setup_one_vnic(bp, i);
392 		if (rc)
393 			goto err_out;
394 	}
395 
396 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
397 	if (rc) {
398 		PMD_DRV_LOG(ERR,
399 			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
400 		goto err_out;
401 	}
402 
403 	/* check and configure queue intr-vector mapping */
404 	if ((rte_intr_cap_multiple(intr_handle) ||
405 	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
406 	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
407 		intr_vector = bp->eth_dev->data->nb_rx_queues;
408 		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
409 		if (intr_vector > bp->rx_cp_nr_rings) {
410 			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
411 					bp->rx_cp_nr_rings);
412 			return -ENOTSUP;
413 		}
414 		rc = rte_intr_efd_enable(intr_handle, intr_vector);
415 		if (rc)
416 			return rc;
417 	}
418 
419 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
420 		intr_handle->intr_vec =
421 			rte_zmalloc("intr_vec",
422 				    bp->eth_dev->data->nb_rx_queues *
423 				    sizeof(int), 0);
424 		if (intr_handle->intr_vec == NULL) {
425 			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
426 				" intr_vec", bp->eth_dev->data->nb_rx_queues);
427 			rc = -ENOMEM;
428 			goto err_disable;
429 		}
430 		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
431 			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
432 			 intr_handle->intr_vec, intr_handle->nb_efd,
433 			intr_handle->max_intr);
434 		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
435 		     queue_id++) {
436 			intr_handle->intr_vec[queue_id] =
437 							vec + BNXT_RX_VEC_START;
438 			if (vec < base + intr_handle->nb_efd - 1)
439 				vec++;
440 		}
441 	}
442 
443 	/* enable uio/vfio intr/eventfd mapping */
444 	rc = rte_intr_enable(intr_handle);
445 #ifndef RTE_EXEC_ENV_FREEBSD
446 	/* In FreeBSD OS, nic_uio driver does not support interrupts */
447 	if (rc)
448 		goto err_free;
449 #endif
450 
451 	rc = bnxt_get_hwrm_link_config(bp, &new);
452 	if (rc) {
453 		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
454 		goto err_free;
455 	}
456 
457 	if (!bp->link_info.link_up) {
458 		rc = bnxt_set_hwrm_link_config(bp, true);
459 		if (rc) {
460 			PMD_DRV_LOG(ERR,
461 				"HWRM link config failure rc: %x\n", rc);
462 			goto err_free;
463 		}
464 	}
465 	bnxt_print_link_info(bp->eth_dev);
466 
467 	return 0;
468 
469 err_free:
470 	rte_free(intr_handle->intr_vec);
471 err_disable:
472 	rte_intr_efd_disable(intr_handle);
473 err_out:
474 	/* Some of the error status returned by FW may not be from errno.h */
475 	if (rc > 0)
476 		rc = -EIO;
477 
478 	return rc;
479 }
480 
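/* Release all HWRM-allocated resources, filters and VNICs. */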
481 static int bnxt_shutdown_nic(struct bnxt *bp)
482 {
483 	bnxt_free_all_hwrm_resources(bp);
484 	bnxt_free_all_filters(bp);
485 	bnxt_free_all_vnics(bp);
486 	return 0;
487 }
488 
489 /*
490  * Device configuration and status function
491  */
492 
493 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
494 				struct rte_eth_dev_info *dev_info)
495 {
496 	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
497 	struct bnxt *bp = eth_dev->data->dev_private;
498 	uint16_t max_vnics, i, j, vpool, vrxq;
499 	unsigned int max_rx_rings;
500 	int rc;
501 
502 	rc = is_bnxt_in_error(bp);
503 	if (rc)
504 		return rc;
505 
506 	/* MAC Specifics */
507 	dev_info->max_mac_addrs = bp->max_l2_ctx;
508 	dev_info->max_hash_mac_addrs = 0;
509 
510 	/* PF/VF specifics */
511 	if (BNXT_PF(bp))
512 		dev_info->max_vfs = pdev->max_vfs;
513 
514 	max_rx_rings = BNXT_MAX_RINGS(bp);
515 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
516 	dev_info->max_rx_queues = max_rx_rings;
517 	dev_info->max_tx_queues = max_rx_rings;
518 	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
519 	dev_info->hash_key_size = 40;
520 	max_vnics = bp->max_vnics;
521 
522 	/* MTU specifics */
523 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
524 	dev_info->max_mtu = BNXT_MAX_MTU;
525 
526 	/* Fast path specifics */
527 	dev_info->min_rx_bufsize = 1;
528 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
529 
530 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
531 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
532 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
533 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
534 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
535 
536 	/* *INDENT-OFF* */
537 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
538 		.rx_thresh = {
539 			.pthresh = 8,
540 			.hthresh = 8,
541 			.wthresh = 0,
542 		},
543 		.rx_free_thresh = 32,
544 		/* If no descriptors available, pkts are dropped by default */
545 		.rx_drop_en = 1,
546 	};
547 
548 	dev_info->default_txconf = (struct rte_eth_txconf) {
549 		.tx_thresh = {
550 			.pthresh = 32,
551 			.hthresh = 0,
552 			.wthresh = 0,
553 		},
554 		.tx_free_thresh = 32,
555 		.tx_rs_thresh = 32,
556 	};
557 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
558 
559 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
560 	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
561 	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
562 	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
563 	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
564 
565 	/* *INDENT-ON* */
566 
567 	/*
568 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
569 	 *       need further investigation.
570 	 */
571 
572 	/* VMDq resources */
573 	vpool = 64; /* ETH_64_POOLS */
574 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
575 	for (i = 0; i < 4; vpool >>= 1, i++) {
576 		if (max_vnics > vpool) {
577 			for (j = 0; j < 5; vrxq >>= 1, j++) {
578 				if (dev_info->max_rx_queues > vrxq) {
579 					if (vpool > vrxq)
580 						vpool = vrxq;
581 					goto found;
582 				}
583 			}
584 			/* Not enough resources to support VMDq */
585 			break;
586 		}
587 	}
588 	/* Not enough resources to support VMDq */
589 	vpool = 0;
590 	vrxq = 0;
591 found:
592 	dev_info->max_vmdq_pools = vpool;
593 	dev_info->vmdq_queue_num = vrxq;
594 
595 	dev_info->vmdq_pool_base = 0;
596 	dev_info->vmdq_queue_base = 0;
597 
598 	return 0;
599 }
600 
601 /* Configure the device based on the configuration provided */
602 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
603 {
604 	struct bnxt *bp = eth_dev->data->dev_private;
605 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
606 	int rc;
607 
608 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
609 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
610 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
611 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
612 
613 	rc = is_bnxt_in_error(bp);
614 	if (rc)
615 		return rc;
616 
617 	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
618 		rc = bnxt_hwrm_check_vf_rings(bp);
619 		if (rc) {
620 			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
621 			return -ENOSPC;
622 		}
623 
624 		/* If a resource has already been allocated - in this case
625 		 * the async completion ring - free it. Reallocate it after
626 		 * resource reservation. This will ensure the resource counts
627 		 * are calculated correctly.
628 		 */
629 
630 		pthread_mutex_lock(&bp->def_cp_lock);
631 
632 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
633 			bnxt_disable_int(bp);
634 			bnxt_free_cp_ring(bp, bp->async_cp_ring);
635 		}
636 
637 		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
638 		if (rc) {
639 			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
640 			pthread_mutex_unlock(&bp->def_cp_lock);
641 			return -ENOSPC;
642 		}
643 
644 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
645 			rc = bnxt_alloc_async_cp_ring(bp);
646 			if (rc) {
647 				pthread_mutex_unlock(&bp->def_cp_lock);
648 				return rc;
649 			}
650 			bnxt_enable_int(bp);
651 		}
652 
653 		pthread_mutex_unlock(&bp->def_cp_lock);
654 	} else {
655 		/* legacy driver needs to get updated values */
656 		rc = bnxt_hwrm_func_qcaps(bp);
657 		if (rc) {
658 			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
659 			return rc;
660 		}
661 	}
662 
663 	/* Inherit new configurations */
664 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
665 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
666 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
667 		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
668 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
669 	    bp->max_stat_ctx)
670 		goto resource_error;
671 
672 	if (BNXT_HAS_RING_GRPS(bp) &&
673 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
674 		goto resource_error;
675 
676 	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
677 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
678 		goto resource_error;
679 
680 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
681 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
682 
683 	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
684 		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
685 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
686 
687 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
688 		eth_dev->data->mtu =
689 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
690 			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
691 			BNXT_NUM_VLANS;
692 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
693 	}
694 	return 0;
695 
696 resource_error:
697 	PMD_DRV_LOG(ERR,
698 		    "Insufficient resources to support requested config\n");
699 	PMD_DRV_LOG(ERR,
700 		    "Num Queues Requested: Tx %d, Rx %d\n",
701 		    eth_dev->data->nb_tx_queues,
702 		    eth_dev->data->nb_rx_queues);
703 	PMD_DRV_LOG(ERR,
704 		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
705 		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
706 		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
707 	return -ENOSPC;
708 }
709 
710 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
711 {
712 	struct rte_eth_link *link = &eth_dev->data->dev_link;
713 
714 	if (link->link_status)
715 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
716 			eth_dev->data->port_id,
717 			(uint32_t)link->link_speed,
718 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
719 			("full-duplex") : ("half-duplex"));
720 	else
721 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
722 			eth_dev->data->port_id);
723 }
724 
725 /*
726  * Determine whether the current configuration requires support for scattered
727  * receive; return 1 if scattered receive is required and 0 if not.
728  */
729 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
730 {
731 	uint16_t buf_size;
732 	int i;
733 
734 	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
735 		return 1;
736 
737 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
738 		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
739 
740 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
741 				      RTE_PKTMBUF_HEADROOM);
742 		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
743 			return 1;
744 	}
745 	return 0;
746 }
747 
748 static eth_rx_burst_t
749 bnxt_receive_function(__rte_unused struct rte_eth_dev *eth_dev)
750 {
751 #ifdef RTE_ARCH_X86
752 #ifndef RTE_LIBRTE_IEEE1588
753 	/*
754 	 * Vector mode receive can be enabled only if scatter rx is not
755 	 * in use and rx offloads are limited to the subset checked below
756 	 * (VLAN strip/filter, CRC keep, jumbo frames, checksums, RSS hash).
757 	 */
758 	if (!eth_dev->data->scattered_rx &&
759 	    !(eth_dev->data->dev_conf.rxmode.offloads &
760 	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
761 		DEV_RX_OFFLOAD_KEEP_CRC |
762 		DEV_RX_OFFLOAD_JUMBO_FRAME |
763 		DEV_RX_OFFLOAD_IPV4_CKSUM |
764 		DEV_RX_OFFLOAD_UDP_CKSUM |
765 		DEV_RX_OFFLOAD_TCP_CKSUM |
766 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
767 		DEV_RX_OFFLOAD_RSS_HASH |
768 		DEV_RX_OFFLOAD_VLAN_FILTER))) {
769 		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
770 			    eth_dev->data->port_id);
771 		return bnxt_recv_pkts_vec;
772 	}
773 	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
774 		    eth_dev->data->port_id);
775 	PMD_DRV_LOG(INFO,
776 		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
777 		    eth_dev->data->port_id,
778 		    eth_dev->data->scattered_rx,
779 		    eth_dev->data->dev_conf.rxmode.offloads);
780 #endif
781 #endif
782 	return bnxt_recv_pkts;
783 }
784 
785 static eth_tx_burst_t
786 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
787 {
788 #ifdef RTE_ARCH_X86
789 #ifndef RTE_LIBRTE_IEEE1588
790 	/*
791 	 * Vector mode transmit can be enabled only if not using scatter rx
792 	 * or tx offloads.
793 	 */
794 	if (!eth_dev->data->scattered_rx &&
795 	    !eth_dev->data->dev_conf.txmode.offloads) {
796 		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
797 			    eth_dev->data->port_id);
798 		return bnxt_xmit_pkts_vec;
799 	}
800 	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
801 		    eth_dev->data->port_id);
802 	PMD_DRV_LOG(INFO,
803 		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
804 		    eth_dev->data->port_id,
805 		    eth_dev->data->scattered_rx,
806 		    eth_dev->data->dev_conf.txmode.offloads);
807 #endif
808 #endif
809 	return bnxt_xmit_pkts;
810 }
811 
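/*
 * Called when bnxt_hwrm_if_change() reports that the firmware was
 * reset while the port was down: tear down and re-create the driver
 * resources so they are re-registered with the new firmware instance.
 */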
812 static int bnxt_handle_if_change_status(struct bnxt *bp)
813 {
814 	int rc;
815 
816 	/* Since fw has undergone a reset and lost all contexts,
817 	 * set fatal flag to not issue hwrm during cleanup
818 	 */
819 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
820 	bnxt_uninit_resources(bp, true);
821 
822 	/* clear fatal flag so that re-init happens */
823 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
824 	rc = bnxt_init_resources(bp, true);
825 
826 	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
827 
828 	return rc;
829 }
830 
831 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
832 {
833 	struct bnxt *bp = eth_dev->data->dev_private;
834 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
835 	int vlan_mask = 0;
836 	int rc;
837 
838 	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
839 		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
840 		return -EINVAL;
841 	}
842 
843 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
844 		PMD_DRV_LOG(ERR,
845 			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
846 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
847 	}
848 
849 	rc = bnxt_hwrm_if_change(bp, 1);
850 	if (!rc) {
851 		if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
852 			rc = bnxt_handle_if_change_status(bp);
853 			if (rc)
854 				return rc;
855 		}
856 	}
857 	bnxt_enable_int(bp);
858 
859 	rc = bnxt_init_chip(bp);
860 	if (rc)
861 		goto error;
862 
863 	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
864 	eth_dev->data->dev_started = 1;
865 
866 	bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
867 
868 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
869 		vlan_mask |= ETH_VLAN_FILTER_MASK;
870 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
871 		vlan_mask |= ETH_VLAN_STRIP_MASK;
872 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
873 	if (rc)
874 		goto error;
875 
876 	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
877 	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
878 
879 	pthread_mutex_lock(&bp->def_cp_lock);
880 	bnxt_schedule_fw_health_check(bp);
881 	pthread_mutex_unlock(&bp->def_cp_lock);
882 	return 0;
883 
884 error:
885 	bnxt_hwrm_if_change(bp, 0);
886 	bnxt_shutdown_nic(bp);
887 	bnxt_free_tx_mbufs(bp);
888 	bnxt_free_rx_mbufs(bp);
889 	eth_dev->data->dev_started = 0;
890 	return rc;
891 }
892 
893 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
894 {
895 	struct bnxt *bp = eth_dev->data->dev_private;
896 	int rc = 0;
897 
898 	if (!bp->link_info.link_up)
899 		rc = bnxt_set_hwrm_link_config(bp, true);
900 	if (!rc)
901 		eth_dev->data->dev_link.link_status = 1;
902 
903 	bnxt_print_link_info(eth_dev);
904 	return rc;
905 }
906 
907 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
908 {
909 	struct bnxt *bp = eth_dev->data->dev_private;
910 
911 	eth_dev->data->dev_link.link_status = 0;
912 	bnxt_set_hwrm_link_config(bp, false);
913 	bp->link_info.link_up = 0;
914 
915 	return 0;
916 }
917 
918 /* Unload the driver, release resources */
919 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
920 {
921 	struct bnxt *bp = eth_dev->data->dev_private;
922 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
923 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
924 
925 	eth_dev->data->dev_started = 0;
926 	/* Prevent crashes when queues are still in use */
927 	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
928 	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
929 
930 	bnxt_disable_int(bp);
931 
932 	/* disable uio/vfio intr/eventfd mapping */
933 	rte_intr_disable(intr_handle);
934 
935 	bnxt_cancel_fw_health_check(bp);
936 
937 	bnxt_dev_set_link_down_op(eth_dev);
938 
939 	/* Wait for link to be reset and the async notification to process.
940 	 * During reset recovery, there is no need to wait and
941 	 * VF/NPAR functions do not have privilege to change PHY config.
942 	 */
943 	if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
944 		bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
945 
946 	/* Clean queue intr-vector mapping */
947 	rte_intr_efd_disable(intr_handle);
948 	if (intr_handle->intr_vec != NULL) {
949 		rte_free(intr_handle->intr_vec);
950 		intr_handle->intr_vec = NULL;
951 	}
952 
953 	bnxt_hwrm_port_clr_stats(bp);
954 	bnxt_free_tx_mbufs(bp);
955 	bnxt_free_rx_mbufs(bp);
956 	/* Process any remaining notifications in default completion queue */
957 	bnxt_int_handler(eth_dev);
958 	bnxt_shutdown_nic(bp);
959 	bnxt_hwrm_if_change(bp, 0);
960 	bp->rx_cosq_cnt = 0;
961 }
962 
963 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
964 {
965 	struct bnxt *bp = eth_dev->data->dev_private;
966 
967 	/* cancel the recovery handler before remove dev */
968 	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
969 	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
970 
971 	if (eth_dev->data->dev_started)
972 		bnxt_dev_stop_op(eth_dev);
973 
974 	if (eth_dev->data->mac_addrs != NULL) {
975 		rte_free(eth_dev->data->mac_addrs);
976 		eth_dev->data->mac_addrs = NULL;
977 	}
978 	if (bp->grp_info != NULL) {
979 		rte_free(bp->grp_info);
980 		bp->grp_info = NULL;
981 	}
982 
983 	bnxt_dev_uninit(eth_dev);
984 }
985 
986 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
987 				    uint32_t index)
988 {
989 	struct bnxt *bp = eth_dev->data->dev_private;
990 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
991 	struct bnxt_vnic_info *vnic;
992 	struct bnxt_filter_info *filter, *temp_filter;
993 	uint32_t i;
994 
995 	if (is_bnxt_in_error(bp))
996 		return;
997 
998 	/*
999 	 * Loop through all VNICs from the specified filter flow pools to
1000 	 * remove the corresponding MAC addr filter
1001 	 */
1002 	for (i = 0; i < bp->nr_vnics; i++) {
1003 		if (!(pool_mask & (1ULL << i)))
1004 			continue;
1005 
1006 		vnic = &bp->vnic_info[i];
1007 		filter = STAILQ_FIRST(&vnic->filter);
1008 		while (filter) {
1009 			temp_filter = STAILQ_NEXT(filter, next);
1010 			if (filter->mac_index == index) {
1011 				STAILQ_REMOVE(&vnic->filter, filter,
1012 						bnxt_filter_info, next);
1013 				bnxt_hwrm_clear_l2_filter(bp, filter);
1014 				filter->mac_index = INVALID_MAC_INDEX;
1015 				memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
1016 				bnxt_free_filter(bp, filter);
1017 			}
1018 			filter = temp_filter;
1019 		}
1020 	}
1021 }
1022 
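/*
 * Create an L2 (MAC) filter on the given VNIC for the requested MAC
 * address, unless a filter with the same mac_index already exists.
 * mac_index 0 (the default MAC) is kept at the head of the filter list.
 */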
1023 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1024 			       struct rte_ether_addr *mac_addr, uint32_t index,
1025 			       uint32_t pool)
1026 {
1027 	struct bnxt_filter_info *filter;
1028 	int rc = 0;
1029 
1030 	/* Attach requested MAC address to the new l2_filter */
1031 	STAILQ_FOREACH(filter, &vnic->filter, next) {
1032 		if (filter->mac_index == index) {
1033 			PMD_DRV_LOG(DEBUG,
1034 				    "MAC addr already exists for pool %d\n",
1035 				    pool);
1036 			return 0;
1037 		}
1038 	}
1039 
1040 	filter = bnxt_alloc_filter(bp);
1041 	if (!filter) {
1042 		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
1043 		return -ENODEV;
1044 	}
1045 
1046 	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
1047 	 * if the MAC being programmed now is a different one,
1048 	 * copy that address to filter->l2_addr.
1049 	 */
1050 	if (mac_addr)
1051 		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1052 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1053 
1054 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1055 	if (!rc) {
1056 		filter->mac_index = index;
1057 		if (filter->mac_index == 0)
1058 			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1059 		else
1060 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1061 	} else {
1062 		memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
1063 		bnxt_free_filter(bp, filter);
1064 	}
1065 
1066 	return rc;
1067 }
1068 
1069 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
1070 				struct rte_ether_addr *mac_addr,
1071 				uint32_t index, uint32_t pool)
1072 {
1073 	struct bnxt *bp = eth_dev->data->dev_private;
1074 	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
1075 	int rc = 0;
1076 
1077 	rc = is_bnxt_in_error(bp);
1078 	if (rc)
1079 		return rc;
1080 
1081 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1082 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1083 		return -ENOTSUP;
1084 	}
1085 
1086 	if (!vnic) {
1087 		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1088 		return -EINVAL;
1089 	}
1090 
1091 	/* Filter settings will get applied when port is started */
1092 	if (!eth_dev->data->dev_started)
1093 		return 0;
1094 
1095 	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1096 
1097 	return rc;
1098 }
1099 
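/*
 * Poll the firmware for link status until it matches exp_link_status
 * or the wait count expires, then publish the result and raise an LSC
 * event if the link status or speed changed.
 */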
1100 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1101 		     bool exp_link_status)
1102 {
1103 	int rc = 0;
1104 	struct bnxt *bp = eth_dev->data->dev_private;
1105 	struct rte_eth_link new;
1106 	int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1107 		  BNXT_LINK_DOWN_WAIT_CNT;
1108 
1109 	rc = is_bnxt_in_error(bp);
1110 	if (rc)
1111 		return rc;
1112 
1113 	memset(&new, 0, sizeof(new));
1114 	do {
1115 		/* Retrieve link info from hardware */
1116 		rc = bnxt_get_hwrm_link_config(bp, &new);
1117 		if (rc) {
1118 			new.link_speed = ETH_SPEED_NUM_100M;
1119 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
1120 			PMD_DRV_LOG(ERR,
1121 				"Failed to retrieve link rc = 0x%x!\n", rc);
1122 			goto out;
1123 		}
1124 
1125 		if (!wait_to_complete || new.link_status == exp_link_status)
1126 			break;
1127 
1128 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1129 	} while (cnt--);
1130 
1131 out:
1132 	/* Timed out or success */
1133 	if (new.link_status != eth_dev->data->dev_link.link_status ||
1134 	new.link_speed != eth_dev->data->dev_link.link_speed) {
1135 		rte_eth_linkstatus_set(eth_dev, &new);
1136 
1137 		_rte_eth_dev_callback_process(eth_dev,
1138 					      RTE_ETH_EVENT_INTR_LSC,
1139 					      NULL);
1140 
1141 		bnxt_print_link_info(eth_dev);
1142 	}
1143 
1144 	return rc;
1145 }
1146 
1147 static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1148 			       int wait_to_complete)
1149 {
1150 	return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1151 }
1152 
1153 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1154 {
1155 	struct bnxt *bp = eth_dev->data->dev_private;
1156 	struct bnxt_vnic_info *vnic;
1157 	uint32_t old_flags;
1158 	int rc;
1159 
1160 	rc = is_bnxt_in_error(bp);
1161 	if (rc)
1162 		return rc;
1163 
1164 	/* Filter settings will get applied when port is started */
1165 	if (!eth_dev->data->dev_started)
1166 		return 0;
1167 
1168 	if (bp->vnic_info == NULL)
1169 		return 0;
1170 
1171 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1172 
1173 	old_flags = vnic->flags;
1174 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1175 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1176 	if (rc != 0)
1177 		vnic->flags = old_flags;
1178 
1179 	return rc;
1180 }
1181 
1182 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1183 {
1184 	struct bnxt *bp = eth_dev->data->dev_private;
1185 	struct bnxt_vnic_info *vnic;
1186 	uint32_t old_flags;
1187 	int rc;
1188 
1189 	rc = is_bnxt_in_error(bp);
1190 	if (rc)
1191 		return rc;
1192 
1193 	/* Filter settings will get applied when port is started */
1194 	if (!eth_dev->data->dev_started)
1195 		return 0;
1196 
1197 	if (bp->vnic_info == NULL)
1198 		return 0;
1199 
1200 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1201 
1202 	old_flags = vnic->flags;
1203 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1204 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1205 	if (rc != 0)
1206 		vnic->flags = old_flags;
1207 
1208 	return rc;
1209 }
1210 
1211 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1212 {
1213 	struct bnxt *bp = eth_dev->data->dev_private;
1214 	struct bnxt_vnic_info *vnic;
1215 	uint32_t old_flags;
1216 	int rc;
1217 
1218 	rc = is_bnxt_in_error(bp);
1219 	if (rc)
1220 		return rc;
1221 
1222 	/* Filter settings will get applied when port is started */
1223 	if (!eth_dev->data->dev_started)
1224 		return 0;
1225 
1226 	if (bp->vnic_info == NULL)
1227 		return 0;
1228 
1229 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1230 
1231 	old_flags = vnic->flags;
1232 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1233 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1234 	if (rc != 0)
1235 		vnic->flags = old_flags;
1236 
1237 	return rc;
1238 }
1239 
1240 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1241 {
1242 	struct bnxt *bp = eth_dev->data->dev_private;
1243 	struct bnxt_vnic_info *vnic;
1244 	uint32_t old_flags;
1245 	int rc;
1246 
1247 	rc = is_bnxt_in_error(bp);
1248 	if (rc)
1249 		return rc;
1250 
1251 	/* Filter settings will get applied when port is started */
1252 	if (!eth_dev->data->dev_started)
1253 		return 0;
1254 
1255 	if (bp->vnic_info == NULL)
1256 		return 0;
1257 
1258 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1259 
1260 	old_flags = vnic->flags;
1261 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1262 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1263 	if (rc != 0)
1264 		vnic->flags = old_flags;
1265 
1266 	return rc;
1267 }
1268 
1269 /* Return the bnxt_rx_queue pointer corresponding to a given queue index. */
1270 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1271 {
1272 	if (qid >= bp->rx_nr_rings)
1273 		return NULL;
1274 
1275 	return bp->eth_dev->data->rx_queues[qid];
1276 }
1277 
1278 /* Return rxq corresponding to a given rss table ring/group ID. */
1279 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1280 {
1281 	struct bnxt_rx_queue *rxq;
1282 	unsigned int i;
1283 
1284 	if (!BNXT_HAS_RING_GRPS(bp)) {
1285 		for (i = 0; i < bp->rx_nr_rings; i++) {
1286 			rxq = bp->eth_dev->data->rx_queues[i];
1287 			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1288 				return rxq->index;
1289 		}
1290 	} else {
1291 		for (i = 0; i < bp->rx_nr_rings; i++) {
1292 			if (bp->grp_info[i].fw_grp_id == fwr)
1293 				return i;
1294 		}
1295 	}
1296 
1297 	return INVALID_HW_RING_ID;
1298 }
1299 
1300 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1301 			    struct rte_eth_rss_reta_entry64 *reta_conf,
1302 			    uint16_t reta_size)
1303 {
1304 	struct bnxt *bp = eth_dev->data->dev_private;
1305 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1306 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1307 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1308 	uint16_t idx, sft;
1309 	int i, rc;
1310 
1311 	rc = is_bnxt_in_error(bp);
1312 	if (rc)
1313 		return rc;
1314 
1315 	if (!vnic->rss_table)
1316 		return -EINVAL;
1317 
1318 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1319 		return -EINVAL;
1320 
1321 	if (reta_size != tbl_size) {
1322 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1323 			"(%d) must equal the size supported by the hardware "
1324 			"(%d)\n", reta_size, tbl_size);
1325 		return -EINVAL;
1326 	}
1327 
1328 	for (i = 0; i < reta_size; i++) {
1329 		struct bnxt_rx_queue *rxq;
1330 
1331 		idx = i / RTE_RETA_GROUP_SIZE;
1332 		sft = i % RTE_RETA_GROUP_SIZE;
1333 
1334 		if (!(reta_conf[idx].mask & (1ULL << sft)))
1335 			continue;
1336 
1337 		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1338 		if (!rxq) {
1339 			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1340 			return -EINVAL;
1341 		}
1342 
1343 		if (BNXT_CHIP_THOR(bp)) {
1344 			vnic->rss_table[i * 2] =
1345 				rxq->rx_ring->rx_ring_struct->fw_ring_id;
1346 			vnic->rss_table[i * 2 + 1] =
1347 				rxq->cp_ring->cp_ring_struct->fw_ring_id;
1348 		} else {
1349 			vnic->rss_table[i] =
1350 			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1351 		}
1352 	}
1353 
1354 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1355 	return 0;
1356 }
1357 
1358 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1359 			      struct rte_eth_rss_reta_entry64 *reta_conf,
1360 			      uint16_t reta_size)
1361 {
1362 	struct bnxt *bp = eth_dev->data->dev_private;
1363 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1364 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1365 	uint16_t idx, sft, i;
1366 	int rc;
1367 
1368 	rc = is_bnxt_in_error(bp);
1369 	if (rc)
1370 		return rc;
1371 
1372 	/* Retrieve from the default VNIC */
1373 	if (!vnic)
1374 		return -EINVAL;
1375 	if (!vnic->rss_table)
1376 		return -EINVAL;
1377 
1378 	if (reta_size != tbl_size) {
1379 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1380 			"(%d) must equal the size supported by the hardware "
1381 			"(%d)\n", reta_size, tbl_size);
1382 		return -EINVAL;
1383 	}
1384 
1385 	for (idx = 0, i = 0; i < reta_size; i++) {
1386 		idx = i / RTE_RETA_GROUP_SIZE;
1387 		sft = i % RTE_RETA_GROUP_SIZE;
1388 
1389 		if (reta_conf[idx].mask & (1ULL << sft)) {
1390 			uint16_t qid;
1391 
1392 			if (BNXT_CHIP_THOR(bp))
1393 				qid = bnxt_rss_to_qid(bp,
1394 						      vnic->rss_table[i * 2]);
1395 			else
1396 				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1397 
1398 			if (qid == INVALID_HW_RING_ID) {
1399 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1400 				return -EINVAL;
1401 			}
1402 			reta_conf[idx].reta[sft] = qid;
1403 		}
1404 	}
1405 
1406 	return 0;
1407 }
1408 
1409 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1410 				   struct rte_eth_rss_conf *rss_conf)
1411 {
1412 	struct bnxt *bp = eth_dev->data->dev_private;
1413 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1414 	struct bnxt_vnic_info *vnic;
1415 	int rc;
1416 
1417 	rc = is_bnxt_in_error(bp);
1418 	if (rc)
1419 		return rc;
1420 
1421 	/*
1422 	 * If the RSS enablement requested here differs from what was set
1423 	 * in dev_configure, return -EINVAL.
1424 	 */
1425 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1426 		if (!rss_conf->rss_hf)
1427 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
1428 	} else {
1429 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1430 			return -EINVAL;
1431 	}
1432 
1433 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
1434 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
1435 
1436 	/* Update the default RSS VNIC(s) */
1437 	vnic = &bp->vnic_info[0];
1438 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1439 
1440 	/*
1441 	 * If hashkey is not specified, use the previously configured
1442 	 * hashkey
1443 	 */
1444 	if (!rss_conf->rss_key)
1445 		goto rss_config;
1446 
1447 	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1448 		PMD_DRV_LOG(ERR,
1449 			    "Invalid hashkey length, should be 40 bytes\n");
1450 		return -EINVAL;
1451 	}
1452 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1453 
1454 rss_config:
1455 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1456 	return 0;
1457 }
1458 
1459 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1460 				     struct rte_eth_rss_conf *rss_conf)
1461 {
1462 	struct bnxt *bp = eth_dev->data->dev_private;
1463 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1464 	int len, rc;
1465 	uint32_t hash_types;
1466 
1467 	rc = is_bnxt_in_error(bp);
1468 	if (rc)
1469 		return rc;
1470 
1471 	/* RSS configuration is the same for all VNICs */
1472 	if (vnic && vnic->rss_hash_key) {
1473 		if (rss_conf->rss_key) {
1474 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1475 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1476 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1477 		}
1478 
1479 		hash_types = vnic->hash_type;
1480 		rss_conf->rss_hf = 0;
1481 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1482 			rss_conf->rss_hf |= ETH_RSS_IPV4;
1483 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1484 		}
1485 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1486 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1487 			hash_types &=
1488 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1489 		}
1490 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1491 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1492 			hash_types &=
1493 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1494 		}
1495 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1496 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1497 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1498 		}
1499 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1500 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1501 			hash_types &=
1502 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1503 		}
1504 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1505 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1506 			hash_types &=
1507 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1508 		}
1509 		if (hash_types) {
1510 			PMD_DRV_LOG(ERR,
1511 				"Unknown RSS config from firmware (%08x), RSS disabled",
1512 				vnic->hash_type);
1513 			return -ENOTSUP;
1514 		}
1515 	} else {
1516 		rss_conf->rss_hf = 0;
1517 	}
1518 	return 0;
1519 }
1520 
1521 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1522 			       struct rte_eth_fc_conf *fc_conf)
1523 {
1524 	struct bnxt *bp = dev->data->dev_private;
1525 	struct rte_eth_link link_info;
1526 	int rc;
1527 
1528 	rc = is_bnxt_in_error(bp);
1529 	if (rc)
1530 		return rc;
1531 
1532 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1533 	if (rc)
1534 		return rc;
1535 
1536 	memset(fc_conf, 0, sizeof(*fc_conf));
1537 	if (bp->link_info.auto_pause)
1538 		fc_conf->autoneg = 1;
1539 	switch (bp->link_info.pause) {
1540 	case 0:
1541 		fc_conf->mode = RTE_FC_NONE;
1542 		break;
1543 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1544 		fc_conf->mode = RTE_FC_TX_PAUSE;
1545 		break;
1546 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1547 		fc_conf->mode = RTE_FC_RX_PAUSE;
1548 		break;
1549 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1550 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1551 		fc_conf->mode = RTE_FC_FULL;
1552 		break;
1553 	}
1554 	return 0;
1555 }
1556 
1557 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1558 			       struct rte_eth_fc_conf *fc_conf)
1559 {
1560 	struct bnxt *bp = dev->data->dev_private;
1561 	int rc;
1562 
1563 	rc = is_bnxt_in_error(bp);
1564 	if (rc)
1565 		return rc;
1566 
1567 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1568 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1569 		return -ENOTSUP;
1570 	}
1571 
1572 	switch (fc_conf->mode) {
1573 	case RTE_FC_NONE:
1574 		bp->link_info.auto_pause = 0;
1575 		bp->link_info.force_pause = 0;
1576 		break;
1577 	case RTE_FC_RX_PAUSE:
1578 		if (fc_conf->autoneg) {
1579 			bp->link_info.auto_pause =
1580 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1581 			bp->link_info.force_pause = 0;
1582 		} else {
1583 			bp->link_info.auto_pause = 0;
1584 			bp->link_info.force_pause =
1585 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1586 		}
1587 		break;
1588 	case RTE_FC_TX_PAUSE:
1589 		if (fc_conf->autoneg) {
1590 			bp->link_info.auto_pause =
1591 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1592 			bp->link_info.force_pause = 0;
1593 		} else {
1594 			bp->link_info.auto_pause = 0;
1595 			bp->link_info.force_pause =
1596 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1597 		}
1598 		break;
1599 	case RTE_FC_FULL:
1600 		if (fc_conf->autoneg) {
1601 			bp->link_info.auto_pause =
1602 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1603 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1604 			bp->link_info.force_pause = 0;
1605 		} else {
1606 			bp->link_info.auto_pause = 0;
1607 			bp->link_info.force_pause =
1608 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1609 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1610 		}
1611 		break;
1612 	}
1613 	return bnxt_set_hwrm_link_config(bp, true);
1614 }
1615 
1616 /* Add UDP tunneling port */
1617 static int
1618 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1619 			 struct rte_eth_udp_tunnel *udp_tunnel)
1620 {
1621 	struct bnxt *bp = eth_dev->data->dev_private;
1622 	uint16_t tunnel_type = 0;
1623 	int rc = 0;
1624 
1625 	rc = is_bnxt_in_error(bp);
1626 	if (rc)
1627 		return rc;
1628 
1629 	switch (udp_tunnel->prot_type) {
1630 	case RTE_TUNNEL_TYPE_VXLAN:
1631 		if (bp->vxlan_port_cnt) {
1632 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1633 				udp_tunnel->udp_port);
1634 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1635 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1636 				return -ENOSPC;
1637 			}
1638 			bp->vxlan_port_cnt++;
1639 			return 0;
1640 		}
1641 		tunnel_type =
1642 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1643 		bp->vxlan_port_cnt++;
1644 		break;
1645 	case RTE_TUNNEL_TYPE_GENEVE:
1646 		if (bp->geneve_port_cnt) {
1647 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1648 				udp_tunnel->udp_port);
1649 			if (bp->geneve_port != udp_tunnel->udp_port) {
1650 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1651 				return -ENOSPC;
1652 			}
1653 			bp->geneve_port_cnt++;
1654 			return 0;
1655 		}
1656 		tunnel_type =
1657 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1658 		bp->geneve_port_cnt++;
1659 		break;
1660 	default:
1661 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1662 		return -ENOTSUP;
1663 	}
1664 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1665 					     tunnel_type);
1666 	return rc;
1667 }
1668 
1669 static int
1670 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1671 			 struct rte_eth_udp_tunnel *udp_tunnel)
1672 {
1673 	struct bnxt *bp = eth_dev->data->dev_private;
1674 	uint16_t tunnel_type = 0;
1675 	uint16_t port = 0;
1676 	int rc = 0;
1677 
1678 	rc = is_bnxt_in_error(bp);
1679 	if (rc)
1680 		return rc;
1681 
1682 	switch (udp_tunnel->prot_type) {
1683 	case RTE_TUNNEL_TYPE_VXLAN:
1684 		if (!bp->vxlan_port_cnt) {
1685 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1686 			return -EINVAL;
1687 		}
1688 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1689 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1690 				udp_tunnel->udp_port, bp->vxlan_port);
1691 			return -EINVAL;
1692 		}
1693 		if (--bp->vxlan_port_cnt)
1694 			return 0;
1695 
1696 		tunnel_type =
1697 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1698 		port = bp->vxlan_fw_dst_port_id;
1699 		break;
1700 	case RTE_TUNNEL_TYPE_GENEVE:
1701 		if (!bp->geneve_port_cnt) {
1702 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1703 			return -EINVAL;
1704 		}
1705 		if (bp->geneve_port != udp_tunnel->udp_port) {
1706 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1707 				udp_tunnel->udp_port, bp->geneve_port);
1708 			return -EINVAL;
1709 		}
1710 		if (--bp->geneve_port_cnt)
1711 			return 0;
1712 
1713 		tunnel_type =
1714 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1715 		port = bp->geneve_fw_dst_port_id;
1716 		break;
1717 	default:
1718 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1719 		return -ENOTSUP;
1720 	}
1721 
1722 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1723 	if (!rc) {
1724 		if (tunnel_type ==
1725 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1726 			bp->vxlan_port = 0;
1727 		if (tunnel_type ==
1728 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1729 			bp->geneve_port = 0;
1730 	}
1731 	return rc;
1732 }
1733 
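/*
 * Remove the MAC+VLAN filter matching vlan_id from the default VNIC,
 * clearing it in hardware first. Returns -ENOENT if no such filter
 * is programmed.
 */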
1734 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1735 {
1736 	struct bnxt_filter_info *filter;
1737 	struct bnxt_vnic_info *vnic;
1738 	int rc = 0;
1739 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1740 
1741 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1742 	filter = STAILQ_FIRST(&vnic->filter);
1743 	while (filter) {
1744 		/* Search for this matching MAC+VLAN filter */
1745 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
1746 			/* Delete the filter */
1747 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1748 			if (rc)
1749 				return rc;
1750 			STAILQ_REMOVE(&vnic->filter, filter,
1751 				      bnxt_filter_info, next);
1752 			bnxt_free_filter(bp, filter);
1753 			PMD_DRV_LOG(INFO,
1754 				    "Deleted vlan filter for %d\n",
1755 				    vlan_id);
1756 			return 0;
1757 		}
1758 		filter = STAILQ_NEXT(filter, next);
1759 	}
1760 	return -ENOENT;
1761 }
1762 
1763 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1764 {
1765 	struct bnxt_filter_info *filter;
1766 	struct bnxt_vnic_info *vnic;
1767 	int rc = 0;
1768 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
1769 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
1770 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1771 
1772 	/* Implementation notes on the use of VNIC in this command:
1773 	 *
1774 	 * By default, these filters belong to default vnic for the function.
1775 	 * Once these filters are set up, only destination VNIC can be modified.
1776 	 * If the destination VNIC is not specified in this command,
1777 	 * then the HWRM shall only create an l2 context id.
1778 	 */
1779 
1780 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1781 	filter = STAILQ_FIRST(&vnic->filter);
1782 	/* Check if the VLAN has already been added */
1783 	while (filter) {
1784 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
1785 			return -EEXIST;
1786 
1787 		filter = STAILQ_NEXT(filter, next);
1788 	}
1789 
1790 	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
1791 	 * command to create MAC+VLAN filter with the right flags, enables set.
1792 	 */
1793 	filter = bnxt_alloc_filter(bp);
1794 	if (!filter) {
1795 		PMD_DRV_LOG(ERR,
1796 			    "MAC/VLAN filter alloc failed\n");
1797 		return -ENOMEM;
1798 	}
1799 	/* MAC + VLAN ID filter */
1800 	/* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
1801 	 * untagged packets are received
1802 	 *
1803 	 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged
1804 	 * packets and only the programmed vlan's packets are received
1805 	 */
1806 	filter->l2_ivlan = vlan_id;
1807 	filter->l2_ivlan_mask = 0x0FFF;
1808 	filter->enables |= en;
1809 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
1810 
1811 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1812 	if (rc) {
1813 		/* Free the newly allocated filter as we were
1814 		 * not able to create the filter in hardware.
1815 		 */
1816 		filter->fw_l2_filter_id = UINT64_MAX;
1817 		bnxt_free_filter(bp, filter);
1818 		return rc;
1819 	}
1820 
1821 	filter->mac_index = 0;
1822 	/* Add this new filter to the list */
1823 	if (vlan_id == 0)
1824 		STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
1825 	else
1826 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1827 
1828 	PMD_DRV_LOG(INFO,
1829 		    "Added Vlan filter for %d\n", vlan_id);
1830 	return rc;
1831 }
1832 
1833 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1834 		uint16_t vlan_id, int on)
1835 {
1836 	struct bnxt *bp = eth_dev->data->dev_private;
1837 	int rc;
1838 
1839 	rc = is_bnxt_in_error(bp);
1840 	if (rc)
1841 		return rc;
1842 
1843 	/* Add or remove the MAC+VLAN filter for this VLAN ID on the default VNIC */
1844 	if (on)
1845 		return bnxt_add_vlan_filter(bp, vlan_id);
1846 	else
1847 		return bnxt_del_vlan_filter(bp, vlan_id);
1848 }
1849 
1850 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
1851 				    struct bnxt_vnic_info *vnic)
1852 {
1853 	struct bnxt_filter_info *filter;
1854 	int rc;
1855 
1856 	filter = STAILQ_FIRST(&vnic->filter);
1857 	while (filter) {
1858 		if (filter->mac_index == 0 &&
1859 		    !memcmp(filter->l2_addr, bp->mac_addr,
1860 			    RTE_ETHER_ADDR_LEN)) {
1861 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1862 			if (!rc) {
1863 				STAILQ_REMOVE(&vnic->filter, filter,
1864 					      bnxt_filter_info, next);
1865 				filter->fw_l2_filter_id = UINT64_MAX;
1866 				bnxt_free_filter(bp, filter);
1867 			}
1868 			return rc;
1869 		}
1870 		filter = STAILQ_NEXT(filter, next);
1871 	}
1872 	return 0;
1873 }
1874 
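/* Apply the DEV_RX_OFFLOAD_VLAN_FILTER setting on the default VNIC: when
 * the offload is disabled, remove all per-VLAN filters and restore the
 * plain MAC filter; when enabled, drop the default MAC filter and install
 * a VLAN 0 filter so only untagged and explicitly programmed VLANs pass.
 */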
1875 static int
1876 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
1877 {
1878 	struct bnxt_vnic_info *vnic;
1879 	unsigned int i;
1880 	int rc;
1881 
1882 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1883 	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
1884 		/* Remove any VLAN filters programmed */
1885 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
1886 			bnxt_del_vlan_filter(bp, i);
1887 
1888 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
1889 		if (rc)
1890 			return rc;
1891 	} else {
1892 		/* The default filter accepts any packet that matches the
1893 		 * destination MAC address. It has to be deleted; otherwise,
1894 		 * when the hw-vlan-filter configuration is ON, we would end
1895 		 * up receiving VLAN packets for which no MAC+VLAN filter
1896 		 * has been programmed.
1897 		 */
1898 		bnxt_del_dflt_mac_filter(bp, vnic);
1899 		/* This filter will allow only untagged packets */
1900 		bnxt_add_vlan_filter(bp, 0);
1901 	}
1902 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
1903 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
1904 
1905 	return 0;
1906 }
1907 
1908 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
1909 {
1910 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
1911 	unsigned int i;
1912 	int rc;
1913 
1914 	/* Destroy vnic filters and vnic */
1915 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
1916 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
1917 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
1918 			bnxt_del_vlan_filter(bp, i);
1919 	}
1920 	bnxt_del_dflt_mac_filter(bp, vnic);
1921 
1922 	rc = bnxt_hwrm_vnic_free(bp, vnic);
1923 	if (rc)
1924 		return rc;
1925 
1926 	rte_free(vnic->fw_grp_ids);
1927 	vnic->fw_grp_ids = NULL;
1928 
1929 	return 0;
1930 }
1931 
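/* The VLAN strip setting is a VNIC attribute, so the default VNIC is
 * freed and set up again with the new offload flags; the MAC/VLAN filters
 * and the L2 RX mask are then reprogrammed for the rebuilt VNIC.
 */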
1932 static int
1933 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
1934 {
1935 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1936 	int rc;
1937 
1938 	/* Destroy, recreate and reconfigure the default vnic */
1939 	rc = bnxt_free_one_vnic(bp, 0);
1940 	if (rc)
1941 		return rc;
1942 
1943 	/* default vnic 0 */
1944 	rc = bnxt_setup_one_vnic(bp, 0);
1945 	if (rc)
1946 		return rc;
1947 
1948 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
1949 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
1950 		rc = bnxt_add_vlan_filter(bp, 0);
1951 		if (rc)
1952 			return rc;
1953 		rc = bnxt_restore_vlan_filters(bp);
1954 		if (rc)
1955 			return rc;
1956 	} else {
1957 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
1958 		if (rc)
1959 			return rc;
1960 	}
1961 
1962 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1963 	if (rc)
1964 		return rc;
1965 
1966 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
1967 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
1968 
1969 	return rc;
1970 }
1971 
1972 static int
1973 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1974 {
1975 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1976 	struct bnxt *bp = dev->data->dev_private;
1977 	int rc;
1978 
1979 	rc = is_bnxt_in_error(bp);
1980 	if (rc)
1981 		return rc;
1982 
1983 	/* Filter settings will get applied when port is started */
1984 	if (!dev->data->dev_started)
1985 		return 0;
1986 
1987 	if (mask & ETH_VLAN_FILTER_MASK) {
1988 		/* Enable or disable VLAN filtering */
1989 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
1990 		if (rc)
1991 			return rc;
1992 	}
1993 
1994 	if (mask & ETH_VLAN_STRIP_MASK) {
1995 		/* Enable or disable VLAN stripping */
1996 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
1997 		if (rc)
1998 			return rc;
1999 	}
2000 
2001 	if (mask & ETH_VLAN_EXTEND_MASK) {
2002 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2003 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2004 		else
2005 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2006 	}
2007 
2008 	return 0;
2009 }
2010 
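/* Select the outer TPID used for QinQ transmit offload.  The TPID is
 * encoded into bp->outer_tpid_bd with the TX_BD_LONG_CFA_META_VLAN_TPID_*
 * values intended for the long TX BD metadata; only the outer VLAN of a
 * QinQ frame can be accelerated, and QinQ must already be enabled.
 */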
2011 static int
2012 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2013 		      uint16_t tpid)
2014 {
2015 	struct bnxt *bp = dev->data->dev_private;
2016 	int qinq = dev->data->dev_conf.rxmode.offloads &
2017 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
2018 
2019 	if (vlan_type != ETH_VLAN_TYPE_INNER &&
2020 	    vlan_type != ETH_VLAN_TYPE_OUTER) {
2021 		PMD_DRV_LOG(ERR,
2022 			    "Unsupported vlan type\n");
2023 		return -EINVAL;
2024 	}
2025 	if (!qinq) {
2026 		PMD_DRV_LOG(ERR,
2027 			    "QinQ (extend VLAN) offload is not enabled; it must be "
2028 			    "on since only the outer VLAN can be accelerated\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2033 		switch (tpid) {
2034 		case RTE_ETHER_TYPE_QINQ:
2035 			bp->outer_tpid_bd =
2036 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2037 			break;
2038 		case RTE_ETHER_TYPE_VLAN:
2039 			bp->outer_tpid_bd =
2040 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2041 			break;
2042 		case 0x9100:
2043 			bp->outer_tpid_bd =
2044 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2045 			break;
2046 		case 0x9200:
2047 			bp->outer_tpid_bd =
2048 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2049 			break;
2050 		case 0x9300:
2051 			bp->outer_tpid_bd =
2052 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2053 			break;
2054 		default:
2055 			PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2056 			return -EINVAL;
2057 		}
2058 		bp->outer_tpid_bd |= tpid;
2059 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2060 	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2061 		PMD_DRV_LOG(ERR,
2062 			    "Can accelerate only outer vlan in QinQ\n");
2063 		return -EINVAL;
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 static int
2070 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2071 			     struct rte_ether_addr *addr)
2072 {
2073 	struct bnxt *bp = dev->data->dev_private;
2074 	/* Default Filter is tied to VNIC 0 */
2075 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
2076 	int rc;
2077 
2078 	rc = is_bnxt_in_error(bp);
2079 	if (rc)
2080 		return rc;
2081 
2082 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2083 		return -EPERM;
2084 
2085 	if (rte_is_zero_ether_addr(addr))
2086 		return -EINVAL;
2087 
2088 	/* Filter settings will get applied when port is started */
2089 	if (!dev->data->dev_started)
2090 		return 0;
2091 
2092 	/* Check if the requested MAC is already added */
2093 	if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2094 		return 0;
2095 
2096 	/* Destroy filter and re-create it */
2097 	bnxt_del_dflt_mac_filter(bp, vnic);
2098 
2099 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2100 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2101 		/* This filter will allow only untagged packets */
2102 		rc = bnxt_add_vlan_filter(bp, 0);
2103 	} else {
2104 		rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2105 	}
2106 
2107 	PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2108 	return rc;
2109 }
2110 
2111 static int
2112 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2113 			  struct rte_ether_addr *mc_addr_set,
2114 			  uint32_t nb_mc_addr)
2115 {
2116 	struct bnxt *bp = eth_dev->data->dev_private;
2117 	char *mc_addr_list = (char *)mc_addr_set;
2118 	struct bnxt_vnic_info *vnic;
2119 	uint32_t off = 0, i = 0;
2120 	int rc;
2121 
2122 	rc = is_bnxt_in_error(bp);
2123 	if (rc)
2124 		return rc;
2125 
2126 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2127 
2128 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2129 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2130 		goto allmulti;
2131 	}
2132 
2133 	/* TODO Check for Duplicate mcast addresses */
2134 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2135 	for (i = 0; i < nb_mc_addr; i++) {
2136 		memcpy(vnic->mc_list + off, &mc_addr_list[off],
2137 			RTE_ETHER_ADDR_LEN);
2138 		off += RTE_ETHER_ADDR_LEN;
2139 	}
2140 
2141 	vnic->mc_addr_cnt = i;
2142 	if (vnic->mc_addr_cnt)
2143 		vnic->flags |= BNXT_VNIC_INFO_MCAST;
2144 	else
2145 		vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2146 
2147 allmulti:
2148 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2149 }
2150 
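/* Report the running firmware version: bp->fw_ver packs the major, minor
 * and update numbers into its upper three bytes.
 */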
2151 static int
2152 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2153 {
2154 	struct bnxt *bp = dev->data->dev_private;
2155 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2156 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2157 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2158 	int ret;
2159 
2160 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
2161 			fw_major, fw_minor, fw_updt);
2162 
2163 	ret += 1; /* add the size of '\0' */
2164 	if (fw_size < (uint32_t)ret)
2165 		return ret;
2166 	else
2167 		return 0;
2168 }
2169 
2170 static void
2171 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2172 	struct rte_eth_rxq_info *qinfo)
2173 {
2174 	struct bnxt *bp = dev->data->dev_private;
2175 	struct bnxt_rx_queue *rxq;
2176 
2177 	if (is_bnxt_in_error(bp))
2178 		return;
2179 
2180 	rxq = dev->data->rx_queues[queue_id];
2181 
2182 	qinfo->mp = rxq->mb_pool;
2183 	qinfo->scattered_rx = dev->data->scattered_rx;
2184 	qinfo->nb_desc = rxq->nb_rx_desc;
2185 
2186 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2187 	qinfo->conf.rx_drop_en = 0;
2188 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2189 }
2190 
2191 static void
2192 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2193 	struct rte_eth_txq_info *qinfo)
2194 {
2195 	struct bnxt *bp = dev->data->dev_private;
2196 	struct bnxt_tx_queue *txq;
2197 
2198 	if (is_bnxt_in_error(bp))
2199 		return;
2200 
2201 	txq = dev->data->tx_queues[queue_id];
2202 
2203 	qinfo->nb_desc = txq->nb_tx_desc;
2204 
2205 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2206 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2207 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2208 
2209 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2210 	qinfo->conf.tx_rs_thresh = 0;
2211 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2212 }
2213 
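/* Change the port MTU: toggle the jumbo-frame offload flag, reprogram the
 * MRU of every VNIC, and reconfigure the VNIC buffer placement mode when
 * the new MTU no longer fits in a single RX mbuf.  With vector-mode burst
 * functions active, MTUs requiring scattered RX are rejected.
 */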
2214 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2215 {
2216 	struct bnxt *bp = eth_dev->data->dev_private;
2217 	uint32_t new_pkt_size;
2218 	int rc = 0;
2219 	uint32_t i;
2220 
2221 	rc = is_bnxt_in_error(bp);
2222 	if (rc)
2223 		return rc;
2224 
2225 	/* Exit if receive queues are not configured yet */
2226 	if (!eth_dev->data->nb_rx_queues)
2227 		return rc;
2228 
2229 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2230 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2231 
2232 #ifdef RTE_ARCH_X86
2233 	/*
2234 	 * If vector-mode tx/rx is active, disallow any MTU change that would
2235 	 * require scattered receive support.
2236 	 */
2237 	if (eth_dev->data->dev_started &&
2238 	    (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2239 	     eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2240 	    (new_pkt_size >
2241 	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2242 		PMD_DRV_LOG(ERR,
2243 			    "MTU change would require scattered rx support.\n");
2244 		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2245 		return -EINVAL;
2246 	}
2247 #endif
2248 
2249 	if (new_mtu > RTE_ETHER_MTU) {
2250 		bp->flags |= BNXT_FLAG_JUMBO;
2251 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
2252 			DEV_RX_OFFLOAD_JUMBO_FRAME;
2253 	} else {
2254 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
2255 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
2256 		bp->flags &= ~BNXT_FLAG_JUMBO;
2257 	}
2258 
2259 	/* Is there a change in mtu setting? */
2260 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2261 		return rc;
2262 
2263 	for (i = 0; i < bp->nr_vnics; i++) {
2264 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2265 		uint16_t size = 0;
2266 
2267 		vnic->mru = BNXT_VNIC_MRU(new_mtu);
2268 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2269 		if (rc)
2270 			break;
2271 
2272 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2273 		size -= RTE_PKTMBUF_HEADROOM;
2274 
2275 		if (size < new_mtu) {
2276 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2277 			if (rc)
2278 				return rc;
2279 		}
2280 	}
2281 
2282 	if (!rc)
2283 		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2284 
2285 	PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2286 
2287 	return rc;
2288 }
2289 
2290 static int
2291 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2292 {
2293 	struct bnxt *bp = dev->data->dev_private;
2294 	uint16_t vlan = bp->vlan;
2295 	int rc;
2296 
2297 	rc = is_bnxt_in_error(bp);
2298 	if (rc)
2299 		return rc;
2300 
2301 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2302 		PMD_DRV_LOG(ERR,
2303 			"PVID cannot be modified for this function\n");
2304 		return -ENOTSUP;
2305 	}
2306 	bp->vlan = on ? pvid : 0;
2307 
2308 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2309 	if (rc)
2310 		bp->vlan = vlan;
2311 	return rc;
2312 }
2313 
2314 static int
2315 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2316 {
2317 	struct bnxt *bp = dev->data->dev_private;
2318 	int rc;
2319 
2320 	rc = is_bnxt_in_error(bp);
2321 	if (rc)
2322 		return rc;
2323 
2324 	return bnxt_hwrm_port_led_cfg(bp, true);
2325 }
2326 
2327 static int
2328 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2329 {
2330 	struct bnxt *bp = dev->data->dev_private;
2331 	int rc;
2332 
2333 	rc = is_bnxt_in_error(bp);
2334 	if (rc)
2335 		return rc;
2336 
2337 	return bnxt_hwrm_port_led_cfg(bp, false);
2338 }
2339 
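/* Report the number of completions pending on the RX queue by walking the
 * completion ring from the current consumer index until the first entry
 * whose valid bit does not match the ring phase.
 */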
2340 static uint32_t
2341 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2342 {
2343 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2344 	uint32_t desc = 0, raw_cons = 0, cons;
2345 	struct bnxt_cp_ring_info *cpr;
2346 	struct bnxt_rx_queue *rxq;
2347 	struct rx_pkt_cmpl *rxcmp;
2348 	int rc;
2349 
2350 	rc = is_bnxt_in_error(bp);
2351 	if (rc)
2352 		return rc;
2353 
2354 	rxq = dev->data->rx_queues[rx_queue_id];
2355 	cpr = rxq->cp_ring;
2356 	raw_cons = cpr->cp_raw_cons;
2357 
2358 	while (1) {
2359 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2360 		rte_prefetch0(&cpr->cp_desc_ring[cons]);
2361 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2362 
2363 		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2364 			break;
2365 		} else {
2366 			raw_cons++;
2367 			desc++;
2368 		}
2369 	}
2370 
2371 	return desc;
2372 }
2373 
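/* Classify one RX descriptor for rte_eth_rx_descriptor_status(): DONE if
 * the completion at the requested offset carries the expected valid phase
 * bit, UNAVAIL if no mbuf is posted in that ring slot, AVAIL otherwise.
 */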
2374 static int
2375 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2376 {
2377 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2378 	struct bnxt_rx_ring_info *rxr;
2379 	struct bnxt_cp_ring_info *cpr;
2380 	struct bnxt_sw_rx_bd *rx_buf;
2381 	struct rx_pkt_cmpl *rxcmp;
2382 	uint32_t cons, cp_cons;
2383 	int rc;
2384 
2385 	if (!rxq)
2386 		return -EINVAL;
2387 
2388 	rc = is_bnxt_in_error(rxq->bp);
2389 	if (rc)
2390 		return rc;
2391 
2392 	cpr = rxq->cp_ring;
2393 	rxr = rxq->rx_ring;
2394 
2395 	if (offset >= rxq->nb_rx_desc)
2396 		return -EINVAL;
2397 
2398 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2399 	cp_cons = cpr->cp_raw_cons;
2400 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2401 
2402 	if (cons > cp_cons) {
2403 		if (CMPL_VALID(rxcmp, cpr->valid))
2404 			return RTE_ETH_RX_DESC_DONE;
2405 	} else {
2406 		if (CMPL_VALID(rxcmp, !cpr->valid))
2407 			return RTE_ETH_RX_DESC_DONE;
2408 	}
2409 	rx_buf = &rxr->rx_buf_ring[cons];
2410 	if (rx_buf->mbuf == NULL)
2411 		return RTE_ETH_RX_DESC_UNAVAIL;
2412 
2413 
2414 	return RTE_ETH_RX_DESC_AVAIL;
2415 }
2416 
2417 static int
2418 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2419 {
2420 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2421 	struct bnxt_tx_ring_info *txr;
2422 	struct bnxt_cp_ring_info *cpr;
2423 	struct bnxt_sw_tx_bd *tx_buf;
2424 	struct tx_pkt_cmpl *txcmp;
2425 	uint32_t cons, cp_cons;
2426 	int rc;
2427 
2428 	if (!txq)
2429 		return -EINVAL;
2430 
2431 	rc = is_bnxt_in_error(txq->bp);
2432 	if (rc)
2433 		return rc;
2434 
2435 	cpr = txq->cp_ring;
2436 	txr = txq->tx_ring;
2437 
2438 	if (offset >= txq->nb_tx_desc)
2439 		return -EINVAL;
2440 
2441 	cons = RING_CMP(cpr->cp_ring_struct, offset);
2442 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2443 	cp_cons = cpr->cp_raw_cons;
2444 
2445 	if (cons > cp_cons) {
2446 		if (CMPL_VALID(txcmp, cpr->valid))
2447 			return RTE_ETH_TX_DESC_UNAVAIL;
2448 	} else {
2449 		if (CMPL_VALID(txcmp, !cpr->valid))
2450 			return RTE_ETH_TX_DESC_UNAVAIL;
2451 	}
2452 	tx_buf = &txr->tx_buf_ring[cons];
2453 	if (tx_buf->mbuf == NULL)
2454 		return RTE_ETH_TX_DESC_DONE;
2455 
2456 	return RTE_ETH_TX_DESC_FULL;
2457 }
2458 
2459 static struct bnxt_filter_info *
2460 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2461 				struct rte_eth_ethertype_filter *efilter,
2462 				struct bnxt_vnic_info *vnic0,
2463 				struct bnxt_vnic_info *vnic,
2464 				int *ret)
2465 {
2466 	struct bnxt_filter_info *mfilter = NULL;
2467 	int match = 0;
2468 	*ret = 0;
2469 
2470 	if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2471 		efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2472 		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2473 			" ethertype filter.", efilter->ether_type);
2474 		*ret = -EINVAL;
2475 		goto exit;
2476 	}
2477 	if (efilter->queue >= bp->rx_nr_rings) {
2478 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2479 		*ret = -EINVAL;
2480 		goto exit;
2481 	}
2482 
2483 	vnic0 = &bp->vnic_info[0];
2484 	vnic = &bp->vnic_info[efilter->queue];
2485 	if (vnic == NULL) {
2486 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2487 		*ret = -EINVAL;
2488 		goto exit;
2489 	}
2490 
2491 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2492 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2493 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2494 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2495 			     mfilter->flags ==
2496 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2497 			     mfilter->ethertype == efilter->ether_type)) {
2498 				match = 1;
2499 				break;
2500 			}
2501 		}
2502 	} else {
2503 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
2504 			if ((!memcmp(efilter->mac_addr.addr_bytes,
2505 				     mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2506 			     mfilter->ethertype == efilter->ether_type &&
2507 			     mfilter->flags ==
2508 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2509 				match = 1;
2510 				break;
2511 			}
2512 	}
2513 
2514 	if (match)
2515 		*ret = -EEXIST;
2516 
2517 exit:
2518 	return mfilter;
2519 }
2520 
2521 static int
2522 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2523 			enum rte_filter_op filter_op,
2524 			void *arg)
2525 {
2526 	struct bnxt *bp = dev->data->dev_private;
2527 	struct rte_eth_ethertype_filter *efilter =
2528 			(struct rte_eth_ethertype_filter *)arg;
2529 	struct bnxt_filter_info *bfilter, *filter1;
2530 	struct bnxt_vnic_info *vnic, *vnic0;
2531 	int ret;
2532 
2533 	if (filter_op == RTE_ETH_FILTER_NOP)
2534 		return 0;
2535 
2536 	if (arg == NULL) {
2537 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2538 			    filter_op);
2539 		return -EINVAL;
2540 	}
2541 
2542 	vnic0 = &bp->vnic_info[0];
2543 	vnic = &bp->vnic_info[efilter->queue];
2544 
2545 	switch (filter_op) {
2546 	case RTE_ETH_FILTER_ADD:
2547 		bnxt_match_and_validate_ether_filter(bp, efilter,
2548 							vnic0, vnic, &ret);
2549 		if (ret < 0)
2550 			return ret;
2551 
2552 		bfilter = bnxt_get_unused_filter(bp);
2553 		if (bfilter == NULL) {
2554 			PMD_DRV_LOG(ERR,
2555 				"Not enough resources for a new filter.\n");
2556 			return -ENOMEM;
2557 		}
2558 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2559 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2560 		       RTE_ETHER_ADDR_LEN);
2561 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2562 		       RTE_ETHER_ADDR_LEN);
2563 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2564 		bfilter->ethertype = efilter->ether_type;
2565 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2566 
2567 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2568 		if (filter1 == NULL) {
2569 			ret = -EINVAL;
2570 			goto cleanup;
2571 		}
2572 		bfilter->enables |=
2573 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2574 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2575 
2576 		bfilter->dst_id = vnic->fw_vnic_id;
2577 
2578 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2579 			bfilter->flags =
2580 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2581 		}
2582 
2583 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2584 		if (ret)
2585 			goto cleanup;
2586 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2587 		break;
2588 	case RTE_ETH_FILTER_DELETE:
2589 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2590 							vnic0, vnic, &ret);
2591 		if (ret == -EEXIST) {
2592 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2593 
2594 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2595 				      next);
2596 			bnxt_free_filter(bp, filter1);
2597 		} else if (ret == 0) {
2598 			PMD_DRV_LOG(ERR, "No matching filter found\n");
2599 		}
2600 		break;
2601 	default:
2602 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2603 		ret = -EINVAL;
2604 		goto error;
2605 	}
2606 	return ret;
2607 cleanup:
2608 	bnxt_free_filter(bp, bfilter);
2609 error:
2610 	return ret;
2611 }
2612 
2613 static inline int
2614 parse_ntuple_filter(struct bnxt *bp,
2615 		    struct rte_eth_ntuple_filter *nfilter,
2616 		    struct bnxt_filter_info *bfilter)
2617 {
2618 	uint32_t en = 0;
2619 
2620 	if (nfilter->queue >= bp->rx_nr_rings) {
2621 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
2622 		return -EINVAL;
2623 	}
2624 
2625 	switch (nfilter->dst_port_mask) {
2626 	case UINT16_MAX:
2627 		bfilter->dst_port_mask = -1;
2628 		bfilter->dst_port = nfilter->dst_port;
2629 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
2630 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2631 		break;
2632 	default:
2633 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2634 		return -EINVAL;
2635 	}
2636 
2637 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2638 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2639 
2640 	switch (nfilter->proto_mask) {
2641 	case UINT8_MAX:
2642 		if (nfilter->proto == 17) /* IPPROTO_UDP */
2643 			bfilter->ip_protocol = 17;
2644 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
2645 			bfilter->ip_protocol = 6;
2646 		else
2647 			return -EINVAL;
2648 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2649 		break;
2650 	default:
2651 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
2652 		return -EINVAL;
2653 	}
2654 
2655 	switch (nfilter->dst_ip_mask) {
2656 	case UINT32_MAX:
2657 		bfilter->dst_ipaddr_mask[0] = -1;
2658 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
2659 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
2660 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2661 		break;
2662 	default:
2663 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2664 		return -EINVAL;
2665 	}
2666 
2667 	switch (nfilter->src_ip_mask) {
2668 	case UINT32_MAX:
2669 		bfilter->src_ipaddr_mask[0] = -1;
2670 		bfilter->src_ipaddr[0] = nfilter->src_ip;
2671 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
2672 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2673 		break;
2674 	default:
2675 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2676 		return -EINVAL;
2677 	}
2678 
2679 	switch (nfilter->src_port_mask) {
2680 	case UINT16_MAX:
2681 		bfilter->src_port_mask = -1;
2682 		bfilter->src_port = nfilter->src_port;
2683 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
2684 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2685 		break;
2686 	default:
2687 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
2688 		return -EINVAL;
2689 	}
2690 
2691 	bfilter->enables = en;
2692 	return 0;
2693 }
2694 
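/* Search every VNIC for an already-programmed ntuple filter whose 5-tuple,
 * flags and enables match bfilter; the owning VNIC is returned through
 * mvnic so the caller can retarget the filter to a different queue.
 */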
2695 static struct bnxt_filter_info*
2696 bnxt_match_ntuple_filter(struct bnxt *bp,
2697 			 struct bnxt_filter_info *bfilter,
2698 			 struct bnxt_vnic_info **mvnic)
2699 {
2700 	struct bnxt_filter_info *mfilter = NULL;
2701 	int i;
2702 
2703 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2704 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2705 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
2706 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
2707 			    bfilter->src_ipaddr_mask[0] ==
2708 			    mfilter->src_ipaddr_mask[0] &&
2709 			    bfilter->src_port == mfilter->src_port &&
2710 			    bfilter->src_port_mask == mfilter->src_port_mask &&
2711 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
2712 			    bfilter->dst_ipaddr_mask[0] ==
2713 			    mfilter->dst_ipaddr_mask[0] &&
2714 			    bfilter->dst_port == mfilter->dst_port &&
2715 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
2716 			    bfilter->flags == mfilter->flags &&
2717 			    bfilter->enables == mfilter->enables) {
2718 				if (mvnic)
2719 					*mvnic = vnic;
2720 				return mfilter;
2721 			}
2722 		}
2723 	}
2724 	return NULL;
2725 }
2726 
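/* Add or delete a 5-tuple ntuple filter.  A new filter is bound to the L2
 * filter of VNIC 0; if a filter with the same pattern but a different
 * destination already exists, it is retargeted to the requested queue
 * instead of being duplicated.
 */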
2727 static int
2728 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2729 		       struct rte_eth_ntuple_filter *nfilter,
2730 		       enum rte_filter_op filter_op)
2731 {
2732 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2733 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2734 	int ret;
2735 
2736 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2737 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2738 		return -EINVAL;
2739 	}
2740 
2741 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2742 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2743 		return -EINVAL;
2744 	}
2745 
2746 	bfilter = bnxt_get_unused_filter(bp);
2747 	if (bfilter == NULL) {
2748 		PMD_DRV_LOG(ERR,
2749 			"Not enough resources for a new filter.\n");
2750 		return -ENOMEM;
2751 	}
2752 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2753 	if (ret < 0)
2754 		goto free_filter;
2755 
2756 	vnic = &bp->vnic_info[nfilter->queue];
2757 	vnic0 = &bp->vnic_info[0];
2758 	filter1 = STAILQ_FIRST(&vnic0->filter);
2759 	if (filter1 == NULL) {
2760 		ret = -EINVAL;
2761 		goto free_filter;
2762 	}
2763 
2764 	bfilter->dst_id = vnic->fw_vnic_id;
2765 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2766 	bfilter->enables |=
2767 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2768 	bfilter->ethertype = 0x800;
2769 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2770 
2771 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2772 
2773 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2774 	    bfilter->dst_id == mfilter->dst_id) {
2775 		PMD_DRV_LOG(ERR, "filter exists.\n");
2776 		ret = -EEXIST;
2777 		goto free_filter;
2778 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2779 		   bfilter->dst_id != mfilter->dst_id) {
2780 		mfilter->dst_id = vnic->fw_vnic_id;
2781 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2782 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2783 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2784 		PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2785 		PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2786 		goto free_filter;
2787 	}
2788 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2789 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
2790 		ret = -ENOENT;
2791 		goto free_filter;
2792 	}
2793 
2794 	if (filter_op == RTE_ETH_FILTER_ADD) {
2795 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2796 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2797 		if (ret)
2798 			goto free_filter;
2799 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2800 	} else {
2801 		if (mfilter == NULL) {
2802 			/* This should not happen. But for Coverity! */
2803 			ret = -ENOENT;
2804 			goto free_filter;
2805 		}
2806 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2807 
2808 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2809 		mfilter->fw_l2_filter_id = -1;
2810 		bnxt_free_filter(bp, mfilter);
2811 		bfilter->fw_l2_filter_id = -1;
2812 		bnxt_free_filter(bp, bfilter);
2813 	}
2814 
2815 	return 0;
2816 free_filter:
2817 	bfilter->fw_l2_filter_id = -1;
2818 	bnxt_free_filter(bp, bfilter);
2819 	return ret;
2820 }
2821 
2822 static int
2823 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2824 			enum rte_filter_op filter_op,
2825 			void *arg)
2826 {
2827 	struct bnxt *bp = dev->data->dev_private;
2828 	int ret;
2829 
2830 	if (filter_op == RTE_ETH_FILTER_NOP)
2831 		return 0;
2832 
2833 	if (arg == NULL) {
2834 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2835 			    filter_op);
2836 		return -EINVAL;
2837 	}
2838 
2839 	switch (filter_op) {
2840 	case RTE_ETH_FILTER_ADD:
2841 		ret = bnxt_cfg_ntuple_filter(bp,
2842 			(struct rte_eth_ntuple_filter *)arg,
2843 			filter_op);
2844 		break;
2845 	case RTE_ETH_FILTER_DELETE:
2846 		ret = bnxt_cfg_ntuple_filter(bp,
2847 			(struct rte_eth_ntuple_filter *)arg,
2848 			filter_op);
2849 		break;
2850 	default:
2851 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2852 		ret = -EINVAL;
2853 		break;
2854 	}
2855 	return ret;
2856 }
2857 
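/* Translate a flow director request into the bnxt filter representation:
 * each supported flow type fills in the address, port and protocol fields
 * along with the matching "enables" bits of the HWRM ntuple filter
 * request, and the filter is bound to an L2 filter for the target VNIC.
 */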
2858 static int
2859 bnxt_parse_fdir_filter(struct bnxt *bp,
2860 		       struct rte_eth_fdir_filter *fdir,
2861 		       struct bnxt_filter_info *filter)
2862 {
2863 	enum rte_fdir_mode fdir_mode =
2864 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2865 	struct bnxt_vnic_info *vnic0, *vnic;
2866 	struct bnxt_filter_info *filter1;
2867 	uint32_t en = 0;
2868 	int i;
2869 
2870 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2871 		return -EINVAL;
2872 
2873 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2874 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2875 
2876 	switch (fdir->input.flow_type) {
2877 	case RTE_ETH_FLOW_IPV4:
2878 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2879 		/* FALLTHROUGH */
2880 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2881 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2882 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2883 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2884 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2885 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2886 		filter->ip_addr_type =
2887 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2888 		filter->src_ipaddr_mask[0] = 0xffffffff;
2889 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2890 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2891 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2892 		filter->ethertype = 0x800;
2893 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2894 		break;
2895 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2896 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2897 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2898 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2899 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2900 		filter->dst_port_mask = 0xffff;
2901 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2902 		filter->src_port_mask = 0xffff;
2903 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2904 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2905 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2906 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2907 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2908 		filter->ip_protocol = 6;
2909 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2910 		filter->ip_addr_type =
2911 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2912 		filter->src_ipaddr_mask[0] = 0xffffffff;
2913 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2914 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2915 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2916 		filter->ethertype = 0x800;
2917 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2918 		break;
2919 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2920 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2921 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2922 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2923 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2924 		filter->dst_port_mask = 0xffff;
2925 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2926 		filter->src_port_mask = 0xffff;
2927 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2928 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2929 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2930 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2931 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2932 		filter->ip_protocol = 17;
2933 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2934 		filter->ip_addr_type =
2935 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2936 		filter->src_ipaddr_mask[0] = 0xffffffff;
2937 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2938 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2939 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2940 		filter->ethertype = 0x800;
2941 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2942 		break;
2943 	case RTE_ETH_FLOW_IPV6:
2944 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2945 		/* FALLTHROUGH */
2946 		filter->ip_addr_type =
2947 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2948 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2949 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2950 		rte_memcpy(filter->src_ipaddr,
2951 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2952 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2953 		rte_memcpy(filter->dst_ipaddr,
2954 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2955 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2956 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2957 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2958 		memset(filter->src_ipaddr_mask, 0xff, 16);
2959 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2960 		filter->ethertype = 0x86dd;
2961 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2962 		break;
2963 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2964 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2965 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2966 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2967 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2968 		filter->dst_port_mask = 0xffff;
2969 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2970 		filter->src_port_mask = 0xffff;
2971 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2972 		filter->ip_addr_type =
2973 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2974 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2975 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2976 		rte_memcpy(filter->src_ipaddr,
2977 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2978 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2979 		rte_memcpy(filter->dst_ipaddr,
2980 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2981 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2982 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2983 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2984 		memset(filter->src_ipaddr_mask, 0xff, 16);
2985 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2986 		filter->ethertype = 0x86dd;
2987 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2988 		break;
2989 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2990 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2991 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2992 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2993 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2994 		filter->dst_port_mask = 0xffff;
2995 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2996 		filter->src_port_mask = 0xffff;
2997 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2998 		filter->ip_addr_type =
2999 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3000 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3001 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3002 		rte_memcpy(filter->src_ipaddr,
3003 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
3004 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3005 		rte_memcpy(filter->dst_ipaddr,
3006 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3007 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3008 		memset(filter->dst_ipaddr_mask, 0xff, 16);
3009 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3010 		memset(filter->src_ipaddr_mask, 0xff, 16);
3011 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3012 		filter->ethertype = 0x86dd;
3013 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3014 		break;
3015 	case RTE_ETH_FLOW_L2_PAYLOAD:
3016 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3017 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3018 		break;
3019 	case RTE_ETH_FLOW_VXLAN:
3020 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3021 			return -EINVAL;
3022 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3023 		filter->tunnel_type =
3024 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3025 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3026 		break;
3027 	case RTE_ETH_FLOW_NVGRE:
3028 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3029 			return -EINVAL;
3030 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3031 		filter->tunnel_type =
3032 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3033 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3034 		break;
3035 	case RTE_ETH_FLOW_UNKNOWN:
3036 	case RTE_ETH_FLOW_RAW:
3037 	case RTE_ETH_FLOW_FRAG_IPV4:
3038 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3039 	case RTE_ETH_FLOW_FRAG_IPV6:
3040 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3041 	case RTE_ETH_FLOW_IPV6_EX:
3042 	case RTE_ETH_FLOW_IPV6_TCP_EX:
3043 	case RTE_ETH_FLOW_IPV6_UDP_EX:
3044 	case RTE_ETH_FLOW_GENEVE:
3045 		/* FALLTHROUGH */
3046 	default:
3047 		return -EINVAL;
3048 	}
3049 
3050 	vnic0 = &bp->vnic_info[0];
3051 	vnic = &bp->vnic_info[fdir->action.rx_queue];
3052 	if (vnic == NULL) {
3053 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3054 		return -EINVAL;
3055 	}
3056 
3057 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3058 		rte_memcpy(filter->dst_macaddr,
3059 			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
3060 			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3061 	}
3062 
3063 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3064 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3065 		filter1 = STAILQ_FIRST(&vnic0->filter);
3066 		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
3067 	} else {
3068 		filter->dst_id = vnic->fw_vnic_id;
3069 		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
3070 			if (filter->dst_macaddr[i] == 0x00)
3071 				filter1 = STAILQ_FIRST(&vnic0->filter);
3072 			else
3073 				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
3074 	}
3075 
3076 	if (filter1 == NULL)
3077 		return -EINVAL;
3078 
3079 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3080 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3081 
3082 	filter->enables = en;
3083 
3084 	return 0;
3085 }
3086 
3087 static struct bnxt_filter_info *
3088 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3089 		struct bnxt_vnic_info **mvnic)
3090 {
3091 	struct bnxt_filter_info *mf = NULL;
3092 	int i;
3093 
3094 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
3095 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3096 
3097 		STAILQ_FOREACH(mf, &vnic->filter, next) {
3098 			if (mf->filter_type == nf->filter_type &&
3099 			    mf->flags == nf->flags &&
3100 			    mf->src_port == nf->src_port &&
3101 			    mf->src_port_mask == nf->src_port_mask &&
3102 			    mf->dst_port == nf->dst_port &&
3103 			    mf->dst_port_mask == nf->dst_port_mask &&
3104 			    mf->ip_protocol == nf->ip_protocol &&
3105 			    mf->ip_addr_type == nf->ip_addr_type &&
3106 			    mf->ethertype == nf->ethertype &&
3107 			    mf->vni == nf->vni &&
3108 			    mf->tunnel_type == nf->tunnel_type &&
3109 			    mf->l2_ovlan == nf->l2_ovlan &&
3110 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3111 			    mf->l2_ivlan == nf->l2_ivlan &&
3112 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3113 			    !memcmp(mf->l2_addr, nf->l2_addr,
3114 				    RTE_ETHER_ADDR_LEN) &&
3115 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3116 				    RTE_ETHER_ADDR_LEN) &&
3117 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
3118 				    RTE_ETHER_ADDR_LEN) &&
3119 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3120 				    RTE_ETHER_ADDR_LEN) &&
3121 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3122 				    sizeof(nf->src_ipaddr)) &&
3123 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3124 				    sizeof(nf->src_ipaddr_mask)) &&
3125 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3126 				    sizeof(nf->dst_ipaddr)) &&
3127 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3128 				    sizeof(nf->dst_ipaddr_mask))) {
3129 				if (mvnic)
3130 					*mvnic = vnic;
3131 				return mf;
3132 			}
3133 		}
3134 	}
3135 	return NULL;
3136 }
3137 
3138 static int
3139 bnxt_fdir_filter(struct rte_eth_dev *dev,
3140 		 enum rte_filter_op filter_op,
3141 		 void *arg)
3142 {
3143 	struct bnxt *bp = dev->data->dev_private;
3144 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
3145 	struct bnxt_filter_info *filter, *match;
3146 	struct bnxt_vnic_info *vnic, *mvnic;
3147 	int ret = 0, i;
3148 
3149 	if (filter_op == RTE_ETH_FILTER_NOP)
3150 		return 0;
3151 
3152 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3153 		return -EINVAL;
3154 
3155 	switch (filter_op) {
3156 	case RTE_ETH_FILTER_ADD:
3157 	case RTE_ETH_FILTER_DELETE:
3158 		/* FALLTHROUGH */
3159 		filter = bnxt_get_unused_filter(bp);
3160 		if (filter == NULL) {
3161 			PMD_DRV_LOG(ERR,
3162 				"Not enough resources for a new flow.\n");
3163 			return -ENOMEM;
3164 		}
3165 
3166 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3167 		if (ret != 0)
3168 			goto free_filter;
3169 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3170 
3171 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3172 			vnic = &bp->vnic_info[0];
3173 		else
3174 			vnic = &bp->vnic_info[fdir->action.rx_queue];
3175 
3176 		match = bnxt_match_fdir(bp, filter, &mvnic);
3177 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3178 			if (match->dst_id == vnic->fw_vnic_id) {
3179 				PMD_DRV_LOG(ERR, "Flow already exists.\n");
3180 				ret = -EEXIST;
3181 				goto free_filter;
3182 			} else {
3183 				match->dst_id = vnic->fw_vnic_id;
3184 				ret = bnxt_hwrm_set_ntuple_filter(bp,
3185 								  match->dst_id,
3186 								  match);
3187 				STAILQ_REMOVE(&mvnic->filter, match,
3188 					      bnxt_filter_info, next);
3189 				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3190 				PMD_DRV_LOG(ERR,
3191 					"Filter with matching pattern exists\n");
3192 				PMD_DRV_LOG(ERR,
3193 					"Updated it to the new destination queue\n");
3194 				goto free_filter;
3195 			}
3196 		}
3197 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3198 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3199 			ret = -ENOENT;
3200 			goto free_filter;
3201 		}
3202 
3203 		if (filter_op == RTE_ETH_FILTER_ADD) {
3204 			ret = bnxt_hwrm_set_ntuple_filter(bp,
3205 							  filter->dst_id,
3206 							  filter);
3207 			if (ret)
3208 				goto free_filter;
3209 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3210 		} else {
3211 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3212 			STAILQ_REMOVE(&vnic->filter, match,
3213 				      bnxt_filter_info, next);
3214 			bnxt_free_filter(bp, match);
3215 			filter->fw_l2_filter_id = -1;
3216 			bnxt_free_filter(bp, filter);
3217 		}
3218 		break;
3219 	case RTE_ETH_FILTER_FLUSH:
3220 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
3221 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3222 
3223 			STAILQ_FOREACH(filter, &vnic->filter, next) {
3224 				if (filter->filter_type ==
3225 				    HWRM_CFA_NTUPLE_FILTER) {
3226 					ret =
3227 					bnxt_hwrm_clear_ntuple_filter(bp,
3228 								      filter);
3229 					STAILQ_REMOVE(&vnic->filter, filter,
3230 						      bnxt_filter_info, next);
3231 				}
3232 			}
3233 		}
3234 		return ret;
3235 	case RTE_ETH_FILTER_UPDATE:
3236 	case RTE_ETH_FILTER_STATS:
3237 	case RTE_ETH_FILTER_INFO:
3238 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3239 		break;
3240 	default:
3241 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3242 		ret = -EINVAL;
3243 		break;
3244 	}
3245 	return ret;
3246 
3247 free_filter:
3248 	filter->fw_l2_filter_id = -1;
3249 	bnxt_free_filter(bp, filter);
3250 	return ret;
3251 }
3252 
3253 static int
3254 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3255 		    enum rte_filter_type filter_type,
3256 		    enum rte_filter_op filter_op, void *arg)
3257 {
3258 	int ret = 0;
3259 
3260 	ret = is_bnxt_in_error(dev->data->dev_private);
3261 	if (ret)
3262 		return ret;
3263 
3264 	switch (filter_type) {
3265 	case RTE_ETH_FILTER_TUNNEL:
3266 		PMD_DRV_LOG(ERR,
3267 			"filter type: %d: To be implemented\n", filter_type);
3268 		break;
3269 	case RTE_ETH_FILTER_FDIR:
3270 		ret = bnxt_fdir_filter(dev, filter_op, arg);
3271 		break;
3272 	case RTE_ETH_FILTER_NTUPLE:
3273 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
3274 		break;
3275 	case RTE_ETH_FILTER_ETHERTYPE:
3276 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
3277 		break;
3278 	case RTE_ETH_FILTER_GENERIC:
3279 		if (filter_op != RTE_ETH_FILTER_GET)
3280 			return -EINVAL;
3281 		*(const void **)arg = &bnxt_flow_ops;
3282 		break;
3283 	default:
3284 		PMD_DRV_LOG(ERR,
3285 			"Filter type (%d) not supported", filter_type);
3286 		ret = -EINVAL;
3287 		break;
3288 	}
3289 	return ret;
3290 }
3291 
3292 static const uint32_t *
3293 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3294 {
3295 	static const uint32_t ptypes[] = {
3296 		RTE_PTYPE_L2_ETHER_VLAN,
3297 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3298 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3299 		RTE_PTYPE_L4_ICMP,
3300 		RTE_PTYPE_L4_TCP,
3301 		RTE_PTYPE_L4_UDP,
3302 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3303 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3304 		RTE_PTYPE_INNER_L4_ICMP,
3305 		RTE_PTYPE_INNER_L4_TCP,
3306 		RTE_PTYPE_INNER_L4_UDP,
3307 		RTE_PTYPE_UNKNOWN
3308 	};
3309 
3310 	if (!dev->rx_pkt_burst)
3311 		return NULL;
3312 
3313 	return ptypes;
3314 }
3315 
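/* Map a group of GRC register addresses through one of the PF register
 * windows: all addresses must fall in the same 4KB page, whose base is
 * written to the window select register at BNXT_GRCPF_REG_WINDOW_BASE_OUT.
 */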
3316 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3317 			 int reg_win)
3318 {
3319 	uint32_t reg_base = *reg_arr & 0xfffff000;
3320 	uint32_t win_off;
3321 	int i;
3322 
3323 	for (i = 0; i < count; i++) {
3324 		if ((reg_arr[i] & 0xfffff000) != reg_base)
3325 			return -ERANGE;
3326 	}
3327 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3328 	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3329 	return 0;
3330 }
3331 
3332 static int bnxt_map_ptp_regs(struct bnxt *bp)
3333 {
3334 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3335 	uint32_t *reg_arr;
3336 	int rc, i;
3337 
3338 	reg_arr = ptp->rx_regs;
3339 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3340 	if (rc)
3341 		return rc;
3342 
3343 	reg_arr = ptp->tx_regs;
3344 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3345 	if (rc)
3346 		return rc;
3347 
3348 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3349 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3350 
3351 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3352 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3353 
3354 	return 0;
3355 }
3356 
3357 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3358 {
3359 	rte_write32(0, (uint8_t *)bp->bar0 +
3360 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3361 	rte_write32(0, (uint8_t *)bp->bar0 +
3362 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3363 }
3364 
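/* Read the 64-bit counter exposed through BNXT_GRCPF_REG_SYNC_TIME (low
 * word first, then high word); it is used as the PTP cycle counter on
 * non-Thor chips.
 */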
3365 static uint64_t bnxt_cc_read(struct bnxt *bp)
3366 {
3367 	uint64_t ns;
3368 
3369 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3370 			      BNXT_GRCPF_REG_SYNC_TIME));
3371 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3372 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3373 	return ns;
3374 }
3375 
3376 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3377 {
3378 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3379 	uint32_t fifo;
3380 
3381 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3382 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3383 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3384 		return -EAGAIN;
3385 
3386 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3387 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3388 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3389 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3390 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3391 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3392 
3393 	return 0;
3394 }
3395 
3396 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3397 {
3398 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3399 	struct bnxt_pf_info *pf = &bp->pf;
3400 	uint16_t port_id;
3401 	uint32_t fifo;
3402 
3403 	if (!ptp)
3404 		return -ENODEV;
3405 
3406 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3407 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3408 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3409 		return -EAGAIN;
3410 
3411 	port_id = pf->port_id;
3412 	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3413 	       ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3414 
3415 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3416 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3417 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3418 /*		bnxt_clr_rx_ts(bp);	  TBD  */
3419 		return -EBUSY;
3420 	}
3421 
3422 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3423 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3424 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3425 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3426 
3427 	return 0;
3428 }
3429 
3430 static int
3431 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3432 {
3433 	uint64_t ns;
3434 	struct bnxt *bp = dev->data->dev_private;
3435 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3436 
3437 	if (!ptp)
3438 		return 0;
3439 
3440 	ns = rte_timespec_to_ns(ts);
3441 	/* Set the timecounters to a new value. */
3442 	ptp->tc.nsec = ns;
3443 
3444 	return 0;
3445 }
3446 
3447 static int
3448 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3449 {
3450 	struct bnxt *bp = dev->data->dev_private;
3451 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3452 	uint64_t ns, systime_cycles = 0;
3453 	int rc = 0;
3454 
3455 	if (!ptp)
3456 		return 0;
3457 
3458 	if (BNXT_CHIP_THOR(bp))
3459 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3460 					     &systime_cycles);
3461 	else
3462 		systime_cycles = bnxt_cc_read(bp);
3463 
3464 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3465 	*ts = rte_ns_to_timespec(ns);
3466 
3467 	return rc;
3468 }
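
/* Enable PTP timestamping: request RX/TX timestamps from firmware via
 * bnxt_hwrm_ptp_cfg(), reset the rte_timecounter state used to convert
 * cycle counts to nanoseconds, and, on non-Thor chips, map the PTP FIFO
 * registers through GRC windows.
 */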
3469 static int
3470 bnxt_timesync_enable(struct rte_eth_dev *dev)
3471 {
3472 	struct bnxt *bp = dev->data->dev_private;
3473 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3474 	uint32_t shift = 0;
3475 	int rc;
3476 
3477 	if (!ptp)
3478 		return 0;
3479 
3480 	ptp->rx_filter = 1;
3481 	ptp->tx_tstamp_en = 1;
3482 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3483 
3484 	rc = bnxt_hwrm_ptp_cfg(bp);
3485 	if (rc)
3486 		return rc;
3487 
3488 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3489 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3490 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3491 
3492 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3493 	ptp->tc.cc_shift = shift;
3494 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
3495 
3496 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3497 	ptp->rx_tstamp_tc.cc_shift = shift;
3498 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3499 
3500 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3501 	ptp->tx_tstamp_tc.cc_shift = shift;
3502 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3503 
3504 	if (!BNXT_CHIP_THOR(bp))
3505 		bnxt_map_ptp_regs(bp);
3506 
3507 	return 0;
3508 }
3509 
3510 static int
3511 bnxt_timesync_disable(struct rte_eth_dev *dev)
3512 {
3513 	struct bnxt *bp = dev->data->dev_private;
3514 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3515 
3516 	if (!ptp)
3517 		return 0;
3518 
3519 	ptp->rx_filter = 0;
3520 	ptp->tx_tstamp_en = 0;
3521 	ptp->rxctl = 0;
3522 
3523 	bnxt_hwrm_ptp_cfg(bp);
3524 
3525 	if (!BNXT_CHIP_THOR(bp))
3526 		bnxt_unmap_ptp_regs(bp);
3527 
3528 	return 0;
3529 }
3530 
3531 static int
3532 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3533 				 struct timespec *timestamp,
3534 				 uint32_t flags __rte_unused)
3535 {
3536 	struct bnxt *bp = dev->data->dev_private;
3537 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3538 	uint64_t rx_tstamp_cycles = 0;
3539 	uint64_t ns;
3540 
3541 	if (!ptp)
3542 		return 0;
3543 
3544 	if (BNXT_CHIP_THOR(bp))
3545 		rx_tstamp_cycles = ptp->rx_timestamp;
3546 	else
3547 		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3548 
3549 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3550 	*timestamp = rte_ns_to_timespec(ns);
3551 	return  0;
3552 }
3553 
3554 static int
3555 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3556 				 struct timespec *timestamp)
3557 {
3558 	struct bnxt *bp = dev->data->dev_private;
3559 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3560 	uint64_t tx_tstamp_cycles = 0;
3561 	uint64_t ns;
3562 	int rc = 0;
3563 
3564 	if (!ptp)
3565 		return 0;
3566 
3567 	if (BNXT_CHIP_THOR(bp))
3568 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3569 					     &tx_tstamp_cycles);
3570 	else
3571 		rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3572 
3573 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3574 	*timestamp = rte_ns_to_timespec(ns);
3575 
3576 	return rc;
3577 }
3578 
3579 static int
3580 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3581 {
3582 	struct bnxt *bp = dev->data->dev_private;
3583 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3584 
3585 	if (!ptp)
3586 		return 0;
3587 
3588 	ptp->tc.nsec += delta;
3589 
3590 	return 0;
3591 }
3592 
3593 static int
3594 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3595 {
3596 	struct bnxt *bp = dev->data->dev_private;
3597 	int rc;
3598 	uint32_t dir_entries;
3599 	uint32_t entry_length;
3600 
3601 	rc = is_bnxt_in_error(bp);
3602 	if (rc)
3603 		return rc;
3604 
3605 	PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3606 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3607 		    bp->pdev->addr.devid, bp->pdev->addr.function);
3608 
3609 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3610 	if (rc != 0)
3611 		return rc;
3612 
3613 	return dir_entries * entry_length;
3614 }
3615 
3616 static int
3617 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3618 		struct rte_dev_eeprom_info *in_eeprom)
3619 {
3620 	struct bnxt *bp = dev->data->dev_private;
3621 	uint32_t index;
3622 	uint32_t offset;
3623 	int rc;
3624 
3625 	rc = is_bnxt_in_error(bp);
3626 	if (rc)
3627 		return rc;
3628 
3629 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3630 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3631 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3632 		    in_eeprom->offset, in_eeprom->length);
3633 
3634 	if (in_eeprom->offset == 0) /* special offset value to get directory */
3635 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
3636 						in_eeprom->data);
3637 
3638 	index = in_eeprom->offset >> 24;
3639 	offset = in_eeprom->offset & 0xffffff;
3640 
3641 	if (index != 0)
3642 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
3643 					   in_eeprom->length, in_eeprom->data);
3644 
3645 	return 0;
3646 }
3647 
3648 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
3649 {
3650 	switch (dir_type) {
3651 	case BNX_DIR_TYPE_CHIMP_PATCH:
3652 	case BNX_DIR_TYPE_BOOTCODE:
3653 	case BNX_DIR_TYPE_BOOTCODE_2:
3654 	case BNX_DIR_TYPE_APE_FW:
3655 	case BNX_DIR_TYPE_APE_PATCH:
3656 	case BNX_DIR_TYPE_KONG_FW:
3657 	case BNX_DIR_TYPE_KONG_PATCH:
3658 	case BNX_DIR_TYPE_BONO_FW:
3659 	case BNX_DIR_TYPE_BONO_PATCH:
3660 		/* FALLTHROUGH */
3661 		return true;
3662 	}
3663 
3664 	return false;
3665 }
3666 
3667 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
3668 {
3669 	switch (dir_type) {
3670 	case BNX_DIR_TYPE_AVS:
3671 	case BNX_DIR_TYPE_EXP_ROM_MBA:
3672 	case BNX_DIR_TYPE_PCIE:
3673 	case BNX_DIR_TYPE_TSCF_UCODE:
3674 	case BNX_DIR_TYPE_EXT_PHY:
3675 	case BNX_DIR_TYPE_CCM:
3676 	case BNX_DIR_TYPE_ISCSI_BOOT:
3677 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3678 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3679 		/* FALLTHROUGH */
3680 		return true;
3681 	}
3682 
3683 	return false;
3684 }
3685 
3686 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
3687 {
3688 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3689 		bnxt_dir_type_is_other_exec_format(dir_type);
3690 }
3691 
3692 static int
3693 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
3694 		struct rte_dev_eeprom_info *in_eeprom)
3695 {
3696 	struct bnxt *bp = dev->data->dev_private;
3697 	uint8_t index, dir_op;
3698 	uint16_t type, ext, ordinal, attr;
3699 	int rc;
3700 
3701 	rc = is_bnxt_in_error(bp);
3702 	if (rc)
3703 		return rc;
3704 
3705 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
3706 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
3707 		    bp->pdev->addr.devid, bp->pdev->addr.function,
3708 		    in_eeprom->offset, in_eeprom->length);
3709 
3710 	if (!BNXT_PF(bp)) {
3711 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
3712 		return -EINVAL;
3713 	}
3714 
3715 	type = in_eeprom->magic >> 16;
3716 
3717 	if (type == 0xffff) { /* special value for directory operations */
3718 		index = in_eeprom->magic & 0xff;
3719 		dir_op = in_eeprom->magic >> 8;
3720 		if (index == 0)
3721 			return -EINVAL;
3722 		switch (dir_op) {
3723 		case 0x0e: /* erase */
3724 			if (in_eeprom->offset != ~in_eeprom->magic)
3725 				return -EINVAL;
3726 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
3727 		default:
3728 			return -EINVAL;
3729 		}
3730 	}
3731 
3732 	/* Create or re-write an NVM item: */
3733 	if (bnxt_dir_type_is_executable(type))
3734 		return -EOPNOTSUPP;
3735 	ext = in_eeprom->magic & 0xffff;
3736 	ordinal = in_eeprom->offset >> 16;
3737 	attr = in_eeprom->offset & 0xffff;
3738 
3739 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
3740 				     in_eeprom->data, in_eeprom->length);
3741 }
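
/*
 * Illustrative sketch, not part of the driver: forming the magic/offset
 * pair consumed by bnxt_set_eeprom_op() above for a directory erase.
 * A magic of 0xffff in the upper 16 bits selects a directory operation,
 * dir_op 0x0e is erase, the low byte is the 1-based entry index (0 is
 * rejected), and the offset must be the bitwise complement of magic.
 * The helper name is hypothetical.
 */
static __rte_unused int
bnxt_example_erase_nvm_entry(uint16_t port_id, uint8_t index)
{
	struct rte_dev_eeprom_info info = { 0 };

	info.magic = (0xffffU << 16) | (0x0eU << 8) | index;
	info.offset = ~info.magic;
	info.length = 0;
	info.data = NULL;

	return rte_eth_dev_set_eeprom(port_id, &info);
}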
3742 
3743 /*
3744  * Initialization
3745  */
3746 
3747 static const struct eth_dev_ops bnxt_dev_ops = {
3748 	.dev_infos_get = bnxt_dev_info_get_op,
3749 	.dev_close = bnxt_dev_close_op,
3750 	.dev_configure = bnxt_dev_configure_op,
3751 	.dev_start = bnxt_dev_start_op,
3752 	.dev_stop = bnxt_dev_stop_op,
3753 	.dev_set_link_up = bnxt_dev_set_link_up_op,
3754 	.dev_set_link_down = bnxt_dev_set_link_down_op,
3755 	.stats_get = bnxt_stats_get_op,
3756 	.stats_reset = bnxt_stats_reset_op,
3757 	.rx_queue_setup = bnxt_rx_queue_setup_op,
3758 	.rx_queue_release = bnxt_rx_queue_release_op,
3759 	.tx_queue_setup = bnxt_tx_queue_setup_op,
3760 	.tx_queue_release = bnxt_tx_queue_release_op,
3761 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3762 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3763 	.reta_update = bnxt_reta_update_op,
3764 	.reta_query = bnxt_reta_query_op,
3765 	.rss_hash_update = bnxt_rss_hash_update_op,
3766 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3767 	.link_update = bnxt_link_update_op,
3768 	.promiscuous_enable = bnxt_promiscuous_enable_op,
3769 	.promiscuous_disable = bnxt_promiscuous_disable_op,
3770 	.allmulticast_enable = bnxt_allmulticast_enable_op,
3771 	.allmulticast_disable = bnxt_allmulticast_disable_op,
3772 	.mac_addr_add = bnxt_mac_addr_add_op,
3773 	.mac_addr_remove = bnxt_mac_addr_remove_op,
3774 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
3775 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
3776 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3777 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3778 	.vlan_filter_set = bnxt_vlan_filter_set_op,
3779 	.vlan_offload_set = bnxt_vlan_offload_set_op,
3780 	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
3781 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
3782 	.mtu_set = bnxt_mtu_set_op,
3783 	.mac_addr_set = bnxt_set_default_mac_addr_op,
3784 	.xstats_get = bnxt_dev_xstats_get_op,
3785 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
3786 	.xstats_reset = bnxt_dev_xstats_reset_op,
3787 	.fw_version_get = bnxt_fw_version_get,
3788 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3789 	.rxq_info_get = bnxt_rxq_info_get_op,
3790 	.txq_info_get = bnxt_txq_info_get_op,
3791 	.dev_led_on = bnxt_dev_led_on_op,
3792 	.dev_led_off = bnxt_dev_led_off_op,
3793 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3794 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3795 	.rx_queue_count = bnxt_rx_queue_count_op,
3796 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
3797 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
3798 	.rx_queue_start = bnxt_rx_queue_start,
3799 	.rx_queue_stop = bnxt_rx_queue_stop,
3800 	.tx_queue_start = bnxt_tx_queue_start,
3801 	.tx_queue_stop = bnxt_tx_queue_stop,
3802 	.filter_ctrl = bnxt_filter_ctrl_op,
3803 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3804 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
3805 	.get_eeprom           = bnxt_get_eeprom_op,
3806 	.set_eeprom           = bnxt_set_eeprom_op,
3807 	.timesync_enable      = bnxt_timesync_enable,
3808 	.timesync_disable     = bnxt_timesync_disable,
3809 	.timesync_read_time   = bnxt_timesync_read_time,
3810 	.timesync_write_time   = bnxt_timesync_write_time,
3811 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3812 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3813 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3814 };
3815 
3816 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
3817 {
3818 	uint32_t offset;
3819 
3820 	/* Only pre-map the reset GRC registers using window 3 */
3821 	rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
3822 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
3823 
3824 	offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
3825 
3826 	return offset;
3827 }
3828 
3829 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
3830 {
3831 	struct bnxt_error_recovery_info *info = bp->recovery_info;
3832 	uint32_t reg_base = 0xffffffff;
3833 	int i;
3834 
3835 	/* Only pre-map the monitoring GRC registers using window 2 */
3836 	for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
3837 		uint32_t reg = info->status_regs[i];
3838 
3839 		if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
3840 			continue;
3841 
3842 		if (reg_base == 0xffffffff)
3843 			reg_base = reg & 0xfffff000;
3844 		if ((reg & 0xfffff000) != reg_base)
3845 			return -ERANGE;
3846 
3847 		/* Use mask 0xffc, as the lower 2 bits indicate the
3848 		 * address space location.
3849 		 */
3850 		info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
3851 						(reg & 0xffc);
3852 	}
3853 
3854 	if (reg_base == 0xffffffff)
3855 		return 0;
3856 
3857 	rte_write32(reg_base, (uint8_t *)bp->bar0 +
3858 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
3859 
3860 	return 0;
3861 }
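
/*
 * Worked example (hypothetical register value): a GRC status register at
 * 0x12345678 causes 0x12345000 to be written to the window-base register
 * above, and the register is then accessed at
 * BNXT_GRCP_WINDOW_2_BASE + (0x12345678 & 0xffc) = window base + 0x678.
 */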
3862 
3863 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
3864 {
3865 	struct bnxt_error_recovery_info *info = bp->recovery_info;
3866 	uint32_t delay = info->delay_after_reset[index];
3867 	uint32_t val = info->reset_reg_val[index];
3868 	uint32_t reg = info->reset_reg[index];
3869 	uint32_t type, offset;
3870 
3871 	type = BNXT_FW_STATUS_REG_TYPE(reg);
3872 	offset = BNXT_FW_STATUS_REG_OFF(reg);
3873 
3874 	switch (type) {
3875 	case BNXT_FW_STATUS_REG_TYPE_CFG:
3876 		rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
3877 		break;
3878 	case BNXT_FW_STATUS_REG_TYPE_GRC:
3879 		offset = bnxt_map_reset_regs(bp, offset);
3880 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
3881 		break;
3882 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
3883 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
3884 		break;
3885 	}
3886 	/* Wait for the specified interval of time for the core reset to complete */
3887 	if (delay)
3888 		rte_delay_ms(delay);
3889 }
3890 
3891 static void bnxt_dev_cleanup(struct bnxt *bp)
3892 {
3893 	bnxt_set_hwrm_link_config(bp, false);
3894 	bp->link_info.link_up = 0;
3895 	if (bp->eth_dev->data->dev_started)
3896 		bnxt_dev_stop_op(bp->eth_dev);
3897 
3898 	bnxt_uninit_resources(bp, true);
3899 }
3900 
3901 static int bnxt_restore_vlan_filters(struct bnxt *bp)
3902 {
3903 	struct rte_eth_dev *dev = bp->eth_dev;
3904 	struct rte_vlan_filter_conf *vfc;
3905 	int vidx, vbit, rc;
3906 	uint16_t vlan_id;
3907 
3908 	for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
3909 		vfc = &dev->data->vlan_filter_conf;
3910 		vidx = vlan_id / 64;
3911 		vbit = vlan_id % 64;
3912 
3913 		/* Each bit corresponds to a VLAN id */
3914 		if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
3915 			rc = bnxt_add_vlan_filter(bp, vlan_id);
3916 			if (rc)
3917 				return rc;
3918 		}
3919 	}
3920 
3921 	return 0;
3922 }
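
/*
 * Worked example: VLAN ID 100 maps to word vidx = 100 / 64 = 1 and bit
 * vbit = 100 % 64 = 36, so its filter is replayed only when bit 36 of
 * vfc->ids[1] is set.
 */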
3923 
3924 static int bnxt_restore_mac_filters(struct bnxt *bp)
3925 {
3926 	struct rte_eth_dev *dev = bp->eth_dev;
3927 	struct rte_eth_dev_info dev_info;
3928 	struct rte_ether_addr *addr;
3929 	uint64_t pool_mask;
3930 	uint32_t pool = 0;
3931 	uint16_t i;
3932 	int rc;
3933 
3934 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3935 		return 0;
3936 
3937 	rc = bnxt_dev_info_get_op(dev, &dev_info);
3938 	if (rc)
3939 		return rc;
3940 
3941 	/* replay MAC address configuration */
3942 	for (i = 1; i < dev_info.max_mac_addrs; i++) {
3943 		addr = &dev->data->mac_addrs[i];
3944 
3945 		/* skip zero address */
3946 		if (rte_is_zero_ether_addr(addr))
3947 			continue;
3948 
3949 		pool = 0;
3950 		pool_mask = dev->data->mac_pool_sel[i];
3951 
3952 		do {
3953 			if (pool_mask & 1ULL) {
3954 				rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
3955 				if (rc)
3956 					return rc;
3957 			}
3958 			pool_mask >>= 1;
3959 			pool++;
3960 		} while (pool_mask);
3961 	}
3962 
3963 	return 0;
3964 }
3965 
3966 static int bnxt_restore_filters(struct bnxt *bp)
3967 {
3968 	struct rte_eth_dev *dev = bp->eth_dev;
3969 	int ret = 0;
3970 
3971 	if (dev->data->all_multicast) {
3972 		ret = bnxt_allmulticast_enable_op(dev);
3973 		if (ret)
3974 			return ret;
3975 	}
3976 	if (dev->data->promiscuous) {
3977 		ret = bnxt_promiscuous_enable_op(dev);
3978 		if (ret)
3979 			return ret;
3980 	}
3981 
3982 	ret = bnxt_restore_mac_filters(bp);
3983 	if (ret)
3984 		return ret;
3985 
3986 	ret = bnxt_restore_vlan_filters(bp);
3987 	/* TODO restore other filters as well */
3988 	return ret;
3989 }
3990 
3991 static void bnxt_dev_recover(void *arg)
3992 {
3993 	struct bnxt *bp = arg;
3994 	int timeout = bp->fw_reset_max_msecs;
3995 	int rc = 0;
3996 
3997 	/* Clear the error flag so that device re-init can happen */
3998 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
3999 
4000 	do {
4001 		rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4002 		if (rc == 0)
4003 			break;
4004 		rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4005 		timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4006 	} while (rc && timeout);
4007 
4008 	if (rc) {
4009 		PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
4010 		goto err;
4011 	}
4012 
4013 	rc = bnxt_init_resources(bp, true);
4014 	if (rc) {
4015 		PMD_DRV_LOG(ERR,
4016 			    "Failed to initialize resources after reset\n");
4017 		goto err;
4018 	}
4019 	/* clear reset flag as the device is initialized now */
4020 	bp->flags &= ~BNXT_FLAG_FW_RESET;
4021 
4022 	rc = bnxt_dev_start_op(bp->eth_dev);
4023 	if (rc) {
4024 		PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4025 		goto err_start;
4026 	}
4027 
4028 	rc = bnxt_restore_filters(bp);
4029 	if (rc)
4030 		goto err_start;
4031 
4032 	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4033 	return;
4034 err_start:
4035 	bnxt_dev_stop_op(bp->eth_dev);
4036 err:
4037 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4038 	bnxt_uninit_resources(bp, false);
4039 	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4040 }
4041 
4042 void bnxt_dev_reset_and_resume(void *arg)
4043 {
4044 	struct bnxt *bp = arg;
4045 	int rc;
4046 
4047 	bnxt_dev_cleanup(bp);
4048 
4049 	bnxt_wait_for_device_shutdown(bp);
4050 
4051 	rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4052 			       bnxt_dev_recover, (void *)bp);
4053 	if (rc)
4054 		PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4055 }
4056 
4057 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4058 {
4059 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4060 	uint32_t reg = info->status_regs[index];
4061 	uint32_t type, offset, val = 0;
4062 
4063 	type = BNXT_FW_STATUS_REG_TYPE(reg);
4064 	offset = BNXT_FW_STATUS_REG_OFF(reg);
4065 
4066 	switch (type) {
4067 	case BNXT_FW_STATUS_REG_TYPE_CFG:
4068 		rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4069 		break;
4070 	case BNXT_FW_STATUS_REG_TYPE_GRC:
4071 		offset = info->mapped_status_regs[index];
4072 		/* FALLTHROUGH */
4073 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
4074 		val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4075 				       offset));
4076 		break;
4077 	}
4078 
4079 	return val;
4080 }
4081 
4082 static int bnxt_fw_reset_all(struct bnxt *bp)
4083 {
4084 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4085 	uint32_t i;
4086 	int rc = 0;
4087 
4088 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4089 		/* Reset through master function driver */
4090 		for (i = 0; i < info->reg_array_cnt; i++)
4091 			bnxt_write_fw_reset_reg(bp, i);
4092 		/* Wait for the time specified by the FW after triggering the reset */
4093 		rte_delay_ms(info->master_func_wait_period_after_reset);
4094 	} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4095 		/* Reset with the help of Kong processor */
4096 		rc = bnxt_hwrm_fw_reset(bp);
4097 		if (rc)
4098 			PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4099 	}
4100 
4101 	return rc;
4102 }
4103 
4104 static void bnxt_fw_reset_cb(void *arg)
4105 {
4106 	struct bnxt *bp = arg;
4107 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4108 	int rc = 0;
4109 
4110 	/* Only the master function can do FW reset */
4111 	if (bnxt_is_master_func(bp) &&
4112 	    bnxt_is_recovery_enabled(bp)) {
4113 		rc = bnxt_fw_reset_all(bp);
4114 		if (rc) {
4115 			PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4116 			return;
4117 		}
4118 	}
4119 
4120 	/* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG will send an
4121 	 * EXCEPTION_FATAL_ASYNC event to all the functions (including the
4122 	 * MASTER FUNC). After receiving this async event, all active drivers
4123 	 * should treat this case as FW-initiated recovery.
4124 	 */
4125 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4126 		bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4127 		bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4128 
4129 		/* To recover from error */
4130 		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4131 				  (void *)bp);
4132 	}
4133 }
4134 
4135 /* The driver should poll the FW heartbeat and reset_counter registers at
4136  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
4137  * When the driver detects a heartbeat stop or a change in reset_counter,
4138  * it has to trigger a reset to recover from the error condition.
4139  * A "master PF" is the function that has the privilege to initiate the
4140  * chimp reset. The master PF is elected by the firmware and is notified
4141  * through an async message.
4142  */
4143 static void bnxt_check_fw_health(void *arg)
4144 {
4145 	struct bnxt *bp = arg;
4146 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4147 	uint32_t val = 0, wait_msec;
4148 
4149 	if (!info || !bnxt_is_recovery_enabled(bp) ||
4150 	    is_bnxt_in_error(bp))
4151 		return;
4152 
4153 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4154 	if (val == info->last_heart_beat)
4155 		goto reset;
4156 
4157 	info->last_heart_beat = val;
4158 
4159 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4160 	if (val != info->last_reset_counter)
4161 		goto reset;
4162 
4163 	info->last_reset_counter = val;
4164 
4165 	rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4166 			  bnxt_check_fw_health, (void *)bp);
4167 
4168 	return;
4169 reset:
4170 	/* Stop DMA to/from device */
4171 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
4172 	bp->flags |= BNXT_FLAG_FW_RESET;
4173 
4174 	PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4175 
4176 	if (bnxt_is_master_func(bp))
4177 		wait_msec = info->master_func_wait_period;
4178 	else
4179 		wait_msec = info->normal_func_wait_period;
4180 
4181 	rte_eal_alarm_set(US_PER_MS * wait_msec,
4182 			  bnxt_fw_reset_cb, (void *)bp);
4183 }
4184 
4185 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4186 {
4187 	uint32_t polling_freq;
4188 
4189 	if (!bnxt_is_recovery_enabled(bp))
4190 		return;
4191 
4192 	if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4193 		return;
4194 
4195 	polling_freq = bp->recovery_info->driver_polling_freq;
4196 
4197 	rte_eal_alarm_set(US_PER_MS * polling_freq,
4198 			  bnxt_check_fw_health, (void *)bp);
4199 	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4200 }
4201 
4202 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4203 {
4204 	if (!bnxt_is_recovery_enabled(bp))
4205 		return;
4206 
4207 	rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4208 	bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4209 }
4210 
4211 static bool bnxt_vf_pciid(uint16_t id)
4212 {
4213 	if (id == BROADCOM_DEV_ID_57304_VF ||
4214 	    id == BROADCOM_DEV_ID_57406_VF ||
4215 	    id == BROADCOM_DEV_ID_5731X_VF ||
4216 	    id == BROADCOM_DEV_ID_5741X_VF ||
4217 	    id == BROADCOM_DEV_ID_57414_VF ||
4218 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
4219 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
4220 	    id == BROADCOM_DEV_ID_58802_VF ||
4221 	    id == BROADCOM_DEV_ID_57500_VF1 ||
4222 	    id == BROADCOM_DEV_ID_57500_VF2)
4223 		return true;
4224 	return false;
4225 }
4226 
4227 static bool bnxt_thor_device(uint16_t id)
4228 {
4229 	if (id == BROADCOM_DEV_ID_57508 ||
4230 	    id == BROADCOM_DEV_ID_57504 ||
4231 	    id == BROADCOM_DEV_ID_57502 ||
4232 	    id == BROADCOM_DEV_ID_57508_MF1 ||
4233 	    id == BROADCOM_DEV_ID_57504_MF1 ||
4234 	    id == BROADCOM_DEV_ID_57502_MF1 ||
4235 	    id == BROADCOM_DEV_ID_57508_MF2 ||
4236 	    id == BROADCOM_DEV_ID_57504_MF2 ||
4237 	    id == BROADCOM_DEV_ID_57502_MF2 ||
4238 	    id == BROADCOM_DEV_ID_57500_VF1 ||
4239 	    id == BROADCOM_DEV_ID_57500_VF2)
4240 		return true;
4241 
4242 	return false;
4243 }
4244 
4245 bool bnxt_stratus_device(struct bnxt *bp)
4246 {
4247 	uint16_t id = bp->pdev->id.device_id;
4248 
4249 	if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
4250 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
4251 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
4252 		return true;
4253 	return false;
4254 }
4255 
4256 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4257 {
4258 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4259 	struct bnxt *bp = eth_dev->data->dev_private;
4260 
4261 	/* Map BAR 0 (registers) and BAR 2 (doorbells) for device access */
4262 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4263 	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4264 	if (!bp->bar0 || !bp->doorbell_base) {
4265 		PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4266 		return -ENODEV;
4267 	}
4268 
4269 	bp->eth_dev = eth_dev;
4270 	bp->pdev = pci_dev;
4271 
4272 	return 0;
4273 }
4274 
4275 static int bnxt_alloc_ctx_mem_blk(__rte_unused struct bnxt *bp,
4276 				  struct bnxt_ctx_pg_info *ctx_pg,
4277 				  uint32_t mem_size,
4278 				  const char *suffix,
4279 				  uint16_t idx)
4280 {
4281 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4282 	const struct rte_memzone *mz = NULL;
4283 	char mz_name[RTE_MEMZONE_NAMESIZE];
4284 	rte_iova_t mz_phys_addr;
4285 	uint64_t valid_bits = 0;
4286 	uint32_t sz;
4287 	int i;
4288 
4289 	if (!mem_size)
4290 		return 0;
4291 
4292 	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4293 			 BNXT_PAGE_SIZE;
4294 	rmem->page_size = BNXT_PAGE_SIZE;
4295 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
4296 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
4297 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4298 
4299 	valid_bits = PTU_PTE_VALID;
4300 
4301 	if (rmem->nr_pages > 1) {
4302 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4303 			 "bnxt_ctx_pg_tbl%s_%x_%d",
4304 			 suffix, idx, bp->eth_dev->data->port_id);
4305 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4306 		mz = rte_memzone_lookup(mz_name);
4307 		if (!mz) {
4308 			mz = rte_memzone_reserve_aligned(mz_name,
4309 						rmem->nr_pages * 8,
4310 						SOCKET_ID_ANY,
4311 						RTE_MEMZONE_2MB |
4312 						RTE_MEMZONE_SIZE_HINT_ONLY |
4313 						RTE_MEMZONE_IOVA_CONTIG,
4314 						BNXT_PAGE_SIZE);
4315 			if (mz == NULL)
4316 				return -ENOMEM;
4317 		}
4318 
4319 		memset(mz->addr, 0, mz->len);
4320 		mz_phys_addr = mz->iova;
4321 
4322 		rmem->pg_tbl = mz->addr;
4323 		rmem->pg_tbl_map = mz_phys_addr;
4324 		rmem->pg_tbl_mz = mz;
4325 	}
4326 
4327 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4328 		 suffix, idx, bp->eth_dev->data->port_id);
4329 	mz = rte_memzone_lookup(mz_name);
4330 	if (!mz) {
4331 		mz = rte_memzone_reserve_aligned(mz_name,
4332 						 mem_size,
4333 						 SOCKET_ID_ANY,
4334 						 RTE_MEMZONE_1GB |
4335 						 RTE_MEMZONE_SIZE_HINT_ONLY |
4336 						 RTE_MEMZONE_IOVA_CONTIG,
4337 						 BNXT_PAGE_SIZE);
4338 		if (mz == NULL)
4339 			return -ENOMEM;
4340 	}
4341 
4342 	memset(mz->addr, 0, mz->len);
4343 	mz_phys_addr = mz->iova;
4344 
4345 	for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4346 		rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4347 		rmem->dma_arr[i] = mz_phys_addr + sz;
4348 
4349 		if (rmem->nr_pages > 1) {
4350 			if (i == rmem->nr_pages - 2 &&
4351 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4352 				valid_bits |= PTU_PTE_NEXT_TO_LAST;
4353 			else if (i == rmem->nr_pages - 1 &&
4354 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4355 				valid_bits |= PTU_PTE_LAST;
4356 
4357 			rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4358 							   valid_bits);
4359 		}
4360 	}
4361 
4362 	rmem->mz = mz;
4363 	if (rmem->vmem_size)
4364 		rmem->vmem = (void **)mz->addr;
4365 	rmem->dma_arr[0] = mz_phys_addr;
4366 	return 0;
4367 }
4368 
4369 static void bnxt_free_ctx_mem(struct bnxt *bp)
4370 {
4371 	int i;
4372 
4373 	if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4374 		return;
4375 
4376 	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4377 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4378 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4379 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4380 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4381 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4382 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4383 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4384 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4385 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4386 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4387 
4388 	for (i = 0; i < BNXT_MAX_Q; i++) {
4389 		if (bp->ctx->tqm_mem[i])
4390 			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4391 	}
4392 
4393 	rte_free(bp->ctx);
4394 	bp->ctx = NULL;
4395 }
4396 
4397 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4398 
4399 #define min_t(type, x, y) ({                    \
4400 	type __min1 = (x);                      \
4401 	type __min2 = (y);                      \
4402 	__min1 < __min2 ? __min1 : __min2; })
4403 
4404 #define max_t(type, x, y) ({                    \
4405 	type __max1 = (x);                      \
4406 	type __max2 = (y);                      \
4407 	__max1 > __max2 ? __max1 : __max2; })
4408 
4409 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
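
/*
 * Worked example (illustrative bounds, not the FW-advertised TQM limits):
 * clamp_t(uint32_t, 100000, 4096, 65536) first takes max_t() against the
 * lower bound, giving 100000, then min_t() against the upper bound,
 * yielding 65536; values already inside [4096, 65536] pass through
 * unchanged.
 */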
4410 
4411 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4412 {
4413 	struct bnxt_ctx_pg_info *ctx_pg;
4414 	struct bnxt_ctx_mem_info *ctx;
4415 	uint32_t mem_size, ena, entries;
4416 	int i, rc;
4417 
4418 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4419 	if (rc) {
4420 		PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4421 		return rc;
4422 	}
4423 	ctx = bp->ctx;
4424 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4425 		return 0;
4426 
4427 	ctx_pg = &ctx->qp_mem;
4428 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4429 	mem_size = ctx->qp_entry_size * ctx_pg->entries;
4430 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4431 	if (rc)
4432 		return rc;
4433 
4434 	ctx_pg = &ctx->srq_mem;
4435 	ctx_pg->entries = ctx->srq_max_l2_entries;
4436 	mem_size = ctx->srq_entry_size * ctx_pg->entries;
4437 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4438 	if (rc)
4439 		return rc;
4440 
4441 	ctx_pg = &ctx->cq_mem;
4442 	ctx_pg->entries = ctx->cq_max_l2_entries;
4443 	mem_size = ctx->cq_entry_size * ctx_pg->entries;
4444 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4445 	if (rc)
4446 		return rc;
4447 
4448 	ctx_pg = &ctx->vnic_mem;
4449 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
4450 		ctx->vnic_max_ring_table_entries;
4451 	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4452 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4453 	if (rc)
4454 		return rc;
4455 
4456 	ctx_pg = &ctx->stat_mem;
4457 	ctx_pg->entries = ctx->stat_max_entries;
4458 	mem_size = ctx->stat_entry_size * ctx_pg->entries;
4459 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4460 	if (rc)
4461 		return rc;
4462 
4463 	entries = ctx->qp_max_l2_entries +
4464 		  ctx->vnic_max_vnic_entries +
4465 		  ctx->tqm_min_entries_per_ring;
4466 	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4467 	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
4468 			  ctx->tqm_max_entries_per_ring);
4469 	for (i = 0, ena = 0; i < BNXT_MAX_Q; i++) {
4470 		ctx_pg = ctx->tqm_mem[i];
4471 		/* Use the same clamped entry count for all TQM rings for now. */
4472 		ctx_pg->entries = entries;
4473 		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4474 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4475 		if (rc)
4476 			return rc;
4477 		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4478 	}
4479 
4480 	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4481 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4482 	if (rc)
4483 		PMD_DRV_LOG(ERR,
4484 			    "Failed to configure context mem: rc = %d\n", rc);
4485 	else
4486 		ctx->flags |= BNXT_CTX_FLAG_INITED;
4487 
4488 	return rc;
4489 }
4490 
4491 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4492 {
4493 	struct rte_pci_device *pci_dev = bp->pdev;
4494 	char mz_name[RTE_MEMZONE_NAMESIZE];
4495 	const struct rte_memzone *mz = NULL;
4496 	uint32_t total_alloc_len;
4497 	rte_iova_t mz_phys_addr;
4498 
4499 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4500 		return 0;
4501 
4502 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4503 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4504 		 pci_dev->addr.bus, pci_dev->addr.devid,
4505 		 pci_dev->addr.function, "rx_port_stats");
4506 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4507 	mz = rte_memzone_lookup(mz_name);
4508 	total_alloc_len =
4509 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4510 				       sizeof(struct rx_port_stats_ext) + 512);
4511 	if (!mz) {
4512 		mz = rte_memzone_reserve(mz_name, total_alloc_len,
4513 					 SOCKET_ID_ANY,
4514 					 RTE_MEMZONE_2MB |
4515 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4516 					 RTE_MEMZONE_IOVA_CONTIG);
4517 		if (mz == NULL)
4518 			return -ENOMEM;
4519 	}
4520 	memset(mz->addr, 0, mz->len);
4521 	mz_phys_addr = mz->iova;
4522 
4523 	bp->rx_mem_zone = (const void *)mz;
4524 	bp->hw_rx_port_stats = mz->addr;
4525 	bp->hw_rx_port_stats_map = mz_phys_addr;
4526 
4527 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4528 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4529 		 pci_dev->addr.bus, pci_dev->addr.devid,
4530 		 pci_dev->addr.function, "tx_port_stats");
4531 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4532 	mz = rte_memzone_lookup(mz_name);
4533 	total_alloc_len =
4534 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4535 				       sizeof(struct tx_port_stats_ext) + 512);
4536 	if (!mz) {
4537 		mz = rte_memzone_reserve(mz_name,
4538 					 total_alloc_len,
4539 					 SOCKET_ID_ANY,
4540 					 RTE_MEMZONE_2MB |
4541 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4542 					 RTE_MEMZONE_IOVA_CONTIG);
4543 		if (mz == NULL)
4544 			return -ENOMEM;
4545 	}
4546 	memset(mz->addr, 0, mz->len);
4547 	mz_phys_addr = mz->iova;
4548 
4549 	bp->tx_mem_zone = (const void *)mz;
4550 	bp->hw_tx_port_stats = mz->addr;
4551 	bp->hw_tx_port_stats_map = mz_phys_addr;
4552 	bp->flags |= BNXT_FLAG_PORT_STATS;
4553 
4554 	/* Display extended statistics if FW supports it */
4555 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4556 	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4557 	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4558 		return 0;
4559 
4560 	bp->hw_rx_port_stats_ext = (void *)
4561 		((uint8_t *)bp->hw_rx_port_stats +
4562 		 sizeof(struct rx_port_stats));
4563 	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4564 		sizeof(struct rx_port_stats);
4565 	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4566 
4567 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4568 	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4569 		bp->hw_tx_port_stats_ext = (void *)
4570 			((uint8_t *)bp->hw_tx_port_stats +
4571 			 sizeof(struct tx_port_stats));
4572 		bp->hw_tx_port_stats_ext_map =
4573 			bp->hw_tx_port_stats_map +
4574 			sizeof(struct tx_port_stats);
4575 		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4576 	}
4577 
4578 	return 0;
4579 }
4580 
4581 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4582 {
4583 	struct bnxt *bp = eth_dev->data->dev_private;
4584 	int rc = 0;
4585 
4586 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4587 					       RTE_ETHER_ADDR_LEN *
4588 					       bp->max_l2_ctx,
4589 					       0);
4590 	if (eth_dev->data->mac_addrs == NULL) {
4591 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4592 		return -ENOMEM;
4593 	}
4594 
4595 	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
4596 		if (BNXT_PF(bp))
4597 			return -EINVAL;
4598 
4599 		/* Generate a random MAC address, if none was assigned by PF */
4600 		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4601 		bnxt_eth_hw_addr_random(bp->mac_addr);
4602 		PMD_DRV_LOG(INFO,
4603 			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4604 			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4605 			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4606 
4607 		rc = bnxt_hwrm_set_mac(bp);
4608 		if (!rc)
4609 			memcpy(&bp->eth_dev->data->mac_addrs[0], bp->mac_addr,
4610 			       RTE_ETHER_ADDR_LEN);
4611 		return rc;
4612 	}
4613 
4614 	/* Copy the permanent MAC from the FUNC_QCAPS response */
4615 	memcpy(bp->mac_addr, bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN);
4616 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
4617 
4618 	return rc;
4619 }
4620 
4621 static int bnxt_restore_dflt_mac(struct bnxt *bp)
4622 {
4623 	int rc = 0;
4624 
4625 	/* MAC is already configured in FW */
4626 	if (!bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN))
4627 		return 0;
4628 
4629 	/* Restore the previously configured MAC */
4630 	rc = bnxt_hwrm_set_mac(bp);
4631 	if (rc)
4632 		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
4633 
4634 	return rc;
4635 }
4636 
4637 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
4638 {
4639 	if (!BNXT_PF(bp))
4640 		return;
4641 
4642 #define ALLOW_FUNC(x)	\
4643 	{ \
4644 		uint32_t arg = (x); \
4645 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
4646 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
4647 	}
4648 
4649 	/* Forward all requests if firmware is new enough */
4650 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
4651 	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
4652 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
4653 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
4654 	} else {
4655 		PMD_DRV_LOG(WARNING,
4656 			    "Firmware too old for VF mailbox functionality\n");
4657 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
4658 	}
4659 
4660 	/*
4661 	 * The following are used for driver cleanup. If we disallow these,
4662 	 * VF drivers can't clean up cleanly.
4663 	 */
4664 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
4665 	ALLOW_FUNC(HWRM_VNIC_FREE);
4666 	ALLOW_FUNC(HWRM_RING_FREE);
4667 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
4668 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
4669 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
4670 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
4671 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
4672 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
4673 }
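
/*
 * Worked example: bp->fw_ver packs major.minor.build into the top three
 * bytes, so firmware 20.6.100 is stored as
 * (20 << 24) | (6 << 16) | (100 << 8) = 0x14066400.  The range check above
 * therefore forwards all VF requests for 20.6.100 up to (but excluding)
 * 20.7.0, or for 20.8.0 and later.
 */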
4674 
4675 static int bnxt_init_fw(struct bnxt *bp)
4676 {
4677 	uint16_t mtu;
4678 	int rc = 0;
4679 
4680 	bp->fw_cap = 0;
4681 
4682 	rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
4683 	if (rc)
4684 		return rc;
4685 
4686 	rc = bnxt_hwrm_func_reset(bp);
4687 	if (rc)
4688 		return -EIO;
4689 
4690 	rc = bnxt_hwrm_vnic_qcaps(bp);
4691 	if (rc)
4692 		return rc;
4693 
4694 	rc = bnxt_hwrm_queue_qportcfg(bp);
4695 	if (rc)
4696 		return rc;
4697 
4698 	/* Get the MAX capabilities for this function.
4699 	 * This function also allocates context memory for TQM rings and
4700 	 * informs the firmware about this allocated backing store memory.
4701 	 */
4702 	rc = bnxt_hwrm_func_qcaps(bp);
4703 	if (rc)
4704 		return rc;
4705 
4706 	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
4707 	if (rc)
4708 		return rc;
4709 
4710 	rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
4711 	if (rc)
4712 		return rc;
4713 
4714 	/* Get the adapter error recovery support info */
4715 	rc = bnxt_hwrm_error_recovery_qcfg(bp);
4716 	if (rc)
4717 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4718 
4719 	bnxt_hwrm_port_led_qcaps(bp);
4720 
4721 	return 0;
4722 }
4723 
4724 static int
4725 bnxt_init_locks(struct bnxt *bp)
4726 {
4727 	int err;
4728 
4729 	err = pthread_mutex_init(&bp->flow_lock, NULL);
4730 	if (err) {
4731 		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
4732 		return err;
4733 	}
4734 
4735 	err = pthread_mutex_init(&bp->def_cp_lock, NULL);
4736 	if (err)
4737 		PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
4738 	return err;
4739 }
4740 
4741 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
4742 {
4743 	int rc;
4744 
4745 	rc = bnxt_init_fw(bp);
4746 	if (rc)
4747 		return rc;
4748 
4749 	if (!reconfig_dev) {
4750 		rc = bnxt_setup_mac_addr(bp->eth_dev);
4751 		if (rc)
4752 			return rc;
4753 	} else {
4754 		rc = bnxt_restore_dflt_mac(bp);
4755 		if (rc)
4756 			return rc;
4757 	}
4758 
4759 	bnxt_config_vf_req_fwd(bp);
4760 
4761 	rc = bnxt_hwrm_func_driver_register(bp);
4762 	if (rc) {
4763 		PMD_DRV_LOG(ERR, "Failed to register driver\n");
4764 		return -EBUSY;
4765 	}
4766 
4767 	if (BNXT_PF(bp)) {
4768 		if (bp->pdev->max_vfs) {
4769 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
4770 			if (rc) {
4771 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
4772 				return rc;
4773 			}
4774 		} else {
4775 			rc = bnxt_hwrm_allocate_pf_only(bp);
4776 			if (rc) {
4777 				PMD_DRV_LOG(ERR,
4778 					    "Failed to allocate PF resources\n");
4779 				return rc;
4780 			}
4781 		}
4782 	}
4783 
4784 	rc = bnxt_alloc_mem(bp, reconfig_dev);
4785 	if (rc)
4786 		return rc;
4787 
4788 	rc = bnxt_setup_int(bp);
4789 	if (rc)
4790 		return rc;
4791 
4792 	rc = bnxt_request_int(bp);
4793 	if (rc)
4794 		return rc;
4795 
4796 	rc = bnxt_init_locks(bp);
4797 	if (rc)
4798 		return rc;
4799 
4800 	return 0;
4801 }
4802 
4803 static int
4804 bnxt_dev_init(struct rte_eth_dev *eth_dev)
4805 {
4806 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4807 	static int version_printed;
4808 	struct bnxt *bp;
4809 	int rc;
4810 
4811 	if (version_printed++ == 0)
4812 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
4813 
4814 	eth_dev->dev_ops = &bnxt_dev_ops;
4815 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
4816 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
4817 
4818 	/*
4819 	 * For secondary processes, we don't initialise any further
4820 	 * as primary has already done this work.
4821 	 */
4822 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4823 		return 0;
4824 
4825 	rte_eth_copy_pci_info(eth_dev, pci_dev);
4826 
4827 	bp = eth_dev->data->dev_private;
4828 
4829 	if (bnxt_vf_pciid(pci_dev->id.device_id))
4830 		bp->flags |= BNXT_FLAG_VF;
4831 
4832 	if (bnxt_thor_device(pci_dev->id.device_id))
4833 		bp->flags |= BNXT_FLAG_THOR_CHIP;
4834 
4835 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
4836 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
4837 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
4838 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
4839 		bp->flags |= BNXT_FLAG_STINGRAY;
4840 
4841 	rc = bnxt_init_board(eth_dev);
4842 	if (rc) {
4843 		PMD_DRV_LOG(ERR,
4844 			    "Failed to initialize board rc: %x\n", rc);
4845 		return rc;
4846 	}
4847 
4848 	rc = bnxt_alloc_hwrm_resources(bp);
4849 	if (rc) {
4850 		PMD_DRV_LOG(ERR,
4851 			    "Failed to allocate hwrm resource rc: %x\n", rc);
4852 		goto error_free;
4853 	}
4854 	rc = bnxt_init_resources(bp, false);
4855 	if (rc)
4856 		goto error_free;
4857 
4858 	rc = bnxt_alloc_stats_mem(bp);
4859 	if (rc)
4860 		goto error_free;
4861 
4862 	PMD_DRV_LOG(INFO,
4863 		    DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %p\n",
4864 		    pci_dev->mem_resource[0].phys_addr,
4865 		    pci_dev->mem_resource[0].addr);
4866 
4867 	return 0;
4868 
4869 error_free:
4870 	bnxt_dev_uninit(eth_dev);
4871 	return rc;
4872 }
4873 
4874 static void
4875 bnxt_uninit_locks(struct bnxt *bp)
4876 {
4877 	pthread_mutex_destroy(&bp->flow_lock);
4878 	pthread_mutex_destroy(&bp->def_cp_lock);
4879 }
4880 
4881 static int
4882 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
4883 {
4884 	int rc;
4885 
4886 	bnxt_free_int(bp);
4887 	bnxt_free_mem(bp, reconfig_dev);
4888 	bnxt_hwrm_func_buf_unrgtr(bp);
4889 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
4890 	bp->flags &= ~BNXT_FLAG_REGISTERED;
4891 	bnxt_free_ctx_mem(bp);
4892 	if (!reconfig_dev) {
4893 		bnxt_free_hwrm_resources(bp);
4894 
4895 		if (bp->recovery_info != NULL) {
4896 			rte_free(bp->recovery_info);
4897 			bp->recovery_info = NULL;
4898 		}
4899 	}
4900 
4901 	bnxt_uninit_locks(bp);
4902 	rte_free(bp->ptp_cfg);
4903 	bp->ptp_cfg = NULL;
4904 	return rc;
4905 }
4906 
4907 static int
4908 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
4909 {
4910 	struct bnxt *bp = eth_dev->data->dev_private;
4911 	int rc;
4912 
4913 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
4914 		return -EPERM;
4915 
4916 	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
4917 
4918 	rc = bnxt_uninit_resources(bp, false);
4919 
4920 	if (bp->tx_mem_zone) {
4921 		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
4922 		bp->tx_mem_zone = NULL;
4923 	}
4924 
4925 	if (bp->rx_mem_zone) {
4926 		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
4927 		bp->rx_mem_zone = NULL;
4928 	}
4929 
4930 	if (eth_dev->data->dev_started)
4931 		bnxt_dev_close_op(eth_dev);
4932 	if (bp->pf.vf_info)
4933 		rte_free(bp->pf.vf_info);
4934 	eth_dev->dev_ops = NULL;
4935 	eth_dev->rx_pkt_burst = NULL;
4936 	eth_dev->tx_pkt_burst = NULL;
4937 
4938 	return rc;
4939 }
4940 
4941 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4942 	struct rte_pci_device *pci_dev)
4943 {
4944 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
4945 		bnxt_dev_init);
4946 }
4947 
4948 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
4949 {
4950 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4951 		return rte_eth_dev_pci_generic_remove(pci_dev,
4952 				bnxt_dev_uninit);
4953 	else
4954 		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
4955 }
4956 
4957 static struct rte_pci_driver bnxt_rte_pmd = {
4958 	.id_table = bnxt_pci_id_map,
4959 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
4960 	.probe = bnxt_pci_probe,
4961 	.remove = bnxt_pci_remove,
4962 };
4963 
4964 static bool
4965 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4966 {
4967 	if (strcmp(dev->device->driver->name, drv->driver.name))
4968 		return false;
4969 
4970 	return true;
4971 }
4972 
4973 bool is_bnxt_supported(struct rte_eth_dev *dev)
4974 {
4975 	return is_device_supported(dev, &bnxt_rte_pmd);
4976 }
4977 
4978 RTE_INIT(bnxt_init_log)
4979 {
4980 	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
4981 	if (bnxt_logtype_driver >= 0)
4982 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
4983 }
4984 
4985 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
4986 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
4987 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
4988