xref: /f-stack/dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision 4b05018f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <inttypes.h>
7 #include <stdbool.h>
8 
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14 
15 #include "bnxt.h"
16 #include "bnxt_cpr.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_irq.h"
20 #include "bnxt_ring.h"
21 #include "bnxt_rxq.h"
22 #include "bnxt_rxr.h"
23 #include "bnxt_stats.h"
24 #include "bnxt_txq.h"
25 #include "bnxt_txr.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29 #include "bnxt_util.h"
30 
31 #define DRV_MODULE_NAME		"bnxt"
32 static const char bnxt_version[] =
33 	"Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
34 int bnxt_logtype_driver;
35 
36 #define PCI_VENDOR_ID_BROADCOM 0x14E4
37 
38 #define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
39 #define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
40 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
41 #define BROADCOM_DEV_ID_57414_VF 0x16c1
42 #define BROADCOM_DEV_ID_57301 0x16c8
43 #define BROADCOM_DEV_ID_57302 0x16c9
44 #define BROADCOM_DEV_ID_57304_PF 0x16ca
45 #define BROADCOM_DEV_ID_57304_VF 0x16cb
46 #define BROADCOM_DEV_ID_57417_MF 0x16cc
47 #define BROADCOM_DEV_ID_NS2 0x16cd
48 #define BROADCOM_DEV_ID_57311 0x16ce
49 #define BROADCOM_DEV_ID_57312 0x16cf
50 #define BROADCOM_DEV_ID_57402 0x16d0
51 #define BROADCOM_DEV_ID_57404 0x16d1
52 #define BROADCOM_DEV_ID_57406_PF 0x16d2
53 #define BROADCOM_DEV_ID_57406_VF 0x16d3
54 #define BROADCOM_DEV_ID_57402_MF 0x16d4
55 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
56 #define BROADCOM_DEV_ID_57412 0x16d6
57 #define BROADCOM_DEV_ID_57414 0x16d7
58 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8
59 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9
60 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
61 #define BROADCOM_DEV_ID_57412_MF 0x16de
62 #define BROADCOM_DEV_ID_57314 0x16df
63 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0
64 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
65 #define BROADCOM_DEV_ID_57417_SFP 0x16e2
66 #define BROADCOM_DEV_ID_57416_SFP 0x16e3
67 #define BROADCOM_DEV_ID_57317_SFP 0x16e4
68 #define BROADCOM_DEV_ID_57404_MF 0x16e7
69 #define BROADCOM_DEV_ID_57406_MF 0x16e8
70 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
71 #define BROADCOM_DEV_ID_57407_MF 0x16ea
72 #define BROADCOM_DEV_ID_57414_MF 0x16ec
73 #define BROADCOM_DEV_ID_57416_MF 0x16ee
74 #define BROADCOM_DEV_ID_58802 0xd802
75 #define BROADCOM_DEV_ID_58804 0xd804
76 #define BROADCOM_DEV_ID_58808 0x16f0
77 #define BROADCOM_DEV_ID_58802_VF 0xd800
78 
79 static const struct rte_pci_id bnxt_pci_id_map[] = {
80 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
81 			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
82 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
83 			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
84 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
85 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
86 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
87 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
88 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
89 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
90 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
91 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
92 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
93 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
94 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
95 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
96 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
97 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
98 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
99 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
100 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
101 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
102 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
103 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
104 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
105 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
106 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
107 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
108 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
109 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
110 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
111 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
112 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
113 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
114 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
115 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
122 	{ .vendor_id = 0, /* sentinel */ },
123 };
124 
125 #define BNXT_ETH_RSS_SUPPORT (	\
126 	ETH_RSS_IPV4 |		\
127 	ETH_RSS_NONFRAG_IPV4_TCP |	\
128 	ETH_RSS_NONFRAG_IPV4_UDP |	\
129 	ETH_RSS_IPV6 |		\
130 	ETH_RSS_NONFRAG_IPV6_TCP |	\
131 	ETH_RSS_NONFRAG_IPV6_UDP)
132 
133 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
134 				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
135 				     DEV_TX_OFFLOAD_TCP_CKSUM | \
136 				     DEV_TX_OFFLOAD_UDP_CKSUM | \
137 				     DEV_TX_OFFLOAD_TCP_TSO | \
138 				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
139 				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
140 				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
141 				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
142 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
143 				     DEV_TX_OFFLOAD_MULTI_SEGS)
144 
145 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
146 				     DEV_RX_OFFLOAD_VLAN_STRIP | \
147 				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
148 				     DEV_RX_OFFLOAD_UDP_CKSUM | \
149 				     DEV_RX_OFFLOAD_TCP_CKSUM | \
150 				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
151 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
152 				     DEV_RX_OFFLOAD_KEEP_CRC | \
153 				     DEV_RX_OFFLOAD_TCP_LRO)
154 
155 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
156 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
157 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
158 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
159 
160 /***********************/
161 
162 /*
163  * High level utility functions
164  */
165 
166 static void bnxt_free_mem(struct bnxt *bp)
167 {
168 	bnxt_free_filter_mem(bp);
169 	bnxt_free_vnic_attributes(bp);
170 	bnxt_free_vnic_mem(bp);
171 
172 	bnxt_free_stats(bp);
173 	bnxt_free_tx_rings(bp);
174 	bnxt_free_rx_rings(bp);
175 }
176 
177 static int bnxt_alloc_mem(struct bnxt *bp)
178 {
179 	int rc;
180 
181 	rc = bnxt_alloc_vnic_mem(bp);
182 	if (rc)
183 		goto alloc_mem_err;
184 
185 	rc = bnxt_alloc_vnic_attributes(bp);
186 	if (rc)
187 		goto alloc_mem_err;
188 
189 	rc = bnxt_alloc_filter_mem(bp);
190 	if (rc)
191 		goto alloc_mem_err;
192 
193 	return 0;
194 
195 alloc_mem_err:
196 	bnxt_free_mem(bp);
197 	return rc;
198 }
199 
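/*
 * Bring the NIC to an operational state: allocate HWRM stat contexts,
 * rings and ring groups, configure each VNIC (filters, RSS, VLAN strip,
 * LRO), set up the Rx interrupt-vector mapping and apply the link config.
 */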
200 static int bnxt_init_chip(struct bnxt *bp)
201 {
202 	struct bnxt_rx_queue *rxq;
203 	struct rte_eth_link new;
204 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
205 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
206 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
207 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
208 	uint32_t intr_vector = 0;
209 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
210 	uint32_t vec = BNXT_MISC_VEC_ID;
211 	unsigned int i, j;
212 	int rc;
213 
214 	if (bp->eth_dev->data->mtu > ETHER_MTU) {
215 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
216 			DEV_RX_OFFLOAD_JUMBO_FRAME;
217 		bp->flags |= BNXT_FLAG_JUMBO;
218 	} else {
219 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
220 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
221 		bp->flags &= ~BNXT_FLAG_JUMBO;
222 	}
223 
224 	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
225 	if (rc) {
226 		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
227 		goto err_out;
228 	}
229 
230 	rc = bnxt_alloc_hwrm_rings(bp);
231 	if (rc) {
232 		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
233 		goto err_out;
234 	}
235 
236 	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
237 	if (rc) {
238 		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
239 		goto err_out;
240 	}
241 
242 	rc = bnxt_mq_rx_configure(bp);
243 	if (rc) {
244 		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
245 		goto err_out;
246 	}
247 
248 	/* VNIC configuration */
249 	for (i = 0; i < bp->nr_vnics; i++) {
250 		struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
251 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
252 		uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
253 
254 		vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
255 		if (!vnic->fw_grp_ids) {
256 			PMD_DRV_LOG(ERR,
257 				    "Failed to alloc %d bytes for group ids\n",
258 				    size);
259 			rc = -ENOMEM;
260 			goto err_out;
261 		}
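		/*
		 * memset with -1 fills fw_grp_ids with all-ones
		 * (INVALID_HW_RING_ID) until ring groups are assigned.
		 */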
262 		memset(vnic->fw_grp_ids, -1, size);
263 
264 		PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
265 			    i, vnic, vnic->fw_grp_ids);
266 
267 		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
268 		if (rc) {
269 			PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
270 				i, rc);
271 			goto err_out;
272 		}
273 
274 		/* Alloc RSS context only if RSS mode is enabled */
275 		if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
276 			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
277 			if (rc) {
278 				PMD_DRV_LOG(ERR,
279 					"HWRM vnic %d ctx alloc failure rc: %x\n",
280 					i, rc);
281 				goto err_out;
282 			}
283 		}
284 
285 		/*
286 	 * Firmware sets the PF pair in the default VNIC cfg. If the VLAN strip
287 		 * setting is not available at this time, it will not be
288 		 * configured correctly in the CFA.
289 		 */
290 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
291 			vnic->vlan_strip = true;
292 		else
293 			vnic->vlan_strip = false;
294 
295 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
296 		if (rc) {
297 			PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
298 				i, rc);
299 			goto err_out;
300 		}
301 
302 		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
303 		if (rc) {
304 			PMD_DRV_LOG(ERR,
305 				"HWRM vnic %d filter failure rc: %x\n",
306 				i, rc);
307 			goto err_out;
308 		}
309 
310 		for (j = 0; j < bp->rx_nr_rings; j++) {
311 			rxq = bp->eth_dev->data->rx_queues[j];
312 
313 			PMD_DRV_LOG(DEBUG,
314 				    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
315 				    j, rxq->vnic, rxq->vnic->fw_grp_ids);
316 
317 			if (rxq->rx_deferred_start)
318 				rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
319 		}
320 
321 		rc = bnxt_vnic_rss_configure(bp, vnic);
322 		if (rc) {
323 			PMD_DRV_LOG(ERR,
324 				    "HWRM vnic set RSS failure rc: %x\n", rc);
325 			goto err_out;
326 		}
327 
328 		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
329 
330 		if (bp->eth_dev->data->dev_conf.rxmode.offloads &
331 		    DEV_RX_OFFLOAD_TCP_LRO)
332 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
333 		else
334 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
335 	}
336 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
337 	if (rc) {
338 		PMD_DRV_LOG(ERR,
339 			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
340 		goto err_out;
341 	}
342 
343 	/* check and configure queue intr-vector mapping */
344 	if ((rte_intr_cap_multiple(intr_handle) ||
345 	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
346 	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
347 		intr_vector = bp->eth_dev->data->nb_rx_queues;
348 		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
349 		if (intr_vector > bp->rx_cp_nr_rings) {
350 			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
351 					bp->rx_cp_nr_rings);
352 			return -ENOTSUP;
353 		}
354 		rc = rte_intr_efd_enable(intr_handle, intr_vector);
355 		if (rc)
356 			return rc;
357 	}
358 
359 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
360 		intr_handle->intr_vec =
361 			rte_zmalloc("intr_vec",
362 				    bp->eth_dev->data->nb_rx_queues *
363 				    sizeof(int), 0);
364 		if (intr_handle->intr_vec == NULL) {
365 			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
366 				" intr_vec", bp->eth_dev->data->nb_rx_queues);
367 			rc = -ENOMEM;
368 			goto err_disable;
369 		}
370 		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
371 			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
372 			 intr_handle->intr_vec, intr_handle->nb_efd,
373 			intr_handle->max_intr);
374 		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
375 		     queue_id++) {
376 			intr_handle->intr_vec[queue_id] =
377 							vec + BNXT_RX_VEC_START;
378 			if (vec < base + intr_handle->nb_efd - 1)
379 				vec++;
380 		}
381 	}
382 
383 	/* enable uio/vfio intr/eventfd mapping */
384 	rc = rte_intr_enable(intr_handle);
385 	if (rc)
386 		goto err_free;
387 
388 	rc = bnxt_get_hwrm_link_config(bp, &new);
389 	if (rc) {
390 		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
391 		goto err_free;
392 	}
393 
394 	if (!bp->link_info.link_up) {
395 		rc = bnxt_set_hwrm_link_config(bp, true);
396 		if (rc) {
397 			PMD_DRV_LOG(ERR,
398 				"HWRM link config failure rc: %x\n", rc);
399 			goto err_free;
400 		}
401 	}
402 	bnxt_print_link_info(bp->eth_dev);
403 
404 	return 0;
405 
406 err_free:
407 	rte_free(intr_handle->intr_vec);
408 err_disable:
409 	rte_intr_efd_disable(intr_handle);
410 err_out:
411 	/* Some of the error codes returned by FW may not map to errno.h values */
412 	if (rc > 0)
413 		rc = -EIO;
414 
415 	return rc;
416 }
417 
418 static int bnxt_shutdown_nic(struct bnxt *bp)
419 {
420 	bnxt_free_all_hwrm_resources(bp);
421 	bnxt_free_all_filters(bp);
422 	bnxt_free_all_vnics(bp);
423 	return 0;
424 }
425 
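/* One-time software init of ring groups, VNIC and filter bookkeeping. */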
426 static int bnxt_init_nic(struct bnxt *bp)
427 {
428 	int rc;
429 
430 	rc = bnxt_init_ring_grps(bp);
431 	if (rc)
432 		return rc;
433 
434 	bnxt_init_vnics(bp);
435 	bnxt_init_filters(bp);
436 
437 	return 0;
438 }
439 
440 /*
441  * Device configuration and status function
442  */
443 
444 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
445 				  struct rte_eth_dev_info *dev_info)
446 {
447 	struct bnxt *bp = eth_dev->data->dev_private;
448 	uint16_t max_vnics, i, j, vpool, vrxq;
449 	unsigned int max_rx_rings;
450 
451 	/* MAC Specifics */
452 	dev_info->max_mac_addrs = bp->max_l2_ctx;
453 	dev_info->max_hash_mac_addrs = 0;
454 
455 	/* PF/VF specifics */
456 	if (BNXT_PF(bp))
457 		dev_info->max_vfs = bp->pdev->max_vfs;
458 	max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx);
459 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
460 	dev_info->max_rx_queues = max_rx_rings;
461 	dev_info->max_tx_queues = max_rx_rings;
462 	dev_info->reta_size = HW_HASH_INDEX_SIZE;
463 	dev_info->hash_key_size = 40;
464 	max_vnics = bp->max_vnics;
465 
466 	/* Fast path specifics */
467 	dev_info->min_rx_bufsize = 1;
468 	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
469 				  + VLAN_TAG_SIZE * 2;
470 
471 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
472 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
473 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
474 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
475 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
476 
477 	/* *INDENT-OFF* */
478 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
479 		.rx_thresh = {
480 			.pthresh = 8,
481 			.hthresh = 8,
482 			.wthresh = 0,
483 		},
484 		.rx_free_thresh = 32,
485 		/* If no descriptors available, pkts are dropped by default */
486 		.rx_drop_en = 1,
487 	};
488 
489 	dev_info->default_txconf = (struct rte_eth_txconf) {
490 		.tx_thresh = {
491 			.pthresh = 32,
492 			.hthresh = 0,
493 			.wthresh = 0,
494 		},
495 		.tx_free_thresh = 32,
496 		.tx_rs_thresh = 32,
497 	};
498 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
499 
500 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
501 	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
502 	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
503 	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
504 	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
505 
506 	/* *INDENT-ON* */
507 
508 	/*
509 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
510 	 *       need further investigation.
511 	 */
512 
513 	/* VMDq resources */
514 	vpool = 64; /* ETH_64_POOLS */
515 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
516 	for (i = 0; i < 4; vpool >>= 1, i++) {
517 		if (max_vnics > vpool) {
518 			for (j = 0; j < 5; vrxq >>= 1, j++) {
519 				if (dev_info->max_rx_queues > vrxq) {
520 					if (vpool > vrxq)
521 						vpool = vrxq;
522 					goto found;
523 				}
524 			}
525 			/* Not enough resources to support VMDq */
526 			break;
527 		}
528 	}
529 	/* Not enough resources to support VMDq */
530 	vpool = 0;
531 	vrxq = 0;
532 found:
533 	dev_info->max_vmdq_pools = vpool;
534 	dev_info->vmdq_queue_num = vrxq;
535 
536 	dev_info->vmdq_pool_base = 0;
537 	dev_info->vmdq_queue_base = 0;
538 }
539 
540 /* Configure the device based on the configuration provided */
541 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
542 {
543 	struct bnxt *bp = eth_dev->data->dev_private;
544 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
545 	int rc;
546 
547 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
548 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
549 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
550 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
551 
552 	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
553 		rc = bnxt_hwrm_check_vf_rings(bp);
554 		if (rc) {
555 			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
556 			return -ENOSPC;
557 		}
558 
559 		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
560 		if (rc) {
561 			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
562 			return -ENOSPC;
563 		}
564 	} else {
565 		/* legacy driver needs to get updated values */
566 		rc = bnxt_hwrm_func_qcaps(bp);
567 		if (rc) {
568 			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
569 			return rc;
570 		}
571 	}
572 
573 	/* Validate the requested queue counts against available resources */
574 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
575 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
576 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
577 	    bp->max_cp_rings ||
578 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
579 	    bp->max_stat_ctx ||
580 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
581 	    (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
582 	     bp->max_vnics < eth_dev->data->nb_rx_queues)) {
583 		PMD_DRV_LOG(ERR,
584 			"Insufficient resources to support requested config\n");
585 		PMD_DRV_LOG(ERR,
586 			"Num Queues Requested: Tx %d, Rx %d\n",
587 			eth_dev->data->nb_tx_queues,
588 			eth_dev->data->nb_rx_queues);
589 		PMD_DRV_LOG(ERR,
590 			"MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
591 			bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
592 			bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
593 		return -ENOSPC;
594 	}
595 
596 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
597 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
598 
599 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
600 		eth_dev->data->mtu =
601 				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
602 				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
603 				BNXT_NUM_VLANS;
604 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
605 	}
606 	return 0;
607 }
608 
609 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
610 {
611 	struct rte_eth_link *link = &eth_dev->data->dev_link;
612 
613 	if (link->link_status)
614 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
615 			eth_dev->data->port_id,
616 			(uint32_t)link->link_speed,
617 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
618 			("full-duplex") : ("half-duplex"));
619 	else
620 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
621 			eth_dev->data->port_id);
622 }
623 
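/*
 * dev_start: initialize the chip, refresh link status and apply the
 * requested VLAN offloads before enabling interrupts.
 */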
624 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
625 {
626 	struct bnxt *bp = eth_dev->data->dev_private;
627 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
628 	int vlan_mask = 0;
629 	int rc;
630 
631 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
632 		PMD_DRV_LOG(ERR,
633 			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
634 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
635 	}
636 
637 	rc = bnxt_init_chip(bp);
638 	if (rc)
639 		goto error;
640 
641 	bnxt_link_update_op(eth_dev, 1);
642 
643 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
644 		vlan_mask |= ETH_VLAN_FILTER_MASK;
645 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
646 		vlan_mask |= ETH_VLAN_STRIP_MASK;
647 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
648 	if (rc)
649 		goto error;
650 
651 	bnxt_enable_int(bp);
652 	bp->flags |= BNXT_FLAG_INIT_DONE;
653 	bp->dev_stopped = 0;
654 	return 0;
655 
656 error:
657 	bnxt_shutdown_nic(bp);
658 	bnxt_free_tx_mbufs(bp);
659 	bnxt_free_rx_mbufs(bp);
660 	return rc;
661 }
662 
663 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
664 {
665 	struct bnxt *bp = eth_dev->data->dev_private;
666 	int rc = 0;
667 
668 	if (!bp->link_info.link_up)
669 		rc = bnxt_set_hwrm_link_config(bp, true);
670 	if (!rc)
671 		eth_dev->data->dev_link.link_status = 1;
672 
673 	bnxt_print_link_info(eth_dev);
674 	return 0;
675 }
676 
677 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
678 {
679 	struct bnxt *bp = eth_dev->data->dev_private;
680 
681 	eth_dev->data->dev_link.link_status = 0;
682 	bnxt_set_hwrm_link_config(bp, false);
683 	bp->link_info.link_up = 0;
684 
685 	return 0;
686 }
687 
688 /* Unload the driver, release resources */
689 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
690 {
691 	struct bnxt *bp = eth_dev->data->dev_private;
692 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
693 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
694 
695 	bnxt_disable_int(bp);
696 
697 	/* disable uio/vfio intr/eventfd mapping */
698 	rte_intr_disable(intr_handle);
699 
700 	bp->flags &= ~BNXT_FLAG_INIT_DONE;
701 	if (bp->eth_dev->data->dev_started) {
702 		/* TBD: STOP HW queues DMA */
703 		eth_dev->data->dev_link.link_status = 0;
704 	}
705 	bnxt_set_hwrm_link_config(bp, false);
706 
707 	/* Clean queue intr-vector mapping */
708 	rte_intr_efd_disable(intr_handle);
709 	if (intr_handle->intr_vec != NULL) {
710 		rte_free(intr_handle->intr_vec);
711 		intr_handle->intr_vec = NULL;
712 	}
713 
714 	bnxt_hwrm_port_clr_stats(bp);
715 	bnxt_free_tx_mbufs(bp);
716 	bnxt_free_rx_mbufs(bp);
717 	bnxt_shutdown_nic(bp);
718 	bp->dev_stopped = 1;
719 }
720 
721 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
722 {
723 	struct bnxt *bp = eth_dev->data->dev_private;
724 
725 	if (bp->dev_stopped == 0)
726 		bnxt_dev_stop_op(eth_dev);
727 
728 	if (eth_dev->data->mac_addrs != NULL) {
729 		rte_free(eth_dev->data->mac_addrs);
730 		eth_dev->data->mac_addrs = NULL;
731 	}
732 	if (bp->grp_info != NULL) {
733 		rte_free(bp->grp_info);
734 		bp->grp_info = NULL;
735 	}
736 
737 	bnxt_dev_uninit(eth_dev);
738 }
739 
740 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
741 				    uint32_t index)
742 {
743 	struct bnxt *bp = eth_dev->data->dev_private;
744 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
745 	struct bnxt_vnic_info *vnic;
746 	struct bnxt_filter_info *filter, *temp_filter;
747 	uint32_t i;
748 
749 	/*
750 	 * Loop through all VNICs from the specified filter flow pools to
751 	 * remove the corresponding MAC addr filter
752 	 */
753 	for (i = 0; i < bp->nr_vnics; i++) {
754 		if (!(pool_mask & (1ULL << i)))
755 			continue;
756 
757 		vnic = &bp->vnic_info[i];
758 		filter = STAILQ_FIRST(&vnic->filter);
759 		while (filter) {
760 			temp_filter = STAILQ_NEXT(filter, next);
761 			if (filter->mac_index == index) {
762 				STAILQ_REMOVE(&vnic->filter, filter,
763 						bnxt_filter_info, next);
764 				bnxt_hwrm_clear_l2_filter(bp, filter);
765 				filter->mac_index = INVALID_MAC_INDEX;
766 				memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
767 				STAILQ_INSERT_TAIL(&bp->free_filter_list,
768 						   filter, next);
769 			}
770 			filter = temp_filter;
771 		}
772 	}
773 }
774 
775 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
776 				struct ether_addr *mac_addr,
777 				uint32_t index, uint32_t pool)
778 {
779 	struct bnxt *bp = eth_dev->data->dev_private;
780 	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
781 	struct bnxt_filter_info *filter;
782 	int rc = 0;
783 
784 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
785 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
786 		return -ENOTSUP;
787 	}
788 
789 	if (!vnic) {
790 		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
791 		return -EINVAL;
792 	}
793 	/* Attach requested MAC address to the new l2_filter */
794 	STAILQ_FOREACH(filter, &vnic->filter, next) {
795 		if (filter->mac_index == index) {
796 			PMD_DRV_LOG(ERR,
797 				"MAC addr already exists for pool %d\n", pool);
798 			return 0;
799 		}
800 	}
801 	filter = bnxt_alloc_filter(bp);
802 	if (!filter) {
803 		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
804 		return -ENODEV;
805 	}
806 
807 	filter->mac_index = index;
808 	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
809 
810 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
811 	if (!rc) {
812 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
813 	} else {
814 		filter->mac_index = INVALID_MAC_INDEX;
815 		memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
816 		bnxt_free_filter(bp, filter);
817 	}
818 
819 	return rc;
820 }
821 
822 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
823 {
824 	int rc = 0;
825 	struct bnxt *bp = eth_dev->data->dev_private;
826 	struct rte_eth_link new;
827 	unsigned int cnt = BNXT_LINK_WAIT_CNT;
828 
829 	memset(&new, 0, sizeof(new));
830 	do {
831 		/* Retrieve link info from hardware */
832 		rc = bnxt_get_hwrm_link_config(bp, &new);
833 		if (rc) {
834 			new.link_speed = ETH_LINK_SPEED_100M;
835 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
836 			PMD_DRV_LOG(ERR,
837 				"Failed to retrieve link rc = 0x%x!\n", rc);
838 			goto out;
839 		}
840 
841 		if (!wait_to_complete || new.link_status)
842 			break;
843 
844 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
845 	} while (cnt--);
846 
847 out:
848 	/* Timed out or success */
849 	if (new.link_status != eth_dev->data->dev_link.link_status ||
850 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
851 		memcpy(&eth_dev->data->dev_link, &new,
852 			sizeof(struct rte_eth_link));
853 
854 		_rte_eth_dev_callback_process(eth_dev,
855 					      RTE_ETH_EVENT_INTR_LSC,
856 					      NULL);
857 
858 		bnxt_print_link_info(eth_dev);
859 	}
860 
861 	return rc;
862 }
863 
864 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
865 {
866 	struct bnxt *bp = eth_dev->data->dev_private;
867 	struct bnxt_vnic_info *vnic;
868 
869 	if (bp->vnic_info == NULL)
870 		return;
871 
872 	vnic = &bp->vnic_info[0];
873 
874 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
875 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
876 }
877 
878 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
879 {
880 	struct bnxt *bp = eth_dev->data->dev_private;
881 	struct bnxt_vnic_info *vnic;
882 
883 	if (bp->vnic_info == NULL)
884 		return;
885 
886 	vnic = &bp->vnic_info[0];
887 
888 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
889 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
890 }
891 
892 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
893 {
894 	struct bnxt *bp = eth_dev->data->dev_private;
895 	struct bnxt_vnic_info *vnic;
896 
897 	if (bp->vnic_info == NULL)
898 		return;
899 
900 	vnic = &bp->vnic_info[0];
901 
902 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
903 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
904 }
905 
906 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
907 {
908 	struct bnxt *bp = eth_dev->data->dev_private;
909 	struct bnxt_vnic_info *vnic;
910 
911 	if (bp->vnic_info == NULL)
912 		return;
913 
914 	vnic = &bp->vnic_info[0];
915 
916 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
917 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
918 }
919 
920 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
921 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
922 {
923 	if (qid >= bp->rx_nr_rings)
924 		return NULL;
925 
926 	return bp->eth_dev->data->rx_queues[qid];
927 }
928 
929 /* Return rxq corresponding to a given rss table ring/group ID. */
930 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
931 {
932 	unsigned int i;
933 
934 	for (i = 0; i < bp->rx_nr_rings; i++) {
935 		if (bp->grp_info[i].fw_grp_id == fwr)
936 			return i;
937 	}
938 
939 	return INVALID_HW_RING_ID;
940 }
941 
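/*
 * Update the RSS redirection table of the default VNIC; each RETA slot is
 * translated to the firmware ring group id of the selected Rx queue.
 */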
942 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
943 			    struct rte_eth_rss_reta_entry64 *reta_conf,
944 			    uint16_t reta_size)
945 {
946 	struct bnxt *bp = eth_dev->data->dev_private;
947 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
948 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
949 	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
950 	uint16_t idx, sft;
951 	int i;
952 
953 	if (!vnic->rss_table)
954 		return -EINVAL;
955 
956 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
957 		return -EINVAL;
958 
959 	if (reta_size != tbl_size) {
960 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
961 			"(%d) must equal the size supported by the hardware "
962 			"(%d)\n", reta_size, tbl_size);
963 		return -EINVAL;
964 	}
965 
966 	for (i = 0; i < reta_size; i++) {
967 		struct bnxt_rx_queue *rxq;
968 
969 		idx = i / RTE_RETA_GROUP_SIZE;
970 		sft = i % RTE_RETA_GROUP_SIZE;
971 
972 		if (!(reta_conf[idx].mask & (1ULL << sft)))
973 			continue;
974 
975 		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
976 		if (!rxq) {
977 			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
978 			return -EINVAL;
979 		}
980 
981 		vnic->rss_table[i] =
982 		    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
983 	}
984 
985 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
986 	return 0;
987 }
988 
989 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
990 			      struct rte_eth_rss_reta_entry64 *reta_conf,
991 			      uint16_t reta_size)
992 {
993 	struct bnxt *bp = eth_dev->data->dev_private;
994 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
995 	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
996 	uint16_t idx, sft, i;
997 
998 	/* Retrieve from the default VNIC */
999 	if (!vnic)
1000 		return -EINVAL;
1001 	if (!vnic->rss_table)
1002 		return -EINVAL;
1003 
1004 	if (reta_size != tbl_size) {
1005 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1006 			"(%d) must equal the size supported by the hardware "
1007 			"(%d)\n", reta_size, tbl_size);
1008 		return -EINVAL;
1009 	}
1010 
1011 	for (idx = 0, i = 0; i < reta_size; i++) {
1012 		idx = i / RTE_RETA_GROUP_SIZE;
1013 		sft = i % RTE_RETA_GROUP_SIZE;
1014 
1015 		if (reta_conf[idx].mask & (1ULL << sft)) {
1016 			uint16_t qid;
1017 
1018 			qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1019 
1020 			if (qid == INVALID_HW_RING_ID) {
1021 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1022 				return -EINVAL;
1023 			}
1024 			reta_conf[idx].reta[sft] = qid;
1025 		}
1026 	}
1027 
1028 	return 0;
1029 }
1030 
1031 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1032 				   struct rte_eth_rss_conf *rss_conf)
1033 {
1034 	struct bnxt *bp = eth_dev->data->dev_private;
1035 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1036 	struct bnxt_vnic_info *vnic;
1037 	uint16_t hash_type = 0;
1038 	unsigned int i;
1039 
1040 	/*
1041 	 * Return -EINVAL if the requested RSS setting is inconsistent with
1042 	 * the mq_mode selected at dev_configure time.
1043 	 */
1044 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1045 		if (!rss_conf->rss_hf)
1046 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
1047 	} else {
1048 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1049 			return -EINVAL;
1050 	}
1051 
1052 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
1053 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
1054 
1055 	if (rss_conf->rss_hf & ETH_RSS_IPV4)
1056 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1057 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1058 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1059 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1060 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1061 	if (rss_conf->rss_hf & ETH_RSS_IPV6)
1062 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1063 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1064 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1065 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1066 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1067 
1068 	/* Update the RSS VNIC(s) */
1069 	for (i = 0; i < bp->nr_vnics; i++) {
1070 		vnic = &bp->vnic_info[i];
1071 		vnic->hash_type = hash_type;
1072 
1073 		/*
1074 		 * Use the supplied key if the key length is
1075 		 * acceptable and the rss_key is not NULL
1076 		 */
1077 		if (rss_conf->rss_key &&
1078 		    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
1079 			memcpy(vnic->rss_hash_key, rss_conf->rss_key,
1080 			       rss_conf->rss_key_len);
1081 
1082 		bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1083 	}
1084 	return 0;
1085 }
1086 
1087 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1088 				     struct rte_eth_rss_conf *rss_conf)
1089 {
1090 	struct bnxt *bp = eth_dev->data->dev_private;
1091 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1092 	int len;
1093 	uint32_t hash_types;
1094 
1095 	/* RSS configuration is the same for all VNICs */
1096 	if (vnic && vnic->rss_hash_key) {
1097 		if (rss_conf->rss_key) {
1098 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1099 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1100 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1101 		}
1102 
1103 		hash_types = vnic->hash_type;
1104 		rss_conf->rss_hf = 0;
1105 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1106 			rss_conf->rss_hf |= ETH_RSS_IPV4;
1107 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1108 		}
1109 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1110 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1111 			hash_types &=
1112 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1113 		}
1114 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1115 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1116 			hash_types &=
1117 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1118 		}
1119 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1120 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1121 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1122 		}
1123 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1124 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1125 			hash_types &=
1126 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1127 		}
1128 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1129 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1130 			hash_types &=
1131 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1132 		}
1133 		if (hash_types) {
1134 			PMD_DRV_LOG(ERR,
1135 				"Unknown RSS config from firmware (%08x), RSS disabled",
1136 				vnic->hash_type);
1137 			return -ENOTSUP;
1138 		}
1139 	} else {
1140 		rss_conf->rss_hf = 0;
1141 	}
1142 	return 0;
1143 }
1144 
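/* Derive the reported flow-control mode from the HWRM pause settings. */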
1145 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1146 			       struct rte_eth_fc_conf *fc_conf)
1147 {
1148 	struct bnxt *bp = dev->data->dev_private;
1149 	struct rte_eth_link link_info;
1150 	int rc;
1151 
1152 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1153 	if (rc)
1154 		return rc;
1155 
1156 	memset(fc_conf, 0, sizeof(*fc_conf));
1157 	if (bp->link_info.auto_pause)
1158 		fc_conf->autoneg = 1;
1159 	switch (bp->link_info.pause) {
1160 	case 0:
1161 		fc_conf->mode = RTE_FC_NONE;
1162 		break;
1163 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1164 		fc_conf->mode = RTE_FC_TX_PAUSE;
1165 		break;
1166 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1167 		fc_conf->mode = RTE_FC_RX_PAUSE;
1168 		break;
1169 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1170 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1171 		fc_conf->mode = RTE_FC_FULL;
1172 		break;
1173 	}
1174 	return 0;
1175 }
1176 
1177 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1178 			       struct rte_eth_fc_conf *fc_conf)
1179 {
1180 	struct bnxt *bp = dev->data->dev_private;
1181 
1182 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1183 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1184 		return -ENOTSUP;
1185 	}
1186 
1187 	switch (fc_conf->mode) {
1188 	case RTE_FC_NONE:
1189 		bp->link_info.auto_pause = 0;
1190 		bp->link_info.force_pause = 0;
1191 		break;
1192 	case RTE_FC_RX_PAUSE:
1193 		if (fc_conf->autoneg) {
1194 			bp->link_info.auto_pause =
1195 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1196 			bp->link_info.force_pause = 0;
1197 		} else {
1198 			bp->link_info.auto_pause = 0;
1199 			bp->link_info.force_pause =
1200 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1201 		}
1202 		break;
1203 	case RTE_FC_TX_PAUSE:
1204 		if (fc_conf->autoneg) {
1205 			bp->link_info.auto_pause =
1206 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1207 			bp->link_info.force_pause = 0;
1208 		} else {
1209 			bp->link_info.auto_pause = 0;
1210 			bp->link_info.force_pause =
1211 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1212 		}
1213 		break;
1214 	case RTE_FC_FULL:
1215 		if (fc_conf->autoneg) {
1216 			bp->link_info.auto_pause =
1217 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1218 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1219 			bp->link_info.force_pause = 0;
1220 		} else {
1221 			bp->link_info.auto_pause = 0;
1222 			bp->link_info.force_pause =
1223 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1224 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1225 		}
1226 		break;
1227 	}
1228 	return bnxt_set_hwrm_link_config(bp, true);
1229 }
1230 
1231 /* Add UDP tunneling port */
1232 static int
1233 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1234 			 struct rte_eth_udp_tunnel *udp_tunnel)
1235 {
1236 	struct bnxt *bp = eth_dev->data->dev_private;
1237 	uint16_t tunnel_type = 0;
1238 	int rc = 0;
1239 
1240 	switch (udp_tunnel->prot_type) {
1241 	case RTE_TUNNEL_TYPE_VXLAN:
1242 		if (bp->vxlan_port_cnt) {
1243 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1244 				udp_tunnel->udp_port);
1245 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1246 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1247 				return -ENOSPC;
1248 			}
1249 			bp->vxlan_port_cnt++;
1250 			return 0;
1251 		}
1252 		tunnel_type =
1253 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1254 		bp->vxlan_port_cnt++;
1255 		break;
1256 	case RTE_TUNNEL_TYPE_GENEVE:
1257 		if (bp->geneve_port_cnt) {
1258 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1259 				udp_tunnel->udp_port);
1260 			if (bp->geneve_port != udp_tunnel->udp_port) {
1261 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
1262 				return -ENOSPC;
1263 			}
1264 			bp->geneve_port_cnt++;
1265 			return 0;
1266 		}
1267 		tunnel_type =
1268 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1269 		bp->geneve_port_cnt++;
1270 		break;
1271 	default:
1272 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1273 		return -ENOTSUP;
1274 	}
1275 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1276 					     tunnel_type);
1277 	return rc;
1278 }
1279 
1280 static int
1281 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1282 			 struct rte_eth_udp_tunnel *udp_tunnel)
1283 {
1284 	struct bnxt *bp = eth_dev->data->dev_private;
1285 	uint16_t tunnel_type = 0;
1286 	uint16_t port = 0;
1287 	int rc = 0;
1288 
1289 	switch (udp_tunnel->prot_type) {
1290 	case RTE_TUNNEL_TYPE_VXLAN:
1291 		if (!bp->vxlan_port_cnt) {
1292 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1293 			return -EINVAL;
1294 		}
1295 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1296 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1297 				udp_tunnel->udp_port, bp->vxlan_port);
1298 			return -EINVAL;
1299 		}
1300 		if (--bp->vxlan_port_cnt)
1301 			return 0;
1302 
1303 		tunnel_type =
1304 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1305 		port = bp->vxlan_fw_dst_port_id;
1306 		break;
1307 	case RTE_TUNNEL_TYPE_GENEVE:
1308 		if (!bp->geneve_port_cnt) {
1309 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1310 			return -EINVAL;
1311 		}
1312 		if (bp->geneve_port != udp_tunnel->udp_port) {
1313 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1314 				udp_tunnel->udp_port, bp->geneve_port);
1315 			return -EINVAL;
1316 		}
1317 		if (--bp->geneve_port_cnt)
1318 			return 0;
1319 
1320 		tunnel_type =
1321 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1322 		port = bp->geneve_fw_dst_port_id;
1323 		break;
1324 	default:
1325 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1326 		return -ENOTSUP;
1327 	}
1328 
1329 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1330 	if (!rc) {
1331 		if (tunnel_type ==
1332 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1333 			bp->vxlan_port = 0;
1334 		if (tunnel_type ==
1335 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1336 			bp->geneve_port = 0;
1337 	}
1338 	return rc;
1339 }
1340 
1341 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1342 {
1343 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1344 	struct bnxt_vnic_info *vnic;
1345 	unsigned int i;
1346 	int rc = 0;
1347 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1348 
1349 	/* Cycle through all VNICs */
1350 	for (i = 0; i < bp->nr_vnics; i++) {
1351 		/*
1352 		 * For each VNIC and each associated filter(s)
1353 		 * if VLAN exists && VLAN matches vlan_id
1354 		 *      remove the MAC+VLAN filter
1355 		 *      add a new MAC only filter
1356 		 * else
1357 		 *      VLAN filter doesn't exist, just skip and continue
1358 		 */
1359 		vnic = &bp->vnic_info[i];
1360 		filter = STAILQ_FIRST(&vnic->filter);
1361 		while (filter) {
1362 			temp_filter = STAILQ_NEXT(filter, next);
1363 
1364 			if (filter->enables & chk &&
1365 			    filter->l2_ovlan == vlan_id) {
1366 				/* Must delete the filter */
1367 				STAILQ_REMOVE(&vnic->filter, filter,
1368 					      bnxt_filter_info, next);
1369 				bnxt_hwrm_clear_l2_filter(bp, filter);
1370 				STAILQ_INSERT_TAIL(&bp->free_filter_list,
1371 						   filter, next);
1372 
1373 				/*
1374 				 * Need to examine to see if the MAC
1375 				 * filter already existed or not before
1376 				 * allocating a new one
1377 				 */
1378 
1379 				new_filter = bnxt_alloc_filter(bp);
1380 				if (!new_filter) {
1381 					PMD_DRV_LOG(ERR,
1382 							"MAC/VLAN filter alloc failed\n");
1383 					rc = -ENOMEM;
1384 					goto exit;
1385 				}
1386 				STAILQ_INSERT_TAIL(&vnic->filter,
1387 						new_filter, next);
1388 				/* Inherit MAC from previous filter */
1389 				new_filter->mac_index =
1390 					filter->mac_index;
1391 				memcpy(new_filter->l2_addr, filter->l2_addr,
1392 				       ETHER_ADDR_LEN);
1393 				/* MAC only filter */
1394 				rc = bnxt_hwrm_set_l2_filter(bp,
1395 							     vnic->fw_vnic_id,
1396 							     new_filter);
1397 				if (rc)
1398 					goto exit;
1399 				PMD_DRV_LOG(INFO,
1400 					    "Del Vlan filter for %d\n",
1401 					    vlan_id);
1402 			}
1403 			filter = temp_filter;
1404 		}
1405 	}
1406 exit:
1407 	return rc;
1408 }
1409 
1410 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1411 {
1412 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1413 	struct bnxt_vnic_info *vnic;
1414 	unsigned int i;
1415 	int rc = 0;
1416 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
1417 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
1418 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
1419 
1420 	/* Cycle through all VNICs */
1421 	for (i = 0; i < bp->nr_vnics; i++) {
1422 		/*
1423 		 * For each VNIC and each associated filter(s)
1424 		 * if VLAN exists:
1425 		 *   if VLAN matches vlan_id
1426 		 *      VLAN filter already exists, just skip and continue
1427 		 *   else
1428 		 *      add a new MAC+VLAN filter
1429 		 * else
1430 		 *   Remove the old MAC only filter
1431 		 *    Add a new MAC+VLAN filter
1432 		 */
1433 		vnic = &bp->vnic_info[i];
1434 		filter = STAILQ_FIRST(&vnic->filter);
1435 		while (filter) {
1436 			temp_filter = STAILQ_NEXT(filter, next);
1437 
1438 			if (filter->enables & chk) {
1439 				if (filter->l2_ivlan == vlan_id)
1440 					goto cont;
1441 			} else {
1442 				/* Must delete the MAC filter */
1443 				STAILQ_REMOVE(&vnic->filter, filter,
1444 						bnxt_filter_info, next);
1445 				bnxt_hwrm_clear_l2_filter(bp, filter);
1446 				filter->l2_ovlan = 0;
1447 				STAILQ_INSERT_TAIL(&bp->free_filter_list,
1448 						   filter, next);
1449 			}
1450 			new_filter = bnxt_alloc_filter(bp);
1451 			if (!new_filter) {
1452 				PMD_DRV_LOG(ERR,
1453 						"MAC/VLAN filter alloc failed\n");
1454 				rc = -ENOMEM;
1455 				goto exit;
1456 			}
1457 			STAILQ_INSERT_TAIL(&vnic->filter, new_filter, next);
1458 			/* Inherit MAC from the previous filter */
1459 			new_filter->mac_index = filter->mac_index;
1460 			memcpy(new_filter->l2_addr, filter->l2_addr,
1461 			       ETHER_ADDR_LEN);
1462 			/* MAC + VLAN ID filter */
1463 			new_filter->l2_ivlan = vlan_id;
1464 			new_filter->l2_ivlan_mask = 0xF000;
1465 			new_filter->enables |= en;
1466 			rc = bnxt_hwrm_set_l2_filter(bp,
1467 					vnic->fw_vnic_id,
1468 					new_filter);
1469 			if (rc)
1470 				goto exit;
1471 			PMD_DRV_LOG(INFO,
1472 				    "Added Vlan filter for %d\n", vlan_id);
1473 cont:
1474 			filter = temp_filter;
1475 		}
1476 	}
1477 exit:
1478 	return rc;
1479 }
1480 
1481 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1482 		uint16_t vlan_id, int on)
1483 {
1484 	struct bnxt *bp = eth_dev->data->dev_private;
1485 
1486 	/* These operations apply to ALL existing MAC/VLAN filters */
1487 	if (on)
1488 		return bnxt_add_vlan_filter(bp, vlan_id);
1489 	else
1490 		return bnxt_del_vlan_filter(bp, vlan_id);
1491 }
1492 
1493 static int
1494 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1495 {
1496 	struct bnxt *bp = dev->data->dev_private;
1497 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1498 	unsigned int i;
1499 
1500 	if (mask & ETH_VLAN_FILTER_MASK) {
1501 		if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
1502 			/* Remove any VLAN filters programmed */
1503 			for (i = 0; i < 4095; i++)
1504 				bnxt_del_vlan_filter(bp, i);
1505 		}
1506 		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
1507 			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
1508 	}
1509 
1510 	if (mask & ETH_VLAN_STRIP_MASK) {
1511 		/* Enable or disable VLAN stripping */
1512 		for (i = 0; i < bp->nr_vnics; i++) {
1513 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1514 			if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1515 				vnic->vlan_strip = true;
1516 			else
1517 				vnic->vlan_strip = false;
1518 			bnxt_hwrm_vnic_cfg(bp, vnic);
1519 		}
1520 		PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
1521 			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
1522 	}
1523 
1524 	if (mask & ETH_VLAN_EXTEND_MASK)
1525 		PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
1526 
1527 	return 0;
1528 }
1529 
1530 static int
1531 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1532 {
1533 	struct bnxt *bp = dev->data->dev_private;
1534 	/* Default Filter is tied to VNIC 0 */
1535 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1536 	struct bnxt_filter_info *filter;
1537 	int rc;
1538 
1539 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1540 		return -EPERM;
1541 
1542 	if (is_zero_ether_addr(addr))
1543 		return -EINVAL;
1544 
1545 	STAILQ_FOREACH(filter, &vnic->filter, next) {
1546 		/* Default Filter is at Index 0 */
1547 		if (filter->mac_index != 0)
1548 			continue;
1549 
1550 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1551 		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1552 		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1553 		filter->enables |=
1554 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1555 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1556 
1557 		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1558 		if (rc)
1559 			return rc;
1560 
1561 		memcpy(bp->mac_addr, addr, ETHER_ADDR_LEN);
1562 		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
1563 		return 0;
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 static int
1570 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1571 			  struct ether_addr *mc_addr_set,
1572 			  uint32_t nb_mc_addr)
1573 {
1574 	struct bnxt *bp = eth_dev->data->dev_private;
1575 	char *mc_addr_list = (char *)mc_addr_set;
1576 	struct bnxt_vnic_info *vnic;
1577 	uint32_t off = 0, i = 0;
1578 
1579 	vnic = &bp->vnic_info[0];
1580 
1581 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
1582 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1583 		goto allmulti;
1584 	}
1585 
1586 	/* TODO Check for Duplicate mcast addresses */
1587 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1588 	for (i = 0; i < nb_mc_addr; i++) {
1589 		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
1590 		off += ETHER_ADDR_LEN;
1591 	}
1592 
1593 	vnic->mc_addr_cnt = i;
1594 
1595 allmulti:
1596 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1597 }
1598 
1599 static int
1600 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1601 {
1602 	struct bnxt *bp = dev->data->dev_private;
1603 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
1604 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
1605 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
1606 	int ret;
1607 
1608 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
1609 			fw_major, fw_minor, fw_updt);
1610 
1611 	ret += 1; /* add the size of '\0' */
1612 	if (fw_size < (uint32_t)ret)
1613 		return ret;
1614 	else
1615 		return 0;
1616 }
1617 
1618 static void
1619 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1620 	struct rte_eth_rxq_info *qinfo)
1621 {
1622 	struct bnxt_rx_queue *rxq;
1623 
1624 	rxq = dev->data->rx_queues[queue_id];
1625 
1626 	qinfo->mp = rxq->mb_pool;
1627 	qinfo->scattered_rx = dev->data->scattered_rx;
1628 	qinfo->nb_desc = rxq->nb_rx_desc;
1629 
1630 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1631 	qinfo->conf.rx_drop_en = 0;
1632 	qinfo->conf.rx_deferred_start = 0;
1633 }
1634 
1635 static void
1636 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1637 	struct rte_eth_txq_info *qinfo)
1638 {
1639 	struct bnxt_tx_queue *txq;
1640 
1641 	txq = dev->data->tx_queues[queue_id];
1642 
1643 	qinfo->nb_desc = txq->nb_tx_desc;
1644 
1645 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1646 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1647 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1648 
1649 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1650 	qinfo->conf.tx_rs_thresh = 0;
1651 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1652 }
1653 
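/*
 * Change the MTU: toggle the jumbo flag and offload, update
 * max_rx_pkt_len, and reconfigure the MRU (and Rx buffer placement if
 * needed) for every VNIC.
 */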
1654 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
1655 {
1656 	struct bnxt *bp = eth_dev->data->dev_private;
1657 	struct rte_eth_dev_info dev_info;
1658 	uint32_t rc = 0;
1659 	uint32_t i;
1660 
1661 	bnxt_dev_info_get_op(eth_dev, &dev_info);
1662 
1663 	if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
1664 		PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
1665 			ETHER_MIN_MTU, BNXT_MAX_MTU);
1666 		return -EINVAL;
1667 	}
1668 
1669 	if (new_mtu > ETHER_MTU) {
1670 		bp->flags |= BNXT_FLAG_JUMBO;
1671 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
1672 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1673 	} else {
1674 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
1675 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1676 		bp->flags &= ~BNXT_FLAG_JUMBO;
1677 	}
1678 
1679 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
1680 		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1681 
1682 	eth_dev->data->mtu = new_mtu;
1683 	PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
1684 
1685 	for (i = 0; i < bp->nr_vnics; i++) {
1686 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1687 		uint16_t size = 0;
1688 
1689 		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1690 					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1691 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1692 		if (rc)
1693 			break;
1694 
1695 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1696 		size -= RTE_PKTMBUF_HEADROOM;
1697 
1698 		if (size < new_mtu) {
1699 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1700 			if (rc)
1701 				return rc;
1702 		}
1703 	}
1704 
1705 	return rc;
1706 }
1707 
1708 static int
1709 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
1710 {
1711 	struct bnxt *bp = dev->data->dev_private;
1712 	uint16_t vlan = bp->vlan;
1713 	int rc;
1714 
1715 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1716 		PMD_DRV_LOG(ERR,
1717 			"PVID cannot be modified for this function\n");
1718 		return -ENOTSUP;
1719 	}
1720 	bp->vlan = on ? pvid : 0;
1721 
1722 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
1723 	if (rc)
1724 		bp->vlan = vlan;
1725 	return rc;
1726 }
1727 
1728 static int
1729 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
1730 {
1731 	struct bnxt *bp = dev->data->dev_private;
1732 
1733 	return bnxt_hwrm_port_led_cfg(bp, true);
1734 }
1735 
1736 static int
1737 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
1738 {
1739 	struct bnxt *bp = dev->data->dev_private;
1740 
1741 	return bnxt_hwrm_port_led_cfg(bp, false);
1742 }
1743 
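/*
 * Count completed Rx descriptors by walking the completion ring; the
 * cursor advances by the aggregation-buffer count of each completion.
 */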
1744 static uint32_t
1745 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1746 {
1747 	uint32_t desc = 0, raw_cons = 0, cons;
1748 	struct bnxt_cp_ring_info *cpr;
1749 	struct bnxt_rx_queue *rxq;
1750 	struct rx_pkt_cmpl *rxcmp;
1751 	uint16_t cmp_type;
1752 	uint8_t cmp = 1;
1753 	bool valid;
1754 
1755 	rxq = dev->data->rx_queues[rx_queue_id];
1756 	cpr = rxq->cp_ring;
1757 	valid = cpr->valid;
1758 
1759 	while (raw_cons < rxq->nb_rx_desc) {
1760 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1761 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1762 
1763 		if (!CMPL_VALID(rxcmp, valid))
1764 			goto nothing_to_do;
1765 		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1766 		cmp_type = CMP_TYPE(rxcmp);
1767 		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1768 			cmp = (rte_le_to_cpu_32(
1769 					((struct rx_tpa_end_cmpl *)
1770 					 (rxcmp))->agg_bufs_v1) &
1771 			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1772 				RX_TPA_END_CMPL_AGG_BUFS_SFT;
1773 			desc++;
1774 		} else if (cmp_type == 0x11) { /* L2 Rx packet completion */
1775 			desc++;
1776 			cmp = (rxcmp->agg_bufs_v1 &
1777 				   RX_PKT_CMPL_AGG_BUFS_MASK) >>
1778 				RX_PKT_CMPL_AGG_BUFS_SFT;
1779 		} else {
1780 			cmp = 1;
1781 		}
1782 nothing_to_do:
1783 		raw_cons += cmp ? cmp : 2;
1784 	}
1785 
1786 	return desc;
1787 }
1788 
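/*
 * Report the state of an Rx descriptor based on the completion ring
 * valid bit and whether an mbuf is posted in the software ring.
 */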
1789 static int
1790 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
1791 {
1792 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
1793 	struct bnxt_rx_ring_info *rxr;
1794 	struct bnxt_cp_ring_info *cpr;
1795 	struct bnxt_sw_rx_bd *rx_buf;
1796 	struct rx_pkt_cmpl *rxcmp;
1797 	uint32_t cons, cp_cons;
1798 
1799 	if (!rxq)
1800 		return -EINVAL;
1801 
1802 	cpr = rxq->cp_ring;
1803 	rxr = rxq->rx_ring;
1804 
1805 	if (offset >= rxq->nb_rx_desc)
1806 		return -EINVAL;
1807 
1808 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1809 	cp_cons = cpr->cp_raw_cons;
1810 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1811 
1812 	if (cons > cp_cons) {
1813 		if (CMPL_VALID(rxcmp, cpr->valid))
1814 			return RTE_ETH_RX_DESC_DONE;
1815 	} else {
1816 		if (CMPL_VALID(rxcmp, !cpr->valid))
1817 			return RTE_ETH_RX_DESC_DONE;
1818 	}
1819 	rx_buf = &rxr->rx_buf_ring[cons];
1820 	if (rx_buf->mbuf == NULL)
1821 		return RTE_ETH_RX_DESC_UNAVAIL;
1822 
1823 
1824 	return RTE_ETH_RX_DESC_AVAIL;
1825 }
1826 
1827 static int
1828 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1829 {
1830 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1831 	struct bnxt_tx_ring_info *txr;
1832 	struct bnxt_cp_ring_info *cpr;
1833 	struct bnxt_sw_tx_bd *tx_buf;
1834 	struct tx_pkt_cmpl *txcmp;
1835 	uint32_t cons, cp_cons;
1836 
1837 	if (!txq)
1838 		return -EINVAL;
1839 
1840 	cpr = txq->cp_ring;
1841 	txr = txq->tx_ring;
1842 
1843 	if (offset >= txq->nb_tx_desc)
1844 		return -EINVAL;
1845 
1846 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1847 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1848 	cp_cons = cpr->cp_raw_cons;
1849 
1850 	if (cons > cp_cons) {
1851 		if (CMPL_VALID(txcmp, cpr->valid))
1852 			return RTE_ETH_TX_DESC_UNAVAIL;
1853 	} else {
1854 		if (CMPL_VALID(txcmp, !cpr->valid))
1855 			return RTE_ETH_TX_DESC_UNAVAIL;
1856 	}
1857 	tx_buf = &txr->tx_buf_ring[cons];
1858 	if (tx_buf->mbuf == NULL)
1859 		return RTE_ETH_TX_DESC_DONE;
1860 
1861 	return RTE_ETH_TX_DESC_FULL;
1862 }
1863 
1864 static struct bnxt_filter_info *
1865 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1866 				struct rte_eth_ethertype_filter *efilter,
1867 				struct bnxt_vnic_info *vnic0,
1868 				struct bnxt_vnic_info *vnic,
1869 				int *ret)
1870 {
1871 	struct bnxt_filter_info *mfilter = NULL;
1872 	int match = 0;
1873 	*ret = 0;
1874 
1875 	if (efilter->ether_type == ETHER_TYPE_IPv4 ||
1876 		efilter->ether_type == ETHER_TYPE_IPv6) {
1877 		PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
1878 			" ethertype filter.", efilter->ether_type);
1879 		*ret = -EINVAL;
1880 		goto exit;
1881 	}
1882 	if (efilter->queue >= bp->rx_nr_rings) {
1883 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1884 		*ret = -EINVAL;
1885 		goto exit;
1886 	}
1887 
1888 	vnic0 = &bp->vnic_info[0];
1889 	vnic = &bp->vnic_info[efilter->queue];
1890 	if (vnic == NULL) {
1891 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1892 		*ret = -EINVAL;
1893 		goto exit;
1894 	}
1895 
1896 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1897 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1898 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1899 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1900 			     mfilter->flags ==
1901 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1902 			     mfilter->ethertype == efilter->ether_type)) {
1903 				match = 1;
1904 				break;
1905 			}
1906 		}
1907 	} else {
1908 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
1909 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1910 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1911 			     mfilter->ethertype == efilter->ether_type &&
1912 			     mfilter->flags ==
1913 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1914 				match = 1;
1915 				break;
1916 			}
1917 	}
1918 
1919 	if (match)
1920 		*ret = -EEXIST;
1921 
1922 exit:
1923 	return mfilter;
1924 }
1925 
1926 static int
1927 bnxt_ethertype_filter(struct rte_eth_dev *dev,
1928 			enum rte_filter_op filter_op,
1929 			void *arg)
1930 {
1931 	struct bnxt *bp = dev->data->dev_private;
1932 	struct rte_eth_ethertype_filter *efilter =
1933 			(struct rte_eth_ethertype_filter *)arg;
1934 	struct bnxt_filter_info *bfilter, *filter1;
1935 	struct bnxt_vnic_info *vnic, *vnic0;
1936 	int ret;
1937 
1938 	if (filter_op == RTE_ETH_FILTER_NOP)
1939 		return 0;
1940 
1941 	if (arg == NULL) {
1942 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
1943 			    filter_op);
1944 		return -EINVAL;
1945 	}
1946 
1947 	vnic0 = &bp->vnic_info[0];
1948 	vnic = &bp->vnic_info[efilter->queue];
1949 
1950 	switch (filter_op) {
1951 	case RTE_ETH_FILTER_ADD:
1952 		bnxt_match_and_validate_ether_filter(bp, efilter,
1953 							vnic0, vnic, &ret);
1954 		if (ret < 0)
1955 			return ret;
1956 
1957 		bfilter = bnxt_get_unused_filter(bp);
1958 		if (bfilter == NULL) {
1959 			PMD_DRV_LOG(ERR,
1960 				"Not enough resources for a new filter.\n");
1961 			return -ENOMEM;
1962 		}
1963 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
1964 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
1965 		       ETHER_ADDR_LEN);
1966 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
1967 		       ETHER_ADDR_LEN);
1968 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
1969 		bfilter->ethertype = efilter->ether_type;
1970 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
1971 
1972 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
1973 		if (filter1 == NULL) {
1974 			ret = -EINVAL;
1975 			goto cleanup;
1976 		}
1977 		bfilter->enables |=
1978 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1979 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1980 
1981 		bfilter->dst_id = vnic->fw_vnic_id;
1982 
1983 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1984 			bfilter->flags =
1985 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1986 		}
1987 
1988 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
1989 		if (ret)
1990 			goto cleanup;
1991 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
1992 		break;
1993 	case RTE_ETH_FILTER_DELETE:
1994 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
1995 							vnic0, vnic, &ret);
1996 		if (ret == -EEXIST) {
1997 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
1998 
1999 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2000 				      next);
2001 			bnxt_free_filter(bp, filter1);
2002 		} else if (ret == 0) {
2003 			PMD_DRV_LOG(ERR, "No matching filter found\n");
2004 		}
2005 		break;
2006 	default:
2007 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2008 		ret = -EINVAL;
2009 		goto error;
2010 	}
2011 	return ret;
2012 cleanup:
2013 	bnxt_free_filter(bp, bfilter);
2014 error:
2015 	return ret;
2016 }
2017 
2018 static inline int
2019 parse_ntuple_filter(struct bnxt *bp,
2020 		    struct rte_eth_ntuple_filter *nfilter,
2021 		    struct bnxt_filter_info *bfilter)
2022 {
2023 	uint32_t en = 0;
2024 
2025 	if (nfilter->queue >= bp->rx_nr_rings) {
2026 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
2027 		return -EINVAL;
2028 	}
2029 
2030 	switch (nfilter->dst_port_mask) {
2031 	case UINT16_MAX:
2032 		bfilter->dst_port_mask = -1;
2033 		bfilter->dst_port = nfilter->dst_port;
2034 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
2035 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2036 		break;
2037 	default:
2038 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
2039 		return -EINVAL;
2040 	}
2041 
2042 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2043 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2044 
2045 	switch (nfilter->proto_mask) {
2046 	case UINT8_MAX:
2047 		if (nfilter->proto == 17) /* IPPROTO_UDP */
2048 			bfilter->ip_protocol = 17;
2049 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
2050 			bfilter->ip_protocol = 6;
2051 		else
2052 			return -EINVAL;
2053 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2054 		break;
2055 	default:
2056 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
2057 		return -EINVAL;
2058 	}
2059 
2060 	switch (nfilter->dst_ip_mask) {
2061 	case UINT32_MAX:
2062 		bfilter->dst_ipaddr_mask[0] = -1;
2063 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
2064 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
2065 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2066 		break;
2067 	default:
2068 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2069 		return -EINVAL;
2070 	}
2071 
2072 	switch (nfilter->src_ip_mask) {
2073 	case UINT32_MAX:
2074 		bfilter->src_ipaddr_mask[0] = -1;
2075 		bfilter->src_ipaddr[0] = nfilter->src_ip;
2076 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
2077 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2078 		break;
2079 	default:
2080 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2081 		return -EINVAL;
2082 	}
2083 
2084 	switch (nfilter->src_port_mask) {
2085 	case UINT16_MAX:
2086 		bfilter->src_port_mask = -1;
2087 		bfilter->src_port = nfilter->src_port;
2088 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
2089 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2090 		break;
2091 	default:
2092 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
2093 		return -EINVAL;
2094 	}
2095 
2096 	//TODO Priority
2097 	//nfilter->priority = (uint8_t)filter->priority;
2098 
2099 	bfilter->enables = en;
2100 	return 0;
2101 }
2102 
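/*
 * Search every vnic (newest first) for a filter whose 5-tuple, flags and
 * enables match 'bfilter'. On a hit, optionally return the owning vnic
 * through 'mvnic'.
 */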
2103 static struct bnxt_filter_info*
2104 bnxt_match_ntuple_filter(struct bnxt *bp,
2105 			 struct bnxt_filter_info *bfilter,
2106 			 struct bnxt_vnic_info **mvnic)
2107 {
2108 	struct bnxt_filter_info *mfilter = NULL;
2109 	int i;
2110 
2111 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2112 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2113 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
2114 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
2115 			    bfilter->src_ipaddr_mask[0] ==
2116 			    mfilter->src_ipaddr_mask[0] &&
2117 			    bfilter->src_port == mfilter->src_port &&
2118 			    bfilter->src_port_mask == mfilter->src_port_mask &&
2119 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
2120 			    bfilter->dst_ipaddr_mask[0] ==
2121 			    mfilter->dst_ipaddr_mask[0] &&
2122 			    bfilter->dst_port == mfilter->dst_port &&
2123 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
2124 			    bfilter->flags == mfilter->flags &&
2125 			    bfilter->enables == mfilter->enables) {
2126 				if (mvnic)
2127 					*mvnic = vnic;
2128 				return mfilter;
2129 			}
2130 		}
2131 	}
2132 	return NULL;
2133 }
2134 
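/*
 * Translate an rte ntuple filter into HWRM form and either program it or,
 * for a delete, clear the matching filter. A filter that matches on
 * pattern but targets a different queue is updated in place instead of
 * being duplicated.
 */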
2135 static int
2136 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2137 		       struct rte_eth_ntuple_filter *nfilter,
2138 		       enum rte_filter_op filter_op)
2139 {
2140 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2141 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2142 	int ret;
2143 
2144 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2145 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2146 		return -EINVAL;
2147 	}
2148 
2149 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2150 		PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2151 		return -EINVAL;
2152 	}
2153 
2154 	bfilter = bnxt_get_unused_filter(bp);
2155 	if (bfilter == NULL) {
2156 		PMD_DRV_LOG(ERR,
2157 			"Not enough resources for a new filter.\n");
2158 		return -ENOMEM;
2159 	}
2160 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2161 	if (ret < 0)
2162 		goto free_filter;
2163 
2164 	vnic = &bp->vnic_info[nfilter->queue];
2165 	vnic0 = &bp->vnic_info[0];
2166 	filter1 = STAILQ_FIRST(&vnic0->filter);
2167 	if (filter1 == NULL) {
2168 		ret = -EINVAL;
2169 		goto free_filter;
2170 	}
2171 
2172 	bfilter->dst_id = vnic->fw_vnic_id;
2173 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2174 	bfilter->enables |=
2175 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2176 	bfilter->ethertype = 0x800;
2177 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2178 
2179 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2180 
2181 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2182 	    bfilter->dst_id == mfilter->dst_id) {
2183 		PMD_DRV_LOG(ERR, "filter exists.\n");
2184 		ret = -EEXIST;
2185 		goto free_filter;
2186 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2187 		   bfilter->dst_id != mfilter->dst_id) {
2188 		mfilter->dst_id = vnic->fw_vnic_id;
2189 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2190 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2191 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2192 		PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2193 		PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2194 		goto free_filter;
2195 	}
2196 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2197 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
2198 		ret = -ENOENT;
2199 		goto free_filter;
2200 	}
2201 
2202 	if (filter_op == RTE_ETH_FILTER_ADD) {
2203 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2204 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2205 		if (ret)
2206 			goto free_filter;
2207 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2208 	} else {
2209 		if (mfilter == NULL) {
2210 			/* This should not happen. But for Coverity! */
2211 			ret = -ENOENT;
2212 			goto free_filter;
2213 		}
2214 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2215 
2216 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2217 		mfilter->fw_l2_filter_id = -1;
2218 		bnxt_free_filter(bp, mfilter);
2219 		bfilter->fw_l2_filter_id = -1;
2220 		bnxt_free_filter(bp, bfilter);
2221 	}
2222 
2223 	return 0;
2224 free_filter:
2225 	bfilter->fw_l2_filter_id = -1;
2226 	bnxt_free_filter(bp, bfilter);
2227 	return ret;
2228 }
2229 
2230 static int
2231 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2232 			enum rte_filter_op filter_op,
2233 			void *arg)
2234 {
2235 	struct bnxt *bp = dev->data->dev_private;
2236 	int ret;
2237 
2238 	if (filter_op == RTE_ETH_FILTER_NOP)
2239 		return 0;
2240 
2241 	if (arg == NULL) {
2242 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2243 			    filter_op);
2244 		return -EINVAL;
2245 	}
2246 
2247 	switch (filter_op) {
2248 	case RTE_ETH_FILTER_ADD:
2249 		ret = bnxt_cfg_ntuple_filter(bp,
2250 			(struct rte_eth_ntuple_filter *)arg,
2251 			filter_op);
2252 		break;
2253 	case RTE_ETH_FILTER_DELETE:
2254 		ret = bnxt_cfg_ntuple_filter(bp,
2255 			(struct rte_eth_ntuple_filter *)arg,
2256 			filter_op);
2257 		break;
2258 	default:
2259 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2260 		ret = -EINVAL;
2261 		break;
2262 	}
2263 	return ret;
2264 }
2265 
2266 static int
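/*
 * Convert an rte_eth_fdir_filter into the HWRM ntuple filter layout,
 * setting a bit in the enables mask for every field the flow type
 * matches on, and resolve the L2 filter the new entry will hang off.
 */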
2267 bnxt_parse_fdir_filter(struct bnxt *bp,
2268 		       struct rte_eth_fdir_filter *fdir,
2269 		       struct bnxt_filter_info *filter)
2270 {
2271 	enum rte_fdir_mode fdir_mode =
2272 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2273 	struct bnxt_vnic_info *vnic0, *vnic;
2274 	struct bnxt_filter_info *filter1;
2275 	uint32_t en = 0;
2276 	int i;
2277 
2278 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2279 		return -EINVAL;
2280 
2281 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2282 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2283 
2284 	switch (fdir->input.flow_type) {
2285 	case RTE_ETH_FLOW_IPV4:
2286 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2287 		/* FALLTHROUGH */
2288 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2289 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2290 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2291 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2292 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2293 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2294 		filter->ip_addr_type =
2295 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2296 		filter->src_ipaddr_mask[0] = 0xffffffff;
2297 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2298 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2299 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2300 		filter->ethertype = 0x800;
2301 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2302 		break;
2303 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2304 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2305 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2306 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2307 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2308 		filter->dst_port_mask = 0xffff;
2309 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2310 		filter->src_port_mask = 0xffff;
2311 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2312 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2313 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2314 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2315 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2316 		filter->ip_protocol = 6;
2317 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2318 		filter->ip_addr_type =
2319 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2320 		filter->src_ipaddr_mask[0] = 0xffffffff;
2321 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2322 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2323 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2324 		filter->ethertype = 0x800;
2325 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2326 		break;
2327 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2328 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2329 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2330 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2331 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2332 		filter->dst_port_mask = 0xffff;
2333 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2334 		filter->src_port_mask = 0xffff;
2335 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2336 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2337 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2338 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2339 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2340 		filter->ip_protocol = 17;
2341 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2342 		filter->ip_addr_type =
2343 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2344 		filter->src_ipaddr_mask[0] = 0xffffffff;
2345 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2346 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2347 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2348 		filter->ethertype = 0x800;
2349 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2350 		break;
2351 	case RTE_ETH_FLOW_IPV6:
2352 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2353 		/* FALLTHROUGH */
2354 		filter->ip_addr_type =
2355 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2356 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2357 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2358 		rte_memcpy(filter->src_ipaddr,
2359 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2360 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2361 		rte_memcpy(filter->dst_ipaddr,
2362 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2363 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2364 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2365 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2366 		memset(filter->src_ipaddr_mask, 0xff, 16);
2367 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2368 		filter->ethertype = 0x86dd;
2369 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2370 		break;
2371 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2372 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2373 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2374 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2375 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2376 		filter->dst_port_mask = 0xffff;
2377 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2378 		filter->src_port_mask = 0xffff;
2379 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2380 		filter->ip_addr_type =
2381 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2382 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2383 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2384 		rte_memcpy(filter->src_ipaddr,
2385 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2386 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2387 		rte_memcpy(filter->dst_ipaddr,
2388 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2389 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2390 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2391 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2392 		memset(filter->src_ipaddr_mask, 0xff, 16);
2393 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2394 		filter->ethertype = 0x86dd;
2395 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2396 		break;
2397 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2398 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2399 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2400 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2401 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2402 		filter->dst_port_mask = 0xffff;
2403 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2404 		filter->src_port_mask = 0xffff;
2405 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2406 		filter->ip_addr_type =
2407 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2408 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2409 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2410 		rte_memcpy(filter->src_ipaddr,
2411 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
2412 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2413 		rte_memcpy(filter->dst_ipaddr,
2414 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2415 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2416 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2417 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2418 		memset(filter->src_ipaddr_mask, 0xff, 16);
2419 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2420 		filter->ethertype = 0x86dd;
2421 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2422 		break;
2423 	case RTE_ETH_FLOW_L2_PAYLOAD:
2424 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2425 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2426 		break;
2427 	case RTE_ETH_FLOW_VXLAN:
2428 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2429 			return -EINVAL;
2430 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2431 		filter->tunnel_type =
2432 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2433 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2434 		break;
2435 	case RTE_ETH_FLOW_NVGRE:
2436 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2437 			return -EINVAL;
2438 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2439 		filter->tunnel_type =
2440 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2441 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2442 		break;
2443 	case RTE_ETH_FLOW_UNKNOWN:
2444 	case RTE_ETH_FLOW_RAW:
2445 	case RTE_ETH_FLOW_FRAG_IPV4:
2446 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2447 	case RTE_ETH_FLOW_FRAG_IPV6:
2448 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2449 	case RTE_ETH_FLOW_IPV6_EX:
2450 	case RTE_ETH_FLOW_IPV6_TCP_EX:
2451 	case RTE_ETH_FLOW_IPV6_UDP_EX:
2452 	case RTE_ETH_FLOW_GENEVE:
2453 		/* FALLTHROUGH */
2454 	default:
2455 		return -EINVAL;
2456 	}
2457 
2458 	vnic0 = &bp->vnic_info[0];
2459 	vnic = &bp->vnic_info[fdir->action.rx_queue];
2460 	if (vnic == NULL) {
2461 		PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
2462 		return -EINVAL;
2463 	}
2464 
2465 
2466 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2467 		rte_memcpy(filter->dst_macaddr,
2468 			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
2469 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2470 	}
2471 
2472 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2473 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2474 		filter1 = STAILQ_FIRST(&vnic0->filter);
2475 		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
2476 	} else {
2477 		filter->dst_id = vnic->fw_vnic_id;
2478 		for (i = 0; i < ETHER_ADDR_LEN; i++)
2479 			if (filter->dst_macaddr[i] == 0x00)
2480 				filter1 = STAILQ_FIRST(&vnic0->filter);
2481 			else
2482 				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
2483 	}
2484 
2485 	if (filter1 == NULL)
2486 		return -EINVAL;
2487 
2488 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2489 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2490 
2491 	filter->enables = en;
2492 
2493 	return 0;
2494 }
2495 
2496 static struct bnxt_filter_info *
2497 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
2498 		struct bnxt_vnic_info **mvnic)
2499 {
2500 	struct bnxt_filter_info *mf = NULL;
2501 	int i;
2502 
2503 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2504 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2505 
2506 		STAILQ_FOREACH(mf, &vnic->filter, next) {
2507 			if (mf->filter_type == nf->filter_type &&
2508 			    mf->flags == nf->flags &&
2509 			    mf->src_port == nf->src_port &&
2510 			    mf->src_port_mask == nf->src_port_mask &&
2511 			    mf->dst_port == nf->dst_port &&
2512 			    mf->dst_port_mask == nf->dst_port_mask &&
2513 			    mf->ip_protocol == nf->ip_protocol &&
2514 			    mf->ip_addr_type == nf->ip_addr_type &&
2515 			    mf->ethertype == nf->ethertype &&
2516 			    mf->vni == nf->vni &&
2517 			    mf->tunnel_type == nf->tunnel_type &&
2518 			    mf->l2_ovlan == nf->l2_ovlan &&
2519 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2520 			    mf->l2_ivlan == nf->l2_ivlan &&
2521 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2522 			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2523 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2524 				    ETHER_ADDR_LEN) &&
2525 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
2526 				    ETHER_ADDR_LEN) &&
2527 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2528 				    ETHER_ADDR_LEN) &&
2529 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2530 				    sizeof(nf->src_ipaddr)) &&
2531 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2532 				    sizeof(nf->src_ipaddr_mask)) &&
2533 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2534 				    sizeof(nf->dst_ipaddr)) &&
2535 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2536 				    sizeof(nf->dst_ipaddr_mask))) {
2537 				if (mvnic)
2538 					*mvnic = vnic;
2539 				return mf;
2540 			}
2541 		}
2542 	}
2543 	return NULL;
2544 }
2545 
2546 static int
2547 bnxt_fdir_filter(struct rte_eth_dev *dev,
2548 		 enum rte_filter_op filter_op,
2549 		 void *arg)
2550 {
2551 	struct bnxt *bp = dev->data->dev_private;
2552 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
2553 	struct bnxt_filter_info *filter, *match;
2554 	struct bnxt_vnic_info *vnic, *mvnic;
2555 	int ret = 0, i;
2556 
2557 	if (filter_op == RTE_ETH_FILTER_NOP)
2558 		return 0;
2559 
2560 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2561 		return -EINVAL;
2562 
2563 	switch (filter_op) {
2564 	case RTE_ETH_FILTER_ADD:
2565 	case RTE_ETH_FILTER_DELETE:
2566 		/* FALLTHROUGH */
2567 		filter = bnxt_get_unused_filter(bp);
2568 		if (filter == NULL) {
2569 			PMD_DRV_LOG(ERR,
2570 				"Not enough resources for a new flow.\n");
2571 			return -ENOMEM;
2572 		}
2573 
2574 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2575 		if (ret != 0)
2576 			goto free_filter;
2577 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2578 
2579 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2580 			vnic = &bp->vnic_info[0];
2581 		else
2582 			vnic = &bp->vnic_info[fdir->action.rx_queue];
2583 
2584 		match = bnxt_match_fdir(bp, filter, &mvnic);
2585 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2586 			if (match->dst_id == vnic->fw_vnic_id) {
2587 				PMD_DRV_LOG(ERR, "Flow already exists.\n");
2588 				ret = -EEXIST;
2589 				goto free_filter;
2590 			} else {
2591 				match->dst_id = vnic->fw_vnic_id;
2592 				ret = bnxt_hwrm_set_ntuple_filter(bp,
2593 								  match->dst_id,
2594 								  match);
2595 				STAILQ_REMOVE(&mvnic->filter, match,
2596 					      bnxt_filter_info, next);
2597 				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
2598 				PMD_DRV_LOG(ERR,
2599 					"Filter with matching pattern exists\n");
2600 				PMD_DRV_LOG(ERR,
2601 					"Updated it to new destination q\n");
2602 				goto free_filter;
2603 			}
2604 		}
2605 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2606 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
2607 			ret = -ENOENT;
2608 			goto free_filter;
2609 		}
2610 
2611 		if (filter_op == RTE_ETH_FILTER_ADD) {
2612 			ret = bnxt_hwrm_set_ntuple_filter(bp,
2613 							  filter->dst_id,
2614 							  filter);
2615 			if (ret)
2616 				goto free_filter;
2617 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2618 		} else {
2619 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2620 			STAILQ_REMOVE(&vnic->filter, match,
2621 				      bnxt_filter_info, next);
2622 			bnxt_free_filter(bp, match);
2623 			filter->fw_l2_filter_id = -1;
2624 			bnxt_free_filter(bp, filter);
2625 		}
2626 		break;
2627 	case RTE_ETH_FILTER_FLUSH:
2628 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
2629 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2630 
2631 			STAILQ_FOREACH(filter, &vnic->filter, next) {
2632 				if (filter->filter_type ==
2633 				    HWRM_CFA_NTUPLE_FILTER) {
2634 					ret =
2635 					bnxt_hwrm_clear_ntuple_filter(bp,
2636 								      filter);
2637 					STAILQ_REMOVE(&vnic->filter, filter,
2638 						      bnxt_filter_info, next);
2639 				}
2640 			}
2641 		}
2642 		return ret;
2643 	case RTE_ETH_FILTER_UPDATE:
2644 	case RTE_ETH_FILTER_STATS:
2645 	case RTE_ETH_FILTER_INFO:
2646 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
2647 		break;
2648 	default:
2649 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2650 		ret = -EINVAL;
2651 		break;
2652 	}
2653 	return ret;
2654 
2655 free_filter:
2656 	filter->fw_l2_filter_id = -1;
2657 	bnxt_free_filter(bp, filter);
2658 	return ret;
2659 }
2660 
2661 static int
2662 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
2663 		    enum rte_filter_type filter_type,
2664 		    enum rte_filter_op filter_op, void *arg)
2665 {
2666 	int ret = 0;
2667 
2668 	switch (filter_type) {
2669 	case RTE_ETH_FILTER_TUNNEL:
2670 		PMD_DRV_LOG(ERR,
2671 			"filter type: %d: To be implemented\n", filter_type);
2672 		break;
2673 	case RTE_ETH_FILTER_FDIR:
2674 		ret = bnxt_fdir_filter(dev, filter_op, arg);
2675 		break;
2676 	case RTE_ETH_FILTER_NTUPLE:
2677 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
2678 		break;
2679 	case RTE_ETH_FILTER_ETHERTYPE:
2680 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
2681 		break;
2682 	case RTE_ETH_FILTER_GENERIC:
2683 		if (filter_op != RTE_ETH_FILTER_GET)
2684 			return -EINVAL;
2685 		*(const void **)arg = &bnxt_flow_ops;
2686 		break;
2687 	default:
2688 		PMD_DRV_LOG(ERR,
2689 			"Filter type (%d) not supported", filter_type);
2690 		ret = -EINVAL;
2691 		break;
2692 	}
2693 	return ret;
2694 }
2695 
2696 static const uint32_t *
2697 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2698 {
2699 	static const uint32_t ptypes[] = {
2700 		RTE_PTYPE_L2_ETHER_VLAN,
2701 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2702 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2703 		RTE_PTYPE_L4_ICMP,
2704 		RTE_PTYPE_L4_TCP,
2705 		RTE_PTYPE_L4_UDP,
2706 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2707 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2708 		RTE_PTYPE_INNER_L4_ICMP,
2709 		RTE_PTYPE_INNER_L4_TCP,
2710 		RTE_PTYPE_INNER_L4_UDP,
2711 		RTE_PTYPE_UNKNOWN
2712 	};
2713 
2714 	if (dev->rx_pkt_burst == bnxt_recv_pkts)
2715 		return ptypes;
2716 	return NULL;
2717 }
2718 
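/*
 * Map a group of PTP-related GRC registers through one of the PF register
 * windows. All registers in the array must fall within the same 4KB page;
 * the page base is programmed into the selected window.
 */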
2719 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
2720 			 int reg_win)
2721 {
2722 	uint32_t reg_base = *reg_arr & 0xfffff000;
2723 	uint32_t win_off;
2724 	int i;
2725 
2726 	for (i = 0; i < count; i++) {
2727 		if ((reg_arr[i] & 0xfffff000) != reg_base)
2728 			return -ERANGE;
2729 	}
2730 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
2731 	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
2732 	return 0;
2733 }
2734 
2735 static int bnxt_map_ptp_regs(struct bnxt *bp)
2736 {
2737 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2738 	uint32_t *reg_arr;
2739 	int rc, i;
2740 
2741 	reg_arr = ptp->rx_regs;
2742 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
2743 	if (rc)
2744 		return rc;
2745 
2746 	reg_arr = ptp->tx_regs;
2747 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
2748 	if (rc)
2749 		return rc;
2750 
2751 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
2752 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
2753 
2754 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
2755 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
2756 
2757 	return 0;
2758 }
2759 
2760 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
2761 {
2762 	rte_write32(0, (uint8_t *)bp->bar0 +
2763 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
2764 	rte_write32(0, (uint8_t *)bp->bar0 +
2765 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
2766 }
2767 
2768 static uint64_t bnxt_cc_read(struct bnxt *bp)
2769 {
2770 	uint64_t ns;
2771 
2772 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2773 			      BNXT_GRCPF_REG_SYNC_TIME));
2774 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2775 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
2776 	return ns;
2777 }
2778 
2779 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
2780 {
2781 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2782 	uint32_t fifo;
2783 
2784 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2785 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2786 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
2787 		return -EAGAIN;
2788 
2789 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2790 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2791 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2792 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
2793 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2794 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
2795 
2796 	return 0;
2797 }
2798 
2799 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
2800 {
2801 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2802 	struct bnxt_pf_info *pf = &bp->pf;
2803 	uint16_t port_id;
2804 	uint32_t fifo;
2805 
2806 	if (!ptp)
2807 		return -ENODEV;
2808 
2809 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2810 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2811 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
2812 		return -EAGAIN;
2813 
2814 	port_id = pf->port_id;
2815 	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
2816 	       ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
2817 
2818 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2819 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2820 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
2821 /*		bnxt_clr_rx_ts(bp);	  TBD  */
2822 		return -EBUSY;
2823 	}
2824 
2825 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2826 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
2827 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2828 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
2829 
2830 	return 0;
2831 }
2832 
2833 static int
2834 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2835 {
2836 	uint64_t ns;
2837 	struct bnxt *bp = dev->data->dev_private;
2838 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2839 
2840 	if (!ptp)
2841 		return 0;
2842 
2843 	ns = rte_timespec_to_ns(ts);
2844 	/* Set the timecounters to a new value. */
2845 	ptp->tc.nsec = ns;
2846 
2847 	return 0;
2848 }
2849 
2850 static int
2851 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2852 {
2853 	uint64_t ns, systime_cycles;
2854 	struct bnxt *bp = dev->data->dev_private;
2855 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2856 
2857 	if (!ptp)
2858 		return 0;
2859 
2860 	systime_cycles = bnxt_cc_read(bp);
2861 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
2862 	*ts = rte_ns_to_timespec(ns);
2863 
2864 	return 0;
2865 }
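
/*
 * Enable PTP timestamping. The timecounters use a shift of 0, so the
 * free-running counter read via bnxt_cc_read() is consumed as nanoseconds
 * without scaling.
 */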
2866 static int
2867 bnxt_timesync_enable(struct rte_eth_dev *dev)
2868 {
2869 	struct bnxt *bp = dev->data->dev_private;
2870 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2871 	uint32_t shift = 0;
2872 
2873 	if (!ptp)
2874 		return 0;
2875 
2876 	ptp->rx_filter = 1;
2877 	ptp->tx_tstamp_en = 1;
2878 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
2879 
2880 	if (!bnxt_hwrm_ptp_cfg(bp))
2881 		bnxt_map_ptp_regs(bp);
2882 
2883 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
2884 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2885 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2886 
2887 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2888 	ptp->tc.cc_shift = shift;
2889 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
2890 
2891 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2892 	ptp->rx_tstamp_tc.cc_shift = shift;
2893 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2894 
2895 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2896 	ptp->tx_tstamp_tc.cc_shift = shift;
2897 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2898 
2899 	return 0;
2900 }
2901 
2902 static int
2903 bnxt_timesync_disable(struct rte_eth_dev *dev)
2904 {
2905 	struct bnxt *bp = dev->data->dev_private;
2906 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2907 
2908 	if (!ptp)
2909 		return 0;
2910 
2911 	ptp->rx_filter = 0;
2912 	ptp->tx_tstamp_en = 0;
2913 	ptp->rxctl = 0;
2914 
2915 	bnxt_hwrm_ptp_cfg(bp);
2916 
2917 	bnxt_unmap_ptp_regs(bp);
2918 
2919 	return 0;
2920 }
2921 
2922 static int
2923 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2924 				 struct timespec *timestamp,
2925 				 uint32_t flags __rte_unused)
2926 {
2927 	struct bnxt *bp = dev->data->dev_private;
2928 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2929 	uint64_t rx_tstamp_cycles = 0;
2930 	uint64_t ns;
2931 
2932 	if (!ptp)
2933 		return 0;
2934 
2935 	bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
2936 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
2937 	*timestamp = rte_ns_to_timespec(ns);
2938 	return  0;
2939 }
2940 
2941 static int
2942 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2943 				 struct timespec *timestamp)
2944 {
2945 	struct bnxt *bp = dev->data->dev_private;
2946 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2947 	uint64_t tx_tstamp_cycles = 0;
2948 	uint64_t ns;
2949 
2950 	if (!ptp)
2951 		return 0;
2952 
2953 	bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
2954 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
2955 	*timestamp = rte_ns_to_timespec(ns);
2956 
2957 	return 0;
2958 }
2959 
2960 static int
2961 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2962 {
2963 	struct bnxt *bp = dev->data->dev_private;
2964 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2965 
2966 	if (!ptp)
2967 		return 0;
2968 
2969 	ptp->tc.nsec += delta;
2970 
2971 	return 0;
2972 }
2973 
2974 static int
2975 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2976 {
2977 	struct bnxt *bp = dev->data->dev_private;
2978 	int rc;
2979 	uint32_t dir_entries;
2980 	uint32_t entry_length;
2981 
2982 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
2983 		bp->pdev->addr.domain, bp->pdev->addr.bus,
2984 		bp->pdev->addr.devid, bp->pdev->addr.function);
2985 
2986 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2987 	if (rc != 0)
2988 		return rc;
2989 
2990 	return dir_entries * entry_length;
2991 }
2992 
2993 static int
2994 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2995 		struct rte_dev_eeprom_info *in_eeprom)
2996 {
2997 	struct bnxt *bp = dev->data->dev_private;
2998 	uint32_t index;
2999 	uint32_t offset;
3000 
3001 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
3002 		"len = %d\n", bp->pdev->addr.domain,
3003 		bp->pdev->addr.bus, bp->pdev->addr.devid,
3004 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
3005 
3006 	if (in_eeprom->offset == 0) /* special offset value to get directory */
3007 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
3008 						in_eeprom->data);
3009 
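	/*
	 * Otherwise the offset encodes the 1-based directory index in the
	 * top byte and the byte offset within that item in the low 24 bits.
	 */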
3010 	index = in_eeprom->offset >> 24;
3011 	offset = in_eeprom->offset & 0xffffff;
3012 
3013 	if (index != 0)
3014 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
3015 					   in_eeprom->length, in_eeprom->data);
3016 
3017 	return 0;
3018 }
3019 
3020 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
3021 {
3022 	switch (dir_type) {
3023 	case BNX_DIR_TYPE_CHIMP_PATCH:
3024 	case BNX_DIR_TYPE_BOOTCODE:
3025 	case BNX_DIR_TYPE_BOOTCODE_2:
3026 	case BNX_DIR_TYPE_APE_FW:
3027 	case BNX_DIR_TYPE_APE_PATCH:
3028 	case BNX_DIR_TYPE_KONG_FW:
3029 	case BNX_DIR_TYPE_KONG_PATCH:
3030 	case BNX_DIR_TYPE_BONO_FW:
3031 	case BNX_DIR_TYPE_BONO_PATCH:
3032 		/* FALLTHROUGH */
3033 		return true;
3034 	}
3035 
3036 	return false;
3037 }
3038 
3039 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
3040 {
3041 	switch (dir_type) {
3042 	case BNX_DIR_TYPE_AVS:
3043 	case BNX_DIR_TYPE_EXP_ROM_MBA:
3044 	case BNX_DIR_TYPE_PCIE:
3045 	case BNX_DIR_TYPE_TSCF_UCODE:
3046 	case BNX_DIR_TYPE_EXT_PHY:
3047 	case BNX_DIR_TYPE_CCM:
3048 	case BNX_DIR_TYPE_ISCSI_BOOT:
3049 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3050 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3051 		/* FALLTHROUGH */
3052 		return true;
3053 	}
3054 
3055 	return false;
3056 }
3057 
3058 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
3059 {
3060 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3061 		bnxt_dir_type_is_other_exec_format(dir_type);
3062 }
3063 
3064 static int
3065 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
3066 		struct rte_dev_eeprom_info *in_eeprom)
3067 {
3068 	struct bnxt *bp = dev->data->dev_private;
3069 	uint8_t index, dir_op;
3070 	uint16_t type, ext, ordinal, attr;
3071 
3072 	PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
3073 		"len = %d\n", bp->pdev->addr.domain,
3074 		bp->pdev->addr.bus, bp->pdev->addr.devid,
3075 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
3076 
3077 	if (!BNXT_PF(bp)) {
3078 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
3079 		return -EINVAL;
3080 	}
3081 
3082 	type = in_eeprom->magic >> 16;
3083 
3084 	if (type == 0xffff) { /* special value for directory operations */
3085 		index = in_eeprom->magic & 0xff;
3086 		dir_op = in_eeprom->magic >> 8;
3087 		if (index == 0)
3088 			return -EINVAL;
3089 		switch (dir_op) {
3090 		case 0x0e: /* erase */
3091 			if (in_eeprom->offset != ~in_eeprom->magic)
3092 				return -EINVAL;
3093 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
3094 		default:
3095 			return -EINVAL;
3096 		}
3097 	}
3098 
3099 	/* Create or re-write an NVM item: */
3100 	if (bnxt_dir_type_is_executable(type))
3101 		return -EOPNOTSUPP;
3102 	ext = in_eeprom->magic & 0xffff;
3103 	ordinal = in_eeprom->offset >> 16;
3104 	attr = in_eeprom->offset & 0xffff;
3105 
3106 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
3107 				     in_eeprom->data, in_eeprom->length);
3108 }
3109 
3110 /*
3111  * Initialization
3112  */
3113 
3114 static const struct eth_dev_ops bnxt_dev_ops = {
3115 	.dev_infos_get = bnxt_dev_info_get_op,
3116 	.dev_close = bnxt_dev_close_op,
3117 	.dev_configure = bnxt_dev_configure_op,
3118 	.dev_start = bnxt_dev_start_op,
3119 	.dev_stop = bnxt_dev_stop_op,
3120 	.dev_set_link_up = bnxt_dev_set_link_up_op,
3121 	.dev_set_link_down = bnxt_dev_set_link_down_op,
3122 	.stats_get = bnxt_stats_get_op,
3123 	.stats_reset = bnxt_stats_reset_op,
3124 	.rx_queue_setup = bnxt_rx_queue_setup_op,
3125 	.rx_queue_release = bnxt_rx_queue_release_op,
3126 	.tx_queue_setup = bnxt_tx_queue_setup_op,
3127 	.tx_queue_release = bnxt_tx_queue_release_op,
3128 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3129 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3130 	.reta_update = bnxt_reta_update_op,
3131 	.reta_query = bnxt_reta_query_op,
3132 	.rss_hash_update = bnxt_rss_hash_update_op,
3133 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3134 	.link_update = bnxt_link_update_op,
3135 	.promiscuous_enable = bnxt_promiscuous_enable_op,
3136 	.promiscuous_disable = bnxt_promiscuous_disable_op,
3137 	.allmulticast_enable = bnxt_allmulticast_enable_op,
3138 	.allmulticast_disable = bnxt_allmulticast_disable_op,
3139 	.mac_addr_add = bnxt_mac_addr_add_op,
3140 	.mac_addr_remove = bnxt_mac_addr_remove_op,
3141 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
3142 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
3143 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3144 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3145 	.vlan_filter_set = bnxt_vlan_filter_set_op,
3146 	.vlan_offload_set = bnxt_vlan_offload_set_op,
3147 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
3148 	.mtu_set = bnxt_mtu_set_op,
3149 	.mac_addr_set = bnxt_set_default_mac_addr_op,
3150 	.xstats_get = bnxt_dev_xstats_get_op,
3151 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
3152 	.xstats_reset = bnxt_dev_xstats_reset_op,
3153 	.fw_version_get = bnxt_fw_version_get,
3154 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3155 	.rxq_info_get = bnxt_rxq_info_get_op,
3156 	.txq_info_get = bnxt_txq_info_get_op,
3157 	.dev_led_on = bnxt_dev_led_on_op,
3158 	.dev_led_off = bnxt_dev_led_off_op,
3159 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3160 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3161 	.rx_queue_count = bnxt_rx_queue_count_op,
3162 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
3163 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
3164 	.rx_queue_start = bnxt_rx_queue_start,
3165 	.rx_queue_stop = bnxt_rx_queue_stop,
3166 	.tx_queue_start = bnxt_tx_queue_start,
3167 	.tx_queue_stop = bnxt_tx_queue_stop,
3168 	.filter_ctrl = bnxt_filter_ctrl_op,
3169 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3170 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
3171 	.get_eeprom           = bnxt_get_eeprom_op,
3172 	.set_eeprom           = bnxt_set_eeprom_op,
3173 	.timesync_enable      = bnxt_timesync_enable,
3174 	.timesync_disable     = bnxt_timesync_disable,
3175 	.timesync_read_time   = bnxt_timesync_read_time,
3176 	.timesync_write_time   = bnxt_timesync_write_time,
3177 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3178 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3179 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3180 };
3181 
3182 static bool bnxt_vf_pciid(uint16_t id)
3183 {
3184 	if (id == BROADCOM_DEV_ID_57304_VF ||
3185 	    id == BROADCOM_DEV_ID_57406_VF ||
3186 	    id == BROADCOM_DEV_ID_5731X_VF ||
3187 	    id == BROADCOM_DEV_ID_5741X_VF ||
3188 	    id == BROADCOM_DEV_ID_57414_VF ||
3189 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
3190 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
3191 	    id == BROADCOM_DEV_ID_58802_VF)
3192 		return true;
3193 	return false;
3194 }
3195 
3196 bool bnxt_stratus_device(struct bnxt *bp)
3197 {
3198 	uint16_t id = bp->pdev->id.device_id;
3199 
3200 	if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
3201 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
3202 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
3203 		return true;
3204 	return false;
3205 }
3206 
3207 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
3208 {
3209 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3210 	struct bnxt *bp = eth_dev->data->dev_private;
3211 
3212 	/* Map the register and doorbell BARs for device access */
3213 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
3214 	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
3215 	if (!bp->bar0 || !bp->doorbell_base) {
3216 		PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
3217 		return -ENODEV;
3218 	}
3219 
3220 	bp->eth_dev = eth_dev;
3221 	bp->pdev = pci_dev;
3222 
3223 	return 0;
3224 }
3225 
3226 
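/*
 * Clear the bit for HWRM command 'x' in the VF request forwarding bitmap,
 * allowing VFs to issue that command directly instead of having it
 * forwarded to the PF driver for handling.
 */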
3227 #define ALLOW_FUNC(x)	\
3228 	{ \
3229 		uint32_t arg = (x); \
3230 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
3231 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
3232 	}
3233 static int
3234 bnxt_dev_init(struct rte_eth_dev *eth_dev)
3235 {
3236 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3237 	char mz_name[RTE_MEMZONE_NAMESIZE];
3238 	const struct rte_memzone *mz = NULL;
3239 	static int version_printed;
3240 	uint32_t total_alloc_len;
3241 	rte_iova_t mz_phys_addr;
3242 	struct bnxt *bp;
3243 	int rc;
3244 
3245 	if (version_printed++ == 0)
3246 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
3247 
3248 	rte_eth_copy_pci_info(eth_dev, pci_dev);
3249 
3250 	bp = eth_dev->data->dev_private;
3251 
3252 	bp->dev_stopped = 1;
3253 
3254 	eth_dev->dev_ops = &bnxt_dev_ops;
3255 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
3256 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
3257 
3258 	/*
3259 	 * For secondary processes, we don't initialise any further
3260 	 * as primary has already done this work.
3261 	 */
3262 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3263 		return 0;
3264 
3265 	if (bnxt_vf_pciid(pci_dev->id.device_id))
3266 		bp->flags |= BNXT_FLAG_VF;
3267 
3268 	rc = bnxt_init_board(eth_dev);
3269 	if (rc) {
3270 		PMD_DRV_LOG(ERR,
3271 			"Board initialization failed rc: %x\n", rc);
3272 		goto error;
3273 	}
3274 
3275 	if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
3276 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3277 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3278 			 pci_dev->addr.bus, pci_dev->addr.devid,
3279 			 pci_dev->addr.function, "rx_port_stats");
3280 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3281 		mz = rte_memzone_lookup(mz_name);
3282 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3283 					sizeof(struct rx_port_stats) +
3284 					sizeof(struct rx_port_stats_ext) +
3285 					512);
3286 		if (!mz) {
3287 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
3288 					SOCKET_ID_ANY,
3289 					RTE_MEMZONE_2MB |
3290 					RTE_MEMZONE_SIZE_HINT_ONLY |
3291 					RTE_MEMZONE_IOVA_CONTIG);
3292 			if (mz == NULL)
3293 				return -ENOMEM;
3294 		}
3295 		memset(mz->addr, 0, mz->len);
3296 		mz_phys_addr = mz->iova;
3297 		if ((unsigned long)mz->addr == mz_phys_addr) {
3298 			PMD_DRV_LOG(INFO,
3299 				"Memzone physical address same as virtual; using rte_mem_virt2iova()\n");
3300 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3301 			if (mz_phys_addr == 0) {
3302 				PMD_DRV_LOG(ERR,
3303 				"unable to map address to physical memory\n");
3304 				return -ENOMEM;
3305 			}
3306 		}
3307 
3308 		bp->rx_mem_zone = (const void *)mz;
3309 		bp->hw_rx_port_stats = mz->addr;
3310 		bp->hw_rx_port_stats_map = mz_phys_addr;
3311 
3312 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3313 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3314 			 pci_dev->addr.bus, pci_dev->addr.devid,
3315 			 pci_dev->addr.function, "tx_port_stats");
3316 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3317 		mz = rte_memzone_lookup(mz_name);
3318 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3319 					sizeof(struct tx_port_stats) +
3320 					sizeof(struct tx_port_stats_ext) +
3321 					512);
3322 		if (!mz) {
3323 			mz = rte_memzone_reserve(mz_name,
3324 					total_alloc_len,
3325 					SOCKET_ID_ANY,
3326 					RTE_MEMZONE_2MB |
3327 					RTE_MEMZONE_SIZE_HINT_ONLY |
3328 					RTE_MEMZONE_IOVA_CONTIG);
3329 			if (mz == NULL)
3330 				return -ENOMEM;
3331 		}
3332 		memset(mz->addr, 0, mz->len);
3333 		mz_phys_addr = mz->iova;
3334 		if ((unsigned long)mz->addr == mz_phys_addr) {
3335 			PMD_DRV_LOG(WARNING,
3336 				"Memzone physical address same as virtual.\n");
3337 			PMD_DRV_LOG(WARNING,
3338 				"Using rte_mem_virt2iova()\n");
3339 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
3340 			if (mz_phys_addr == 0) {
3341 				PMD_DRV_LOG(ERR,
3342 				"unable to map address to physical memory\n");
3343 				return -ENOMEM;
3344 			}
3345 		}
3346 
3347 		bp->tx_mem_zone = (const void *)mz;
3348 		bp->hw_tx_port_stats = mz->addr;
3349 		bp->hw_tx_port_stats_map = mz_phys_addr;
3350 
3351 		bp->flags |= BNXT_FLAG_PORT_STATS;
3352 
3353 		/* Display extended statistics if FW supports it */
3354 		if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
3355 		    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0)
3356 			goto skip_ext_stats;
3357 
3358 		bp->hw_rx_port_stats_ext = (void *)
3359 			((uint8_t *)bp->hw_rx_port_stats +
3360 			 sizeof(struct rx_port_stats));
3361 		bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
3362 			sizeof(struct rx_port_stats);
3363 		bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
3364 
3365 
3366 		if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
3367 			bp->hw_tx_port_stats_ext = (void *)
3368 				((uint8_t *)bp->hw_tx_port_stats +
3369 				 sizeof(struct tx_port_stats));
3370 			bp->hw_tx_port_stats_ext_map =
3371 				bp->hw_tx_port_stats_map +
3372 				sizeof(struct tx_port_stats);
3373 			bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
3374 		}
3375 	}
3376 
3377 skip_ext_stats:
3378 	rc = bnxt_alloc_hwrm_resources(bp);
3379 	if (rc) {
3380 		PMD_DRV_LOG(ERR,
3381 			"hwrm resource allocation failure rc: %x\n", rc);
3382 		goto error_free;
3383 	}
3384 	rc = bnxt_hwrm_ver_get(bp);
3385 	if (rc)
3386 		goto error_free;
3387 	rc = bnxt_hwrm_queue_qportcfg(bp);
3388 	if (rc) {
3389 		PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
3390 		goto error_free;
3391 	}
3392 
3393 	rc = bnxt_hwrm_func_qcfg(bp);
3394 	if (rc) {
3395 		PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
3396 		goto error_free;
3397 	}
3398 
3399 	/* Get the MAX capabilities for this function */
3400 	rc = bnxt_hwrm_func_qcaps(bp);
3401 	if (rc) {
3402 		PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
3403 		goto error_free;
3404 	}
3405 	if (bp->max_tx_rings == 0) {
3406 		PMD_DRV_LOG(ERR, "No TX rings available!\n");
3407 		rc = -EBUSY;
3408 		goto error_free;
3409 	}
3410 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
3411 					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
3412 	if (eth_dev->data->mac_addrs == NULL) {
3413 		PMD_DRV_LOG(ERR,
3414 			"Failed to alloc %u bytes needed to store MAC addr tbl\n",
3415 			ETHER_ADDR_LEN * bp->max_l2_ctx);
3416 		rc = -ENOMEM;
3417 		goto error_free;
3418 	}
3419 
3420 	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
3421 		PMD_DRV_LOG(ERR,
3422 			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
3423 			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
3424 			    bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
3425 			    bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
3426 		rc = -EINVAL;
3427 		goto error_free;
3428 	}
3429 	/* Copy the permanent MAC from the qcap response address now. */
3430 	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
3431 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
3432 
3433 	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
3434 		/* 1 ring is for default completion ring */
3435 		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
3436 		rc = -ENOSPC;
3437 		goto error_free;
3438 	}
3439 
3440 	bp->grp_info = rte_zmalloc("bnxt_grp_info",
3441 				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
3442 	if (!bp->grp_info) {
3443 		PMD_DRV_LOG(ERR,
3444 			"Failed to alloc %zu bytes to store group info table\n",
3445 			sizeof(*bp->grp_info) * bp->max_ring_grps);
3446 		rc = -ENOMEM;
3447 		goto error_free;
3448 	}
3449 
3450 	/* Forward all requests if firmware is new enough */
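	/* i.e. firmware in [20.6.100, 20.7.0) or at least 20.8.0 */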
3451 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
3452 	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
3453 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
3454 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
3455 	} else {
3456 		PMD_DRV_LOG(WARNING,
3457 			"Firmware too old for VF mailbox functionality\n");
3458 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
3459 	}
3460 
3461 	/*
3462 	 * The following are used for driver cleanup.  If we disallow these,
3463 	 * VF drivers can't clean up cleanly.
3464 	 */
3465 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
3466 	ALLOW_FUNC(HWRM_VNIC_FREE);
3467 	ALLOW_FUNC(HWRM_RING_FREE);
3468 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
3469 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
3470 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
3471 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
3472 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
3473 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
3474 	rc = bnxt_hwrm_func_driver_register(bp);
3475 	if (rc) {
3476 		PMD_DRV_LOG(ERR,
3477 			"Failed to register driver\n");
3478 		rc = -EBUSY;
3479 		goto error_free;
3480 	}
3481 
3482 	PMD_DRV_LOG(INFO,
3483 		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
3484 		pci_dev->mem_resource[0].phys_addr,
3485 		pci_dev->mem_resource[0].addr);
3486 
3487 	rc = bnxt_hwrm_func_reset(bp);
3488 	if (rc) {
3489 		PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
3490 		rc = -EIO;
3491 		goto error_free;
3492 	}
3493 
3494 	if (BNXT_PF(bp)) {
3495 		//if (bp->pf.active_vfs) {
3496 			// TODO: Deallocate VF resources?
3497 		//}
3498 		if (bp->pdev->max_vfs) {
3499 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
3500 			if (rc) {
3501 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
3502 				goto error_free;
3503 			}
3504 		} else {
3505 			rc = bnxt_hwrm_allocate_pf_only(bp);
3506 			if (rc) {
3507 				PMD_DRV_LOG(ERR,
3508 					"Failed to allocate PF resources\n");
3509 				goto error_free;
3510 			}
3511 		}
3512 	}
3513 
3514 	bnxt_hwrm_port_led_qcaps(bp);
3515 
3516 	rc = bnxt_setup_int(bp);
3517 	if (rc)
3518 		goto error_free;
3519 
3520 	rc = bnxt_alloc_mem(bp);
3521 	if (rc)
3522 		goto error_free;
3523 
3524 	bnxt_init_nic(bp);
3525 
3526 	rc = bnxt_request_int(bp);
3527 	if (rc)
3528 		goto error_free;
3529 
3530 	return 0;
3531 
3532 error_free:
3533 	bnxt_dev_uninit(eth_dev);
3534 error:
3535 	return rc;
3536 }
3537 
3538 static int
3539 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
3540 {
3541 	struct bnxt *bp = eth_dev->data->dev_private;
3542 	int rc;
3543 
3544 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3545 		return -EPERM;
3546 
3547 	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
3548 	bnxt_disable_int(bp);
3549 	bnxt_free_int(bp);
3550 	bnxt_free_mem(bp);
3551 
3552 	bnxt_hwrm_func_buf_unrgtr(bp);
3553 
3554 	if (bp->grp_info != NULL) {
3555 		rte_free(bp->grp_info);
3556 		bp->grp_info = NULL;
3557 	}
3558 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3559 	bnxt_free_hwrm_resources(bp);
3560 
3561 	if (bp->tx_mem_zone) {
3562 		rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3563 		bp->tx_mem_zone = NULL;
3564 	}
3565 
3566 	if (bp->rx_mem_zone) {
3567 		rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3568 		bp->rx_mem_zone = NULL;
3569 	}
3570 
3571 	if (bp->dev_stopped == 0)
3572 		bnxt_dev_close_op(eth_dev);
3573 	if (bp->pf.vf_info)
3574 		rte_free(bp->pf.vf_info);
3575 	eth_dev->dev_ops = NULL;
3576 	eth_dev->rx_pkt_burst = NULL;
3577 	eth_dev->tx_pkt_burst = NULL;
3578 
3579 	return rc;
3580 }
3581 
3582 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3583 	struct rte_pci_device *pci_dev)
3584 {
3585 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3586 		bnxt_dev_init);
3587 }
3588 
3589 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3590 {
3591 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3592 		return rte_eth_dev_pci_generic_remove(pci_dev,
3593 				bnxt_dev_uninit);
3594 	else
3595 		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3596 }
3597 
3598 static struct rte_pci_driver bnxt_rte_pmd = {
3599 	.id_table = bnxt_pci_id_map,
3600 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3601 		RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_IOVA_AS_VA,
3602 	.probe = bnxt_pci_probe,
3603 	.remove = bnxt_pci_remove,
3604 };
3605 
3606 static bool
3607 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3608 {
3609 	if (strcmp(dev->device->driver->name, drv->driver.name))
3610 		return false;
3611 
3612 	return true;
3613 }
3614 
3615 bool is_bnxt_supported(struct rte_eth_dev *dev)
3616 {
3617 	return is_device_supported(dev, &bnxt_rte_pmd);
3618 }
3619 
3620 RTE_INIT(bnxt_init_log)
3621 {
3622 	bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
3623 	if (bnxt_logtype_driver >= 0)
3624 		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
3625 }
3626 
3627 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3628 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3629 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
3630