xref: /f-stack/dpdk/drivers/net/bnxt/bnxt_ethdev.c (revision 2bfe3f2e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <inttypes.h>
35 #include <stdbool.h>
36 
37 #include <rte_dev.h>
38 #include <rte_ethdev.h>
39 #include <rte_ethdev_pci.h>
40 #include <rte_malloc.h>
41 #include <rte_cycles.h>
42 
43 #include "bnxt.h"
44 #include "bnxt_cpr.h"
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
47 #include "bnxt_irq.h"
48 #include "bnxt_ring.h"
49 #include "bnxt_rxq.h"
50 #include "bnxt_rxr.h"
51 #include "bnxt_stats.h"
52 #include "bnxt_txq.h"
53 #include "bnxt_txr.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
56 #include "bnxt_nvm_defs.h"
57 
58 #define DRV_MODULE_NAME		"bnxt"
59 static const char bnxt_version[] =
60 	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
61 
62 #define PCI_VENDOR_ID_BROADCOM 0x14E4
63 
64 #define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
65 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
66 #define BROADCOM_DEV_ID_57414_VF 0x16c1
67 #define BROADCOM_DEV_ID_57301 0x16c8
68 #define BROADCOM_DEV_ID_57302 0x16c9
69 #define BROADCOM_DEV_ID_57304_PF 0x16ca
70 #define BROADCOM_DEV_ID_57304_VF 0x16cb
71 #define BROADCOM_DEV_ID_57417_MF 0x16cc
72 #define BROADCOM_DEV_ID_NS2 0x16cd
73 #define BROADCOM_DEV_ID_57311 0x16ce
74 #define BROADCOM_DEV_ID_57312 0x16cf
75 #define BROADCOM_DEV_ID_57402 0x16d0
76 #define BROADCOM_DEV_ID_57404 0x16d1
77 #define BROADCOM_DEV_ID_57406_PF 0x16d2
78 #define BROADCOM_DEV_ID_57406_VF 0x16d3
79 #define BROADCOM_DEV_ID_57402_MF 0x16d4
80 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
81 #define BROADCOM_DEV_ID_57412 0x16d6
82 #define BROADCOM_DEV_ID_57414 0x16d7
83 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8
84 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9
85 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
86 #define BROADCOM_DEV_ID_57412_MF 0x16de
87 #define BROADCOM_DEV_ID_57314 0x16df
88 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0
89 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
90 #define BROADCOM_DEV_ID_57417_SFP 0x16e2
91 #define BROADCOM_DEV_ID_57416_SFP 0x16e3
92 #define BROADCOM_DEV_ID_57317_SFP 0x16e4
93 #define BROADCOM_DEV_ID_57404_MF 0x16e7
94 #define BROADCOM_DEV_ID_57406_MF 0x16e8
95 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
96 #define BROADCOM_DEV_ID_57407_MF 0x16ea
97 #define BROADCOM_DEV_ID_57414_MF 0x16ec
98 #define BROADCOM_DEV_ID_57416_MF 0x16ee
99 
100 static const struct rte_pci_id bnxt_pci_id_map[] = {
101 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
102 			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
103 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
104 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
105 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
106 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
107 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
108 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
109 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
110 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
111 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
112 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
113 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
114 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
115 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
116 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
117 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
118 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
119 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
120 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
121 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
122 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
123 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
124 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
128 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
129 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
130 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
131 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
132 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
137 	{ .vendor_id = 0, /* sentinel */ },
138 };
139 
140 #define BNXT_ETH_RSS_SUPPORT (	\
141 	ETH_RSS_IPV4 |		\
142 	ETH_RSS_NONFRAG_IPV4_TCP |	\
143 	ETH_RSS_NONFRAG_IPV4_UDP |	\
144 	ETH_RSS_IPV6 |		\
145 	ETH_RSS_NONFRAG_IPV6_TCP |	\
146 	ETH_RSS_NONFRAG_IPV6_UDP)
147 
148 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
149 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
150 
151 /***********************/
152 
153 /*
154  * High level utility functions
155  */
156 
157 static void bnxt_free_mem(struct bnxt *bp)
158 {
159 	bnxt_free_filter_mem(bp);
160 	bnxt_free_vnic_attributes(bp);
161 	bnxt_free_vnic_mem(bp);
162 
163 	bnxt_free_stats(bp);
164 	bnxt_free_tx_rings(bp);
165 	bnxt_free_rx_rings(bp);
166 	bnxt_free_def_cp_ring(bp);
167 }
168 
169 static int bnxt_alloc_mem(struct bnxt *bp)
170 {
171 	int rc;
172 
173 	/* Default completion ring */
174 	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
175 	if (rc)
176 		goto alloc_mem_err;
177 
178 	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
179 			      bp->def_cp_ring, "def_cp");
180 	if (rc)
181 		goto alloc_mem_err;
182 
183 	rc = bnxt_alloc_vnic_mem(bp);
184 	if (rc)
185 		goto alloc_mem_err;
186 
187 	rc = bnxt_alloc_vnic_attributes(bp);
188 	if (rc)
189 		goto alloc_mem_err;
190 
191 	rc = bnxt_alloc_filter_mem(bp);
192 	if (rc)
193 		goto alloc_mem_err;
194 
195 	return 0;
196 
197 alloc_mem_err:
198 	bnxt_free_mem(bp);
199 	return rc;
200 }
201 
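/*
 * Program the chip via HWRM: allocate stat contexts, rings and ring
 * groups, configure each VNIC (context, filters, RSS table, TPA/LRO),
 * set the L2 Rx mask, map Rx queue interrupt vectors and bring the
 * link up.
 */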
202 static int bnxt_init_chip(struct bnxt *bp)
203 {
204 	unsigned int i, rss_idx, fw_idx;
205 	struct rte_eth_link new;
206 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
207 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
208 	uint32_t intr_vector = 0;
209 	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
210 	uint32_t vec = BNXT_MISC_VEC_ID;
211 	int rc;
212 
213 	/* disable uio/vfio intr/eventfd mapping */
214 	rte_intr_disable(intr_handle);
215 
216 	if (bp->eth_dev->data->mtu > ETHER_MTU) {
217 		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
218 		bp->flags |= BNXT_FLAG_JUMBO;
219 	} else {
220 		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
221 		bp->flags &= ~BNXT_FLAG_JUMBO;
222 	}
223 
224 	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
225 	if (rc) {
226 		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
227 		goto err_out;
228 	}
229 
230 	rc = bnxt_alloc_hwrm_rings(bp);
231 	if (rc) {
232 		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
233 		goto err_out;
234 	}
235 
236 	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
237 	if (rc) {
238 		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
239 		goto err_out;
240 	}
241 
242 	rc = bnxt_mq_rx_configure(bp);
243 	if (rc) {
244 		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
245 		goto err_out;
246 	}
247 
248 	/* VNIC configuration */
249 	for (i = 0; i < bp->nr_vnics; i++) {
250 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
251 
252 		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
253 		if (rc) {
254 			RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
255 				i, rc);
256 			goto err_out;
257 		}
258 
259 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
260 		if (rc) {
261 			RTE_LOG(ERR, PMD,
262 				"HWRM vnic %d ctx alloc failure rc: %x\n",
263 				i, rc);
264 			goto err_out;
265 		}
266 
267 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
268 		if (rc) {
269 			RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
270 				i, rc);
271 			goto err_out;
272 		}
273 
274 		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
275 		if (rc) {
276 			RTE_LOG(ERR, PMD,
277 				"HWRM vnic %d filter failure rc: %x\n",
278 				i, rc);
279 			goto err_out;
280 		}
281 		if (vnic->rss_table && vnic->hash_type) {
282 			/*
283 			 * Fill the RSS hash & redirection table with
284 			 * ring group ids for all VNICs
285 			 */
286 			for (rss_idx = 0, fw_idx = 0;
287 			     rss_idx < HW_HASH_INDEX_SIZE;
288 			     rss_idx++, fw_idx++) {
289 				if (vnic->fw_grp_ids[fw_idx] ==
290 				    INVALID_HW_RING_ID)
291 					fw_idx = 0;
292 				vnic->rss_table[rss_idx] =
293 						vnic->fw_grp_ids[fw_idx];
294 			}
295 			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
296 			if (rc) {
297 				RTE_LOG(ERR, PMD,
298 					"HWRM vnic %d set RSS failure rc: %x\n",
299 					i, rc);
300 				goto err_out;
301 			}
302 		}
303 
304 		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
305 
306 		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
307 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
308 		else
309 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
310 	}
311 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
312 	if (rc) {
313 		RTE_LOG(ERR, PMD,
314 			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
315 		goto err_out;
316 	}
317 
318 	/* check and configure queue intr-vector mapping */
319 	if ((rte_intr_cap_multiple(intr_handle) ||
320 	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
321 	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
322 		intr_vector = bp->eth_dev->data->nb_rx_queues;
323 		RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
324 			intr_vector);
325 		if (intr_vector > bp->rx_cp_nr_rings) {
326 			RTE_LOG(ERR, PMD, "At most %d intr queues supported",
327 					bp->rx_cp_nr_rings);
328 			return -ENOTSUP;
329 		}
330 		if (rte_intr_efd_enable(intr_handle, intr_vector))
331 			return -1;
332 	}
333 
334 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
335 		intr_handle->intr_vec =
336 			rte_zmalloc("intr_vec",
337 				    bp->eth_dev->data->nb_rx_queues *
338 				    sizeof(int), 0);
339 		if (intr_handle->intr_vec == NULL) {
340 			RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
341 				" intr_vec", bp->eth_dev->data->nb_rx_queues);
342 			return -ENOMEM;
343 		}
344 		RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
345 			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
346 			 __func__, intr_handle->intr_vec, intr_handle->nb_efd,
347 			intr_handle->max_intr);
348 	}
349 
350 	for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
351 	     queue_id++) {
352 		intr_handle->intr_vec[queue_id] = vec;
353 		if (vec < base + intr_handle->nb_efd - 1)
354 			vec++;
355 	}
356 
357 	/* enable uio/vfio intr/eventfd mapping */
358 	rte_intr_enable(intr_handle);
359 
360 	rc = bnxt_get_hwrm_link_config(bp, &new);
361 	if (rc) {
362 		RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
363 		goto err_out;
364 	}
365 
366 	if (!bp->link_info.link_up) {
367 		rc = bnxt_set_hwrm_link_config(bp, true);
368 		if (rc) {
369 			RTE_LOG(ERR, PMD,
370 				"HWRM link config failure rc: %x\n", rc);
371 			goto err_out;
372 		}
373 	}
374 	bnxt_print_link_info(bp->eth_dev);
375 
376 	return 0;
377 
378 err_out:
379 	bnxt_free_all_hwrm_resources(bp);
380 
381 	return rc;
382 }
383 
384 static int bnxt_shutdown_nic(struct bnxt *bp)
385 {
386 	bnxt_free_all_hwrm_resources(bp);
387 	bnxt_free_all_filters(bp);
388 	bnxt_free_all_vnics(bp);
389 	return 0;
390 }
391 
392 static int bnxt_init_nic(struct bnxt *bp)
393 {
394 	int rc;
395 
396 	rc = bnxt_init_ring_grps(bp);
397 	if (rc)
398 		return rc;
399 
400 	bnxt_init_vnics(bp);
401 	bnxt_init_filters(bp);
402 
403 	rc = bnxt_init_chip(bp);
404 	if (rc)
405 		return rc;
406 
407 	return 0;
408 }
409 
410 /*
411  * Device configuration and status function
412  */
413 
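/*
 * Report device capabilities: MAC address limits, queue counts derived
 * from the VNIC/L2/RSS/stat context limits, Rx/Tx offload flags,
 * default ring thresholds and the VMDq pool/queue split.
 */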
414 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
415 				  struct rte_eth_dev_info *dev_info)
416 {
417 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
418 	uint16_t max_vnics, i, j, vpool, vrxq;
419 	unsigned int max_rx_rings;
420 
421 	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
422 
423 	/* MAC Specifics */
424 	dev_info->max_mac_addrs = bp->max_l2_ctx;
425 	dev_info->max_hash_mac_addrs = 0;
426 
427 	/* PF/VF specifics */
428 	if (BNXT_PF(bp))
429 		dev_info->max_vfs = bp->pdev->max_vfs;
430 	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
431 						RTE_MIN(bp->max_rsscos_ctx,
432 						bp->max_stat_ctx)));
433 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
434 	dev_info->max_rx_queues = max_rx_rings;
435 	dev_info->max_tx_queues = max_rx_rings;
436 	dev_info->reta_size = bp->max_rsscos_ctx;
437 	dev_info->hash_key_size = 40;
438 	max_vnics = bp->max_vnics;
439 
440 	/* Fast path specifics */
441 	dev_info->min_rx_bufsize = 1;
442 	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
443 				  + VLAN_TAG_SIZE;
444 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
445 					DEV_RX_OFFLOAD_IPV4_CKSUM |
446 					DEV_RX_OFFLOAD_UDP_CKSUM |
447 					DEV_RX_OFFLOAD_TCP_CKSUM |
448 					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
449 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
450 					DEV_TX_OFFLOAD_IPV4_CKSUM |
451 					DEV_TX_OFFLOAD_TCP_CKSUM |
452 					DEV_TX_OFFLOAD_UDP_CKSUM |
453 					DEV_TX_OFFLOAD_TCP_TSO |
454 					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
455 					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
456 					DEV_TX_OFFLOAD_GRE_TNL_TSO |
457 					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
458 					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
459 
460 	/* *INDENT-OFF* */
461 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
462 		.rx_thresh = {
463 			.pthresh = 8,
464 			.hthresh = 8,
465 			.wthresh = 0,
466 		},
467 		.rx_free_thresh = 32,
468 		.rx_drop_en = 0,
469 	};
470 
471 	dev_info->default_txconf = (struct rte_eth_txconf) {
472 		.tx_thresh = {
473 			.pthresh = 32,
474 			.hthresh = 0,
475 			.wthresh = 0,
476 		},
477 		.tx_free_thresh = 32,
478 		.tx_rs_thresh = 32,
479 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
480 			     ETH_TXQ_FLAGS_NOOFFLOADS,
481 	};
482 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
483 
484 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
485 
486 	/* *INDENT-ON* */
487 
488 	/*
489 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
490 	 *       need further investigation.
491 	 */
492 
493 	/* VMDq resources */
494 	vpool = 64; /* ETH_64_POOLS */
495 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
496 	for (i = 0; i < 4; vpool >>= 1, i++) {
497 		if (max_vnics > vpool) {
498 			for (j = 0; j < 5; vrxq >>= 1, j++) {
499 				if (dev_info->max_rx_queues > vrxq) {
500 					if (vpool > vrxq)
501 						vpool = vrxq;
502 					goto found;
503 				}
504 			}
505 			/* Not enough resources to support VMDq */
506 			break;
507 		}
508 	}
509 	/* Not enough resources to support VMDq */
510 	vpool = 0;
511 	vrxq = 0;
512 found:
513 	dev_info->max_vmdq_pools = vpool;
514 	dev_info->vmdq_queue_num = vrxq;
515 
516 	dev_info->vmdq_pool_base = 0;
517 	dev_info->vmdq_queue_base = 0;
518 }
519 
520 /* Configure the device based on the configuration provided */
521 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
522 {
523 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
524 
525 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
526 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
527 
528 	/* Inherit new configurations */
529 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
530 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
531 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
532 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
533 
534 	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
535 		eth_dev->data->mtu =
536 				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
537 				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
538 	return 0;
539 }
540 
541 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
542 {
543 	struct rte_eth_link *link = &eth_dev->data->dev_link;
544 
545 	if (link->link_status)
546 		RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
547 			eth_dev->data->port_id,
548 			(uint32_t)link->link_speed,
549 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
550 			("full-duplex") : ("half-duplex"));
551 	else
552 		RTE_LOG(INFO, PMD, "Port %d Link Down\n",
553 			eth_dev->data->port_id);
554 }
555 
556 static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
557 {
558 	bnxt_print_link_info(eth_dev);
559 	return 0;
560 }
561 
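/*
 * Bring the port up: initialize the NIC via HWRM, refresh the link and
 * apply the configured VLAN filter/strip offloads; on failure the NIC
 * is shut down and the Rx/Tx mbufs are freed.
 */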
562 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
563 {
564 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
565 	int vlan_mask = 0;
566 	int rc;
567 
568 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
569 		RTE_LOG(ERR, PMD,
570 			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
571 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
572 	}
573 	bp->dev_stopped = 0;
574 
575 	rc = bnxt_init_nic(bp);
576 	if (rc)
577 		goto error;
578 
579 	bnxt_link_update_op(eth_dev, 1);
580 
581 	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
582 		vlan_mask |= ETH_VLAN_FILTER_MASK;
583 	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
584 		vlan_mask |= ETH_VLAN_STRIP_MASK;
585 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
586 	if (rc)
587 		goto error;
588 
589 	return 0;
590 
591 error:
592 	bnxt_shutdown_nic(bp);
593 	bnxt_free_tx_mbufs(bp);
594 	bnxt_free_rx_mbufs(bp);
595 	return rc;
596 }
597 
598 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
599 {
600 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
601 	int rc = 0;
602 
603 	if (!bp->link_info.link_up)
604 		rc = bnxt_set_hwrm_link_config(bp, true);
605 	if (!rc)
606 		eth_dev->data->dev_link.link_status = 1;
607 
608 	bnxt_print_link_info(eth_dev);
609 	return 0;
610 }
611 
612 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
613 {
614 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
615 
616 	eth_dev->data->dev_link.link_status = 0;
617 	bnxt_set_hwrm_link_config(bp, false);
618 	bp->link_info.link_up = 0;
619 
620 	return 0;
621 }
622 
623 /* Unload the driver, release resources */
624 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
625 {
626 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
627 
628 	if (bp->eth_dev->data->dev_started) {
629 		/* TBD: STOP HW queues DMA */
630 		eth_dev->data->dev_link.link_status = 0;
631 	}
632 	bnxt_set_hwrm_link_config(bp, false);
633 	bnxt_hwrm_port_clr_stats(bp);
634 	bnxt_shutdown_nic(bp);
635 	bp->dev_stopped = 1;
636 }
637 
638 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
639 {
640 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
641 
642 	if (bp->dev_stopped == 0)
643 		bnxt_dev_stop_op(eth_dev);
644 
645 	bnxt_free_tx_mbufs(bp);
646 	bnxt_free_rx_mbufs(bp);
647 	bnxt_free_mem(bp);
648 	if (eth_dev->data->mac_addrs != NULL) {
649 		rte_free(eth_dev->data->mac_addrs);
650 		eth_dev->data->mac_addrs = NULL;
651 	}
652 	if (bp->grp_info != NULL) {
653 		rte_free(bp->grp_info);
654 		bp->grp_info = NULL;
655 	}
656 }
657 
658 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
659 				    uint32_t index)
660 {
661 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
662 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
663 	struct bnxt_vnic_info *vnic;
664 	struct bnxt_filter_info *filter, *temp_filter;
665 	uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
666 	uint32_t i;
667 
668 	/*
669 	 * Loop through all VNICs from the specified filter flow pools to
670 	 * remove the corresponding MAC addr filter
671 	 */
672 	for (i = 0; i < pool; i++) {
673 		if (!(pool_mask & (1ULL << i)))
674 			continue;
675 
676 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
677 			filter = STAILQ_FIRST(&vnic->filter);
678 			while (filter) {
679 				temp_filter = STAILQ_NEXT(filter, next);
680 				if (filter->mac_index == index) {
681 					STAILQ_REMOVE(&vnic->filter, filter,
682 						      bnxt_filter_info, next);
683 					bnxt_hwrm_clear_l2_filter(bp, filter);
684 					filter->mac_index = INVALID_MAC_INDEX;
685 					memset(&filter->l2_addr, 0,
686 					       ETHER_ADDR_LEN);
687 					STAILQ_INSERT_TAIL(
688 							&bp->free_filter_list,
689 							filter, next);
690 				}
691 				filter = temp_filter;
692 			}
693 		}
694 	}
695 }
696 
697 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
698 				struct ether_addr *mac_addr,
699 				uint32_t index, uint32_t pool)
700 {
701 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
702 	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
703 	struct bnxt_filter_info *filter;
704 
705 	if (BNXT_VF(bp)) {
706 		RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
707 		return -ENOTSUP;
708 	}
709 
710 	if (!vnic) {
711 		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
712 		return -EINVAL;
713 	}
714 	/* Attach requested MAC address to the new l2_filter */
715 	STAILQ_FOREACH(filter, &vnic->filter, next) {
716 		if (filter->mac_index == index) {
717 			RTE_LOG(ERR, PMD,
718 				"MAC addr already exists for pool %d\n", pool);
719 			return 0;
720 		}
721 	}
722 	filter = bnxt_alloc_filter(bp);
723 	if (!filter) {
724 		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
725 		return -ENODEV;
726 	}
727 	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
728 	filter->mac_index = index;
729 	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
730 	return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
731 }
732 
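/*
 * Query the link state from firmware, optionally polling until the
 * link comes up, and propagate any change into dev_link.
 */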
733 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
734 {
735 	int rc = 0;
736 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
737 	struct rte_eth_link new;
738 	unsigned int cnt = BNXT_LINK_WAIT_CNT;
739 
740 	memset(&new, 0, sizeof(new));
741 	do {
742 		/* Retrieve link info from hardware */
743 		rc = bnxt_get_hwrm_link_config(bp, &new);
744 		if (rc) {
745 			new.link_speed = ETH_LINK_SPEED_100M;
746 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
747 			RTE_LOG(ERR, PMD,
748 				"Failed to retrieve link rc = 0x%x!\n", rc);
749 			goto out;
750 		}
751 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
752 
753 		if (!wait_to_complete)
754 			break;
755 	} while (!new.link_status && cnt--);
756 
757 out:
758 	/* Timed out or success */
759 	if (new.link_status != eth_dev->data->dev_link.link_status ||
760 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
761 		memcpy(&eth_dev->data->dev_link, &new,
762 			sizeof(struct rte_eth_link));
763 		bnxt_print_link_info(eth_dev);
764 	}
765 
766 	return rc;
767 }
768 
769 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
770 {
771 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
772 	struct bnxt_vnic_info *vnic;
773 
774 	if (bp->vnic_info == NULL)
775 		return;
776 
777 	vnic = &bp->vnic_info[0];
778 
779 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
780 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
781 }
782 
783 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
784 {
785 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
786 	struct bnxt_vnic_info *vnic;
787 
788 	if (bp->vnic_info == NULL)
789 		return;
790 
791 	vnic = &bp->vnic_info[0];
792 
793 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
794 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
795 }
796 
797 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
798 {
799 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
800 	struct bnxt_vnic_info *vnic;
801 
802 	if (bp->vnic_info == NULL)
803 		return;
804 
805 	vnic = &bp->vnic_info[0];
806 
807 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
808 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
809 }
810 
811 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
812 {
813 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
814 	struct bnxt_vnic_info *vnic;
815 
816 	if (bp->vnic_info == NULL)
817 		return;
818 
819 	vnic = &bp->vnic_info[0];
820 
821 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
822 	bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
823 }
824 
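/*
 * Program the RSS redirection table on every VNIC in the filter-flow
 * pools; reta_size must equal the hardware indirection table size
 * (HW_HASH_INDEX_SIZE).
 */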
825 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
826 			    struct rte_eth_rss_reta_entry64 *reta_conf,
827 			    uint16_t reta_size)
828 {
829 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
830 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
831 	struct bnxt_vnic_info *vnic;
832 	int i;
833 
834 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
835 		return -EINVAL;
836 
837 	if (reta_size != HW_HASH_INDEX_SIZE) {
838 		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
839 			"(%d) must equal the size supported by the hardware "
840 			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
841 		return -EINVAL;
842 	}
843 	/* Update the RSS VNIC(s) */
844 	for (i = 0; i < MAX_FF_POOLS; i++) {
845 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
846 			memcpy(vnic->rss_table, reta_conf, reta_size);
847 
848 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
849 		}
850 	}
851 	return 0;
852 }
853 
854 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
855 			      struct rte_eth_rss_reta_entry64 *reta_conf,
856 			      uint16_t reta_size)
857 {
858 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
859 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
860 	struct rte_intr_handle *intr_handle
861 		= &bp->pdev->intr_handle;
862 
863 	/* Retrieve from the default VNIC */
864 	if (!vnic)
865 		return -EINVAL;
866 	if (!vnic->rss_table)
867 		return -EINVAL;
868 
869 	if (reta_size != HW_HASH_INDEX_SIZE) {
870 		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
871 			"(%d) must equal the size supported by the hardware "
872 			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
873 		return -EINVAL;
874 	}
875 	/* EW - need to revisit here copying from u64 to u16 */
876 	memcpy(reta_conf, vnic->rss_table, reta_size);
877 
878 	if (rte_intr_allow_others(intr_handle)) {
879 		if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
880 			bnxt_dev_lsc_intr_setup(eth_dev);
881 	}
882 
883 	return 0;
884 }
885 
886 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
887 				   struct rte_eth_rss_conf *rss_conf)
888 {
889 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
890 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
891 	struct bnxt_vnic_info *vnic;
892 	uint16_t hash_type = 0;
893 	int i;
894 
895 	/*
896 	 * If the requested RSS enablement differs from what was set in
897 	 * dev_configure, return -EINVAL
898 	 */
899 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
900 		if (!rss_conf->rss_hf)
901 			RTE_LOG(ERR, PMD, "Hash type NONE\n");
902 	} else {
903 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
904 			return -EINVAL;
905 	}
906 
907 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
908 	memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
909 
910 	if (rss_conf->rss_hf & ETH_RSS_IPV4)
911 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
912 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
913 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
914 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
915 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
916 	if (rss_conf->rss_hf & ETH_RSS_IPV6)
917 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
918 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
919 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
920 	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
921 		hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
922 
923 	/* Update the RSS VNIC(s) */
924 	for (i = 0; i < MAX_FF_POOLS; i++) {
925 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
926 			vnic->hash_type = hash_type;
927 
928 			/*
929 			 * Use the supplied key if the key length is
930 			 * acceptable and the rss_key is not NULL
931 			 */
932 			if (rss_conf->rss_key &&
933 			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
934 				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
935 				       rss_conf->rss_key_len);
936 
937 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
938 		}
939 	}
940 	return 0;
941 }
942 
943 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
944 				     struct rte_eth_rss_conf *rss_conf)
945 {
946 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
947 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
948 	int len;
949 	uint32_t hash_types;
950 
951 	/* RSS configuration is the same for all VNICs */
952 	if (vnic && vnic->rss_hash_key) {
953 		if (rss_conf->rss_key) {
954 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
955 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
956 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
957 		}
958 
959 		hash_types = vnic->hash_type;
960 		rss_conf->rss_hf = 0;
961 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
962 			rss_conf->rss_hf |= ETH_RSS_IPV4;
963 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
964 		}
965 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
966 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
967 			hash_types &=
968 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
969 		}
970 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
971 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
972 			hash_types &=
973 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
974 		}
975 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
976 			rss_conf->rss_hf |= ETH_RSS_IPV6;
977 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
978 		}
979 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
980 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
981 			hash_types &=
982 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
983 		}
984 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
985 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
986 			hash_types &=
987 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
988 		}
989 		if (hash_types) {
990 			RTE_LOG(ERR, PMD,
991 				"Unknown RSS config from firmware (%08x), RSS disabled",
992 				vnic->hash_type);
993 			return -ENOTSUP;
994 		}
995 	} else {
996 		rss_conf->rss_hf = 0;
997 	}
998 	return 0;
999 }
1000 
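/*
 * Report the current flow-control (pause frame) configuration as read
 * back from the firmware link configuration.
 */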
1001 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1002 			       struct rte_eth_fc_conf *fc_conf)
1003 {
1004 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1005 	struct rte_eth_link link_info;
1006 	int rc;
1007 
1008 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
1009 	if (rc)
1010 		return rc;
1011 
1012 	memset(fc_conf, 0, sizeof(*fc_conf));
1013 	if (bp->link_info.auto_pause)
1014 		fc_conf->autoneg = 1;
1015 	switch (bp->link_info.pause) {
1016 	case 0:
1017 		fc_conf->mode = RTE_FC_NONE;
1018 		break;
1019 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1020 		fc_conf->mode = RTE_FC_TX_PAUSE;
1021 		break;
1022 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1023 		fc_conf->mode = RTE_FC_RX_PAUSE;
1024 		break;
1025 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1026 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1027 		fc_conf->mode = RTE_FC_FULL;
1028 		break;
1029 	}
1030 	return 0;
1031 }
1032 
1033 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1034 			       struct rte_eth_fc_conf *fc_conf)
1035 {
1036 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1037 
1038 	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
1039 		RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
1040 		return -ENOTSUP;
1041 	}
1042 
1043 	switch (fc_conf->mode) {
1044 	case RTE_FC_NONE:
1045 		bp->link_info.auto_pause = 0;
1046 		bp->link_info.force_pause = 0;
1047 		break;
1048 	case RTE_FC_RX_PAUSE:
1049 		if (fc_conf->autoneg) {
1050 			bp->link_info.auto_pause =
1051 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1052 			bp->link_info.force_pause = 0;
1053 		} else {
1054 			bp->link_info.auto_pause = 0;
1055 			bp->link_info.force_pause =
1056 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1057 		}
1058 		break;
1059 	case RTE_FC_TX_PAUSE:
1060 		if (fc_conf->autoneg) {
1061 			bp->link_info.auto_pause =
1062 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1063 			bp->link_info.force_pause = 0;
1064 		} else {
1065 			bp->link_info.auto_pause = 0;
1066 			bp->link_info.force_pause =
1067 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1068 		}
1069 		break;
1070 	case RTE_FC_FULL:
1071 		if (fc_conf->autoneg) {
1072 			bp->link_info.auto_pause =
1073 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1074 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1075 			bp->link_info.force_pause = 0;
1076 		} else {
1077 			bp->link_info.auto_pause = 0;
1078 			bp->link_info.force_pause =
1079 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1080 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1081 		}
1082 		break;
1083 	}
1084 	return bnxt_set_hwrm_link_config(bp, true);
1085 }
1086 
1087 /* Add UDP tunneling port */
1088 static int
1089 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1090 			 struct rte_eth_udp_tunnel *udp_tunnel)
1091 {
1092 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1093 	uint16_t tunnel_type = 0;
1094 	int rc = 0;
1095 
1096 	switch (udp_tunnel->prot_type) {
1097 	case RTE_TUNNEL_TYPE_VXLAN:
1098 		if (bp->vxlan_port_cnt) {
1099 			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
1100 				udp_tunnel->udp_port);
1101 			if (bp->vxlan_port != udp_tunnel->udp_port) {
1102 				RTE_LOG(ERR, PMD, "Only one port allowed\n");
1103 				return -ENOSPC;
1104 			}
1105 			bp->vxlan_port_cnt++;
1106 			return 0;
1107 		}
1108 		tunnel_type =
1109 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1110 		bp->vxlan_port_cnt++;
1111 		break;
1112 	case RTE_TUNNEL_TYPE_GENEVE:
1113 		if (bp->geneve_port_cnt) {
1114 			RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
1115 				udp_tunnel->udp_port);
1116 			if (bp->geneve_port != udp_tunnel->udp_port) {
1117 				RTE_LOG(ERR, PMD, "Only one port allowed\n");
1118 				return -ENOSPC;
1119 			}
1120 			bp->geneve_port_cnt++;
1121 			return 0;
1122 		}
1123 		tunnel_type =
1124 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1125 		bp->geneve_port_cnt++;
1126 		break;
1127 	default:
1128 		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
1129 		return -ENOTSUP;
1130 	}
1131 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1132 					     tunnel_type);
1133 	return rc;
1134 }
1135 
1136 static int
1137 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1138 			 struct rte_eth_udp_tunnel *udp_tunnel)
1139 {
1140 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1141 	uint16_t tunnel_type = 0;
1142 	uint16_t port = 0;
1143 	int rc = 0;
1144 
1145 	switch (udp_tunnel->prot_type) {
1146 	case RTE_TUNNEL_TYPE_VXLAN:
1147 		if (!bp->vxlan_port_cnt) {
1148 			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
1149 			return -EINVAL;
1150 		}
1151 		if (bp->vxlan_port != udp_tunnel->udp_port) {
1152 			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
1153 				udp_tunnel->udp_port, bp->vxlan_port);
1154 			return -EINVAL;
1155 		}
1156 		if (--bp->vxlan_port_cnt)
1157 			return 0;
1158 
1159 		tunnel_type =
1160 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1161 		port = bp->vxlan_fw_dst_port_id;
1162 		break;
1163 	case RTE_TUNNEL_TYPE_GENEVE:
1164 		if (!bp->geneve_port_cnt) {
1165 			RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
1166 			return -EINVAL;
1167 		}
1168 		if (bp->geneve_port != udp_tunnel->udp_port) {
1169 			RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
1170 				udp_tunnel->udp_port, bp->geneve_port);
1171 			return -EINVAL;
1172 		}
1173 		if (--bp->geneve_port_cnt)
1174 			return 0;
1175 
1176 		tunnel_type =
1177 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1178 		port = bp->geneve_fw_dst_port_id;
1179 		break;
1180 	default:
1181 		RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
1182 		return -ENOTSUP;
1183 	}
1184 
1185 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1186 	if (!rc) {
1187 		if (tunnel_type ==
1188 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1189 			bp->vxlan_port = 0;
1190 		if (tunnel_type ==
1191 		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1192 			bp->geneve_port = 0;
1193 	}
1194 	return rc;
1195 }
1196 
1197 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1198 {
1199 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1200 	struct bnxt_vnic_info *vnic;
1201 	unsigned int i;
1202 	int rc = 0;
1203 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1204 
1205 	/* Cycle through all VNICs */
1206 	for (i = 0; i < bp->nr_vnics; i++) {
1207 		/*
1208 		 * For each VNIC and each associated filter(s)
1209 		 * if VLAN exists && VLAN matches vlan_id
1210 		 *      remove the MAC+VLAN filter
1211 		 *      add a new MAC only filter
1212 		 * else
1213 		 *      VLAN filter doesn't exist, just skip and continue
1214 		 */
1215 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1216 			filter = STAILQ_FIRST(&vnic->filter);
1217 			while (filter) {
1218 				temp_filter = STAILQ_NEXT(filter, next);
1219 
1220 				if (filter->enables & chk &&
1221 				    filter->l2_ovlan == vlan_id) {
1222 					/* Must delete the filter */
1223 					STAILQ_REMOVE(&vnic->filter, filter,
1224 						      bnxt_filter_info, next);
1225 					bnxt_hwrm_clear_l2_filter(bp, filter);
1226 					STAILQ_INSERT_TAIL(
1227 							&bp->free_filter_list,
1228 							filter, next);
1229 
1230 					/*
1231 					 * Need to check whether the MAC
1232 					 * filter already exists before
1233 					 * allocating a new one
1234 					 */
1235 
1236 					new_filter = bnxt_alloc_filter(bp);
1237 					if (!new_filter) {
1238 						RTE_LOG(ERR, PMD,
1239 							"MAC/VLAN filter alloc failed\n");
1240 						rc = -ENOMEM;
1241 						goto exit;
1242 					}
1243 					STAILQ_INSERT_TAIL(&vnic->filter,
1244 							   new_filter, next);
1245 					/* Inherit MAC from previous filter */
1246 					new_filter->mac_index =
1247 							filter->mac_index;
1248 					memcpy(new_filter->l2_addr,
1249 					       filter->l2_addr, ETHER_ADDR_LEN);
1250 					/* MAC only filter */
1251 					rc = bnxt_hwrm_set_l2_filter(bp,
1252 							vnic->fw_vnic_id,
1253 							new_filter);
1254 					if (rc)
1255 						goto exit;
1256 					RTE_LOG(INFO, PMD,
1257 						"Del Vlan filter for %d\n",
1258 						vlan_id);
1259 				}
1260 				filter = temp_filter;
1261 			}
1262 		}
1263 	}
1264 exit:
1265 	return rc;
1266 }
1267 
1268 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1269 {
1270 	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1271 	struct bnxt_vnic_info *vnic;
1272 	unsigned int i;
1273 	int rc = 0;
1274 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
1275 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
1276 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1277 
1278 	/* Cycle through all VNICs */
1279 	for (i = 0; i < bp->nr_vnics; i++) {
1280 		/*
1281 		 * For each VNIC and each associated filter(s)
1282 		 * if VLAN exists:
1283 		 *   if VLAN matches vlan_id
1284 		 *      VLAN filter already exists, just skip and continue
1285 		 *   else
1286 		 *      add a new MAC+VLAN filter
1287 		 * else
1288 		 *   Remove the old MAC only filter
1289 		 *    Add a new MAC+VLAN filter
1290 		 */
1291 		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1292 			filter = STAILQ_FIRST(&vnic->filter);
1293 			while (filter) {
1294 				temp_filter = STAILQ_NEXT(filter, next);
1295 
1296 				if (filter->enables & chk) {
1297 					if (filter->l2_ovlan == vlan_id)
1298 						goto cont;
1299 				} else {
1300 					/* Must delete the MAC filter */
1301 					STAILQ_REMOVE(&vnic->filter, filter,
1302 						      bnxt_filter_info, next);
1303 					bnxt_hwrm_clear_l2_filter(bp, filter);
1304 					filter->l2_ovlan = 0;
1305 					STAILQ_INSERT_TAIL(
1306 							&bp->free_filter_list,
1307 							filter, next);
1308 				}
1309 				new_filter = bnxt_alloc_filter(bp);
1310 				if (!new_filter) {
1311 					RTE_LOG(ERR, PMD,
1312 						"MAC/VLAN filter alloc failed\n");
1313 					rc = -ENOMEM;
1314 					goto exit;
1315 				}
1316 				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
1317 						   next);
1318 				/* Inherit MAC from the previous filter */
1319 				new_filter->mac_index = filter->mac_index;
1320 				memcpy(new_filter->l2_addr, filter->l2_addr,
1321 				       ETHER_ADDR_LEN);
1322 				/* MAC + VLAN ID filter */
1323 				new_filter->l2_ovlan = vlan_id;
1324 				new_filter->l2_ovlan_mask = 0xF000;
1325 				new_filter->enables |= en;
1326 				rc = bnxt_hwrm_set_l2_filter(bp,
1327 							     vnic->fw_vnic_id,
1328 							     new_filter);
1329 				if (rc)
1330 					goto exit;
1331 				RTE_LOG(INFO, PMD,
1332 					"Added Vlan filter for %d\n", vlan_id);
1333 cont:
1334 				filter = temp_filter;
1335 			}
1336 		}
1337 	}
1338 exit:
1339 	return rc;
1340 }
1341 
1342 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1343 				   uint16_t vlan_id, int on)
1344 {
1345 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1346 
1347 	/* These operations apply to ALL existing MAC/VLAN filters */
1348 	if (on)
1349 		return bnxt_add_vlan_filter(bp, vlan_id);
1350 	else
1351 		return bnxt_del_vlan_filter(bp, vlan_id);
1352 }
1353 
1354 static int
1355 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1356 {
1357 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1358 	unsigned int i;
1359 
1360 	if (mask & ETH_VLAN_FILTER_MASK) {
1361 		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
1362 			/* Remove any VLAN filters programmed */
1363 			for (i = 0; i < 4095; i++)
1364 				bnxt_del_vlan_filter(bp, i);
1365 		}
1366 		RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
1367 			dev->data->dev_conf.rxmode.hw_vlan_filter);
1368 	}
1369 
1370 	if (mask & ETH_VLAN_STRIP_MASK) {
1371 		/* Enable or disable VLAN stripping */
1372 		for (i = 0; i < bp->nr_vnics; i++) {
1373 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1374 			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1375 				vnic->vlan_strip = true;
1376 			else
1377 				vnic->vlan_strip = false;
1378 			bnxt_hwrm_vnic_cfg(bp, vnic);
1379 		}
1380 		RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
1381 			dev->data->dev_conf.rxmode.hw_vlan_strip);
1382 	}
1383 
1384 	if (mask & ETH_VLAN_EXTEND_MASK)
1385 		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
1386 
1387 	return 0;
1388 }
1389 
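/*
 * Replace the default (mac_index 0) L2 filter on VNIC 0 with the new
 * MAC address; silently ignored on a VF interface.
 */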
1390 static void
1391 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1392 {
1393 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1394 	/* Default Filter is tied to VNIC 0 */
1395 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1396 	struct bnxt_filter_info *filter;
1397 	int rc;
1398 
1399 	if (BNXT_VF(bp))
1400 		return;
1401 
1402 	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
1403 	memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
1404 
1405 	STAILQ_FOREACH(filter, &vnic->filter, next) {
1406 		/* Default Filter is at Index 0 */
1407 		if (filter->mac_index != 0)
1408 			continue;
1409 		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1410 		if (rc)
1411 			break;
1412 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1413 		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1414 		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1415 		filter->enables |=
1416 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1417 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1418 		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1419 		if (rc)
1420 			break;
1421 		filter->mac_index = 0;
1422 		RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
1423 	}
1424 }
1425 
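/*
 * Program the multicast address list on VNIC 0, falling back to
 * all-multicast mode when more than BNXT_MAX_MC_ADDRS are requested.
 */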
1426 static int
1427 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1428 			  struct ether_addr *mc_addr_set,
1429 			  uint32_t nb_mc_addr)
1430 {
1431 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1432 	char *mc_addr_list = (char *)mc_addr_set;
1433 	struct bnxt_vnic_info *vnic;
1434 	uint32_t off = 0, i = 0;
1435 
1436 	vnic = &bp->vnic_info[0];
1437 
1438 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
1439 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1440 		goto allmulti;
1441 	}
1442 
1443 	/* TODO Check for Duplicate mcast addresses */
1444 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1445 	for (i = 0; i < nb_mc_addr; i++) {
1446 		memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
1447 		off += ETHER_ADDR_LEN;
1448 	}
1449 
1450 	vnic->mc_addr_cnt = i;
1451 
1452 allmulti:
1453 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1454 }
1455 
1456 static int
1457 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1458 {
1459 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1460 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
1461 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
1462 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
1463 	int ret;
1464 
1465 	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
1466 			fw_major, fw_minor, fw_updt);
1467 
1468 	ret += 1; /* add the size of '\0' */
1469 	if (fw_size < (uint32_t)ret)
1470 		return ret;
1471 	else
1472 		return 0;
1473 }
1474 
1475 static void
1476 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1477 	struct rte_eth_rxq_info *qinfo)
1478 {
1479 	struct bnxt_rx_queue *rxq;
1480 
1481 	rxq = dev->data->rx_queues[queue_id];
1482 
1483 	qinfo->mp = rxq->mb_pool;
1484 	qinfo->scattered_rx = dev->data->scattered_rx;
1485 	qinfo->nb_desc = rxq->nb_rx_desc;
1486 
1487 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1488 	qinfo->conf.rx_drop_en = 0;
1489 	qinfo->conf.rx_deferred_start = 0;
1490 }
1491 
1492 static void
1493 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1494 	struct rte_eth_txq_info *qinfo)
1495 {
1496 	struct bnxt_tx_queue *txq;
1497 
1498 	txq = dev->data->tx_queues[queue_id];
1499 
1500 	qinfo->nb_desc = txq->nb_tx_desc;
1501 
1502 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1503 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1504 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1505 
1506 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1507 	qinfo->conf.tx_rs_thresh = 0;
1508 	qinfo->conf.txq_flags = txq->txq_flags;
1509 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1510 }
1511 
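/*
 * Validate the requested MTU against device limits, update the
 * jumbo-frame state and max_rx_pkt_len, then push the new MRU to every
 * VNIC.
 */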
1512 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
1513 {
1514 	struct bnxt *bp = eth_dev->data->dev_private;
1515 	struct rte_eth_dev_info dev_info;
1516 	uint32_t max_dev_mtu;
1517 	uint32_t rc = 0;
1518 	uint32_t i;
1519 
1520 	bnxt_dev_info_get_op(eth_dev, &dev_info);
1521 	max_dev_mtu = dev_info.max_rx_pktlen -
1522 		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
1523 
1524 	if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
1525 		RTE_LOG(ERR, PMD, "MTU requested must be between %d and %d\n",
1526 			ETHER_MIN_MTU, max_dev_mtu);
1527 		return -EINVAL;
1528 	}
1529 
1530 
1531 	if (new_mtu > ETHER_MTU) {
1532 		bp->flags |= BNXT_FLAG_JUMBO;
1533 		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
1534 	} else {
1535 		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
1536 		bp->flags &= ~BNXT_FLAG_JUMBO;
1537 	}
1538 
1539 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
1540 		new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1541 
1542 	eth_dev->data->mtu = new_mtu;
1543 	RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
1544 
1545 	for (i = 0; i < bp->nr_vnics; i++) {
1546 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1547 
1548 		vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1549 					ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1550 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1551 		if (rc)
1552 			break;
1553 
1554 		rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1555 		if (rc)
1556 			return rc;
1557 	}
1558 
1559 	return rc;
1560 }
1561 
1562 static int
1563 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
1564 {
1565 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1566 	uint16_t vlan = bp->vlan;
1567 	int rc;
1568 
1569 	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
1570 		RTE_LOG(ERR, PMD,
1571 			"PVID cannot be modified for this function\n");
1572 		return -ENOTSUP;
1573 	}
1574 	bp->vlan = on ? pvid : 0;
1575 
1576 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
1577 	if (rc)
1578 		bp->vlan = vlan;
1579 	return rc;
1580 }
1581 
1582 static int
1583 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
1584 {
1585 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1586 
1587 	return bnxt_hwrm_port_led_cfg(bp, true);
1588 }
1589 
1590 static int
1591 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
1592 {
1593 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1594 
1595 	return bnxt_hwrm_port_led_cfg(bp, false);
1596 }
1597 
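/*
 * Walk the Rx completion ring and count completed packet descriptors,
 * skipping any aggregation buffers that belong to TPA completions.
 */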
1598 static uint32_t
1599 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1600 {
1601 	uint32_t desc = 0, raw_cons = 0, cons;
1602 	struct bnxt_cp_ring_info *cpr;
1603 	struct bnxt_rx_queue *rxq;
1604 	struct rx_pkt_cmpl *rxcmp;
1605 	uint16_t cmp_type;
1606 	uint8_t cmp = 1;
1607 	bool valid;
1608 
1609 	rxq = dev->data->rx_queues[rx_queue_id];
1610 	cpr = rxq->cp_ring;
1611 	valid = cpr->valid;
1612 
1613 	while (raw_cons < rxq->nb_rx_desc) {
1614 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1615 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1616 
1617 		if (!CMPL_VALID(rxcmp, valid))
1618 			goto nothing_to_do;
1619 		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1620 		cmp_type = CMP_TYPE(rxcmp);
1621 		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1622 			cmp = (rte_le_to_cpu_32(
1623 					((struct rx_tpa_end_cmpl *)
1624 					 (rxcmp))->agg_bufs_v1) &
1625 			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1626 				RX_TPA_END_CMPL_AGG_BUFS_SFT;
1627 			desc++;
1628 		} else if (cmp_type == 0x11) {
1629 			desc++;
1630 			cmp = (rxcmp->agg_bufs_v1 &
1631 				   RX_PKT_CMPL_AGG_BUFS_MASK) >>
1632 				RX_PKT_CMPL_AGG_BUFS_SFT;
1633 		} else {
1634 			cmp = 1;
1635 		}
1636 nothing_to_do:
1637 		raw_cons += cmp ? cmp : 2;
1638 	}
1639 
1640 	return desc;
1641 }
1642 
1643 static int
1644 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
1645 {
1646 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
1647 	struct bnxt_rx_ring_info *rxr;
1648 	struct bnxt_cp_ring_info *cpr;
1649 	struct bnxt_sw_rx_bd *rx_buf;
1650 	struct rx_pkt_cmpl *rxcmp;
1651 	uint32_t cons, cp_cons;
1652 
1653 	if (!rxq)
1654 		return -EINVAL;
1655 
1656 	cpr = rxq->cp_ring;
1657 	rxr = rxq->rx_ring;
1658 
1659 	if (offset >= rxq->nb_rx_desc)
1660 		return -EINVAL;
1661 
1662 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1663 	cp_cons = cpr->cp_raw_cons;
1664 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1665 
1666 	if (cons > cp_cons) {
1667 		if (CMPL_VALID(rxcmp, cpr->valid))
1668 			return RTE_ETH_RX_DESC_DONE;
1669 	} else {
1670 		if (CMPL_VALID(rxcmp, !cpr->valid))
1671 			return RTE_ETH_RX_DESC_DONE;
1672 	}
1673 	rx_buf = &rxr->rx_buf_ring[cons];
1674 	if (rx_buf->mbuf == NULL)
1675 		return RTE_ETH_RX_DESC_UNAVAIL;
1676 
1677 
1678 	return RTE_ETH_RX_DESC_AVAIL;
1679 }
1680 
1681 static int
1682 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1683 {
1684 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1685 	struct bnxt_tx_ring_info *txr;
1686 	struct bnxt_cp_ring_info *cpr;
1687 	struct bnxt_sw_tx_bd *tx_buf;
1688 	struct tx_pkt_cmpl *txcmp;
1689 	uint32_t cons, cp_cons;
1690 
1691 	if (!txq)
1692 		return -EINVAL;
1693 
1694 	cpr = txq->cp_ring;
1695 	txr = txq->tx_ring;
1696 
1697 	if (offset >= txq->nb_tx_desc)
1698 		return -EINVAL;
1699 
1700 	cons = RING_CMP(cpr->cp_ring_struct, offset);
1701 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1702 	cp_cons = cpr->cp_raw_cons;
1703 
1704 	if (cons > cp_cons) {
1705 		if (CMPL_VALID(txcmp, cpr->valid))
1706 			return RTE_ETH_TX_DESC_UNAVAIL;
1707 	} else {
1708 		if (CMPL_VALID(txcmp, !cpr->valid))
1709 			return RTE_ETH_TX_DESC_UNAVAIL;
1710 	}
1711 	tx_buf = &txr->tx_buf_ring[cons];
1712 	if (tx_buf->mbuf == NULL)
1713 		return RTE_ETH_TX_DESC_DONE;
1714 
1715 	return RTE_ETH_TX_DESC_FULL;
1716 }
1717 
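/*
 * Validate an ethertype filter request and search the relevant VNIC's
 * filter list for an existing match; *ret is set to -EINVAL on bad
 * input or -EEXIST when a matching filter is already programmed.
 */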
1718 static struct bnxt_filter_info *
1719 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1720 				struct rte_eth_ethertype_filter *efilter,
1721 				struct bnxt_vnic_info *vnic0,
1722 				struct bnxt_vnic_info *vnic,
1723 				int *ret)
1724 {
1725 	struct bnxt_filter_info *mfilter = NULL;
1726 	int match = 0;
1727 	*ret = 0;
1728 
1729 	if (efilter->ether_type == ETHER_TYPE_IPv4 ||
1730 		efilter->ether_type == ETHER_TYPE_IPv6) {
1731 		RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
1732 			" ethertype filter.", efilter->ether_type);
1733 		*ret = -EINVAL;
1734 		goto exit;
1735 	}
1736 	if (efilter->queue >= bp->rx_nr_rings) {
1737 		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
1738 		*ret = -EINVAL;
1739 		goto exit;
1740 	}
1741 
1742 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1743 	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1744 	if (vnic == NULL) {
1745 		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
1746 		*ret = -EINVAL;
1747 		goto exit;
1748 	}
1749 
1750 	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1751 		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1752 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1753 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1754 			     mfilter->flags ==
1755 			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1756 			     mfilter->ethertype == efilter->ether_type)) {
1757 				match = 1;
1758 				break;
1759 			}
1760 		}
1761 	} else {
1762 		STAILQ_FOREACH(mfilter, &vnic->filter, next)
1763 			if ((!memcmp(efilter->mac_addr.addr_bytes,
1764 				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
1765 			     mfilter->ethertype == efilter->ether_type &&
1766 			     mfilter->flags ==
1767 			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1768 				match = 1;
1769 				break;
1770 			}
1771 	}
1772 
1773 	if (match)
1774 		*ret = -EEXIST;
1775 
1776 exit:
1777 	return mfilter;
1778 }
1779 
1780 static int
1781 bnxt_ethertype_filter(struct rte_eth_dev *dev,
1782 			enum rte_filter_op filter_op,
1783 			void *arg)
1784 {
1785 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1786 	struct rte_eth_ethertype_filter *efilter =
1787 			(struct rte_eth_ethertype_filter *)arg;
1788 	struct bnxt_filter_info *bfilter, *filter1;
1789 	struct bnxt_vnic_info *vnic, *vnic0;
1790 	int ret;
1791 
1792 	if (filter_op == RTE_ETH_FILTER_NOP)
1793 		return 0;
1794 
1795 	if (arg == NULL) {
1796 		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
1797 			    filter_op);
1798 		return -EINVAL;
1799 	}
1800 
1801 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1802 	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1803 
1804 	switch (filter_op) {
1805 	case RTE_ETH_FILTER_ADD:
1806 		bnxt_match_and_validate_ether_filter(bp, efilter,
1807 							vnic0, vnic, &ret);
1808 		if (ret < 0)
1809 			return ret;
1810 
1811 		bfilter = bnxt_get_unused_filter(bp);
1812 		if (bfilter == NULL) {
1813 			RTE_LOG(ERR, PMD,
1814 				"Not enough resources for a new filter.\n");
1815 			return -ENOMEM;
1816 		}
1817 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
1818 		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
1819 		       ETHER_ADDR_LEN);
1820 		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
1821 		       ETHER_ADDR_LEN);
1822 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
1823 		bfilter->ethertype = efilter->ether_type;
1824 		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
1825 
1826 		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
1827 		if (filter1 == NULL) {
1828 			ret = -1;
1829 			goto cleanup;
1830 		}
1831 		bfilter->enables |=
1832 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1833 		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1834 
1835 		bfilter->dst_id = vnic->fw_vnic_id;
1836 
1837 		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1838 			bfilter->flags =
1839 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1840 		}
1841 
1842 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
1843 		if (ret)
1844 			goto cleanup;
1845 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
1846 		break;
1847 	case RTE_ETH_FILTER_DELETE:
1848 		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
1849 							vnic0, vnic, &ret);
1850 		if (ret == -EEXIST) {
1851 			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
1852 
1853 			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
1854 				      next);
1855 			bnxt_free_filter(bp, filter1);
1856 		} else if (ret == 0) {
1857 			RTE_LOG(ERR, PMD, "No matching filter found\n");
1858 		}
1859 		break;
1860 	default:
1861 		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
1862 		ret = -EINVAL;
1863 		goto error;
1864 	}
1865 	return ret;
1866 cleanup:
1867 	bnxt_free_filter(bp, bfilter);
1868 error:
1869 	return ret;
1870 }
1871 
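/*
 * Translate a generic rte_eth_ntuple_filter into the HWRM ntuple filter
 * representation.  Only IPv4 5-tuple rules with exact (all-ones) masks on
 * the addresses, ports and protocol are accepted; the filter priority is
 * not used yet (see the TODO below).
 */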
1872 static inline int
1873 parse_ntuple_filter(struct bnxt *bp,
1874 		    struct rte_eth_ntuple_filter *nfilter,
1875 		    struct bnxt_filter_info *bfilter)
1876 {
1877 	uint32_t en = 0;
1878 
1879 	if (nfilter->queue >= bp->rx_nr_rings) {
1880 		RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
1881 		return -EINVAL;
1882 	}
1883 
1884 	switch (nfilter->dst_port_mask) {
1885 	case UINT16_MAX:
1886 		bfilter->dst_port_mask = -1;
1887 		bfilter->dst_port = nfilter->dst_port;
1888 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
1889 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
1890 		break;
1891 	default:
1892 		RTE_LOG(ERR, PMD, "invalid dst_port mask.");
1893 		return -EINVAL;
1894 	}
1895 
1896 	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
1897 	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1898 
1899 	switch (nfilter->proto_mask) {
1900 	case UINT8_MAX:
1901 		if (nfilter->proto == 17) /* IPPROTO_UDP */
1902 			bfilter->ip_protocol = 17;
1903 		else if (nfilter->proto == 6) /* IPPROTO_TCP */
1904 			bfilter->ip_protocol = 6;
1905 		else
1906 			return -EINVAL;
1907 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1908 		break;
1909 	default:
1910 		RTE_LOG(ERR, PMD, "invalid protocol mask.");
1911 		return -EINVAL;
1912 	}
1913 
1914 	switch (nfilter->dst_ip_mask) {
1915 	case UINT32_MAX:
1916 		bfilter->dst_ipaddr_mask[0] = -1;
1917 		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
1918 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
1919 			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
1920 		break;
1921 	default:
1922 		RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
1923 		return -EINVAL;
1924 	}
1925 
1926 	switch (nfilter->src_ip_mask) {
1927 	case UINT32_MAX:
1928 		bfilter->src_ipaddr_mask[0] = -1;
1929 		bfilter->src_ipaddr[0] = nfilter->src_ip;
1930 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
1931 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
1932 		break;
1933 	default:
1934 		RTE_LOG(ERR, PMD, "invalid src_ip mask.");
1935 		return -EINVAL;
1936 	}
1937 
1938 	switch (nfilter->src_port_mask) {
1939 	case UINT16_MAX:
1940 		bfilter->src_port_mask = -1;
1941 		bfilter->src_port = nfilter->src_port;
1942 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
1943 			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
1944 		break;
1945 	default:
1946 		RTE_LOG(ERR, PMD, "invalid src_port mask.");
1947 		return -EINVAL;
1948 	}
1949 
1950 	//TODO Priority
1951 	//nfilter->priority = (uint8_t)filter->priority;
1952 
1953 	bfilter->enables = en;
1954 	return 0;
1955 }
1956 
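/*
 * Scan every VNIC's filter list for an ntuple filter with the same
 * 5-tuple, masks, flags and enables; return it (and, optionally, the VNIC
 * it belongs to through *mvnic), or NULL if no match exists.
 */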
1957 static struct bnxt_filter_info *
1958 bnxt_match_ntuple_filter(struct bnxt *bp,
1959 			 struct bnxt_filter_info *bfilter,
1960 			 struct bnxt_vnic_info **mvnic)
1961 {
1962 	struct bnxt_filter_info *mfilter = NULL;
1963 	int i;
1964 
1965 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
1966 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1967 		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
1968 			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
1969 			    bfilter->src_ipaddr_mask[0] ==
1970 			    mfilter->src_ipaddr_mask[0] &&
1971 			    bfilter->src_port == mfilter->src_port &&
1972 			    bfilter->src_port_mask == mfilter->src_port_mask &&
1973 			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
1974 			    bfilter->dst_ipaddr_mask[0] ==
1975 			    mfilter->dst_ipaddr_mask[0] &&
1976 			    bfilter->dst_port == mfilter->dst_port &&
1977 			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
1978 			    bfilter->flags == mfilter->flags &&
1979 			    bfilter->enables == mfilter->enables) {
1980 				if (mvnic)
1981 					*mvnic = vnic;
1982 				return mfilter;
1983 			}
1984 		}
1985 	}
1986 	return NULL;
1987 }
1988 
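/*
 * Add or delete an ntuple (5-tuple) filter.  An ADD that matches an
 * existing rule on the same destination queue is rejected with -EEXIST;
 * if the pattern matches but the destination differs, the existing rule
 * is re-targeted to the new queue instead of creating a duplicate.
 */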
1989 static int
1990 bnxt_cfg_ntuple_filter(struct bnxt *bp,
1991 		       struct rte_eth_ntuple_filter *nfilter,
1992 		       enum rte_filter_op filter_op)
1993 {
1994 	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
1995 	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
1996 	int ret;
1997 
1998 	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
1999 		RTE_LOG(ERR, PMD, "only 5tuple is supported.");
2000 		return -EINVAL;
2001 	}
2002 
2003 	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2004 		RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
2005 		return -EINVAL;
2006 	}
2007 
2008 	bfilter = bnxt_get_unused_filter(bp);
2009 	if (bfilter == NULL) {
2010 		RTE_LOG(ERR, PMD,
2011 			"Not enough resources for a new filter.\n");
2012 		return -ENOMEM;
2013 	}
2014 	ret = parse_ntuple_filter(bp, nfilter, bfilter);
2015 	if (ret < 0)
2016 		goto free_filter;
2017 
2018 	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
2019 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2020 	filter1 = STAILQ_FIRST(&vnic0->filter);
2021 	if (filter1 == NULL) {
2022 		ret = -1;
2023 		goto free_filter;
2024 	}
2025 
2026 	bfilter->dst_id = vnic->fw_vnic_id;
2027 	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2028 	bfilter->enables |=
2029 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2030 	bfilter->ethertype = 0x800;
2031 	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2032 
2033 	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2034 
2035 	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2036 	    bfilter->dst_id == mfilter->dst_id) {
2037 		RTE_LOG(ERR, PMD, "filter exists.\n");
2038 		ret = -EEXIST;
2039 		goto free_filter;
2040 	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2041 		   bfilter->dst_id != mfilter->dst_id) {
2042 		mfilter->dst_id = vnic->fw_vnic_id;
2043 		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2044 		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2045 		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2046 		RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
2047 		RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
2048 		goto free_filter;
2049 	}
2050 	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2051 		RTE_LOG(ERR, PMD, "filter doesn't exist.");
2052 		ret = -ENOENT;
2053 		goto free_filter;
2054 	}
2055 
2056 	if (filter_op == RTE_ETH_FILTER_ADD) {
2057 		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2058 		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2059 		if (ret)
2060 			goto free_filter;
2061 		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2062 	} else {
2063 		if (mfilter == NULL) {
2064 			/* This should not happen. But for Coverity! */
2065 			ret = -ENOENT;
2066 			goto free_filter;
2067 		}
2068 		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2069 
2070 		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2071 		mfilter->fw_l2_filter_id = -1;
2072 		bnxt_free_filter(bp, mfilter);
2073 		bfilter->fw_l2_filter_id = -1;
2074 		bnxt_free_filter(bp, bfilter);
2075 	}
2076 
2077 	return 0;
2078 free_filter:
2079 	bfilter->fw_l2_filter_id = -1;
2080 	bnxt_free_filter(bp, bfilter);
2081 	return ret;
2082 }
2083 
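/*
 * RTE_ETH_FILTER_NTUPLE entry point: dispatch ADD/DELETE requests to
 * bnxt_cfg_ntuple_filter().
 *
 * Illustrative application-side sketch only (not part of this driver;
 * src_addr, dst_addr, src_port, dst_port and port_id are placeholder
 * variables, and field byte ordering follows the generic ntuple filter
 * API rather than anything this driver defines):
 *
 *	struct rte_eth_ntuple_filter nf = {
 *		.flags = RTE_5TUPLE_FLAGS,
 *		.proto = IPPROTO_UDP,	.proto_mask = UINT8_MAX,
 *		.src_ip = src_addr,	.src_ip_mask = UINT32_MAX,
 *		.dst_ip = dst_addr,	.dst_ip_mask = UINT32_MAX,
 *		.src_port = src_port,	.src_port_mask = UINT16_MAX,
 *		.dst_port = dst_port,	.dst_port_mask = UINT16_MAX,
 *		.queue = 1,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				RTE_ETH_FILTER_ADD, &nf);
 */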
2084 static int
2085 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2086 			enum rte_filter_op filter_op,
2087 			void *arg)
2088 {
2089 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2090 	int ret;
2091 
2092 	if (filter_op == RTE_ETH_FILTER_NOP)
2093 		return 0;
2094 
2095 	if (arg == NULL) {
2096 		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
2097 			    filter_op);
2098 		return -EINVAL;
2099 	}
2100 
2101 	switch (filter_op) {
2102 	case RTE_ETH_FILTER_ADD:
2103 		ret = bnxt_cfg_ntuple_filter(bp,
2104 			(struct rte_eth_ntuple_filter *)arg,
2105 			filter_op);
2106 		break;
2107 	case RTE_ETH_FILTER_DELETE:
2108 		ret = bnxt_cfg_ntuple_filter(bp,
2109 			(struct rte_eth_ntuple_filter *)arg,
2110 			filter_op);
2111 		break;
2112 	default:
2113 		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
2114 		ret = -EINVAL;
2115 		break;
2116 	}
2117 	return ret;
2118 }
2119 
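/*
 * Translate a flow director (fdir) filter into the HWRM ntuple filter
 * representation.  IPv4/IPv6 TCP, UDP and "other" flows, L2 payload, and
 * VXLAN/NVGRE tunnel IDs are supported; fragmented, SCTP and GENEVE flow
 * types are rejected, as is perfect-tunnel fdir mode.
 */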
2120 static int
2121 bnxt_parse_fdir_filter(struct bnxt *bp,
2122 		       struct rte_eth_fdir_filter *fdir,
2123 		       struct bnxt_filter_info *filter)
2124 {
2125 	enum rte_fdir_mode fdir_mode =
2126 		bp->eth_dev->data->dev_conf.fdir_conf.mode;
2127 	struct bnxt_vnic_info *vnic0, *vnic;
2128 	struct bnxt_filter_info *filter1;
2129 	uint32_t en = 0;
2130 	int i;
2131 
2132 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2133 		return -EINVAL;
2134 
2135 	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2136 	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2137 
2138 	switch (fdir->input.flow_type) {
2139 	case RTE_ETH_FLOW_IPV4:
2140 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2141 		/* FALLTHROUGH */
2142 		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2143 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2144 		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2145 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2146 		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2147 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2148 		filter->ip_addr_type =
2149 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2150 		filter->src_ipaddr_mask[0] = 0xffffffff;
2151 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2152 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2153 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2154 		filter->ethertype = 0x800;
2155 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2156 		break;
2157 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2158 		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2159 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2160 		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2161 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2162 		filter->dst_port_mask = 0xffff;
2163 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2164 		filter->src_port_mask = 0xffff;
2165 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2166 		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2167 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2168 		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2169 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2170 		filter->ip_protocol = 6;
2171 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2172 		filter->ip_addr_type =
2173 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2174 		filter->src_ipaddr_mask[0] = 0xffffffff;
2175 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2176 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2177 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2178 		filter->ethertype = 0x800;
2179 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2180 		break;
2181 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2182 		filter->src_port = fdir->input.flow.udp4_flow.src_port;
2183 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2184 		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2185 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2186 		filter->dst_port_mask = 0xffff;
2187 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2188 		filter->src_port_mask = 0xffff;
2189 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2190 		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2191 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2192 		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2193 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2194 		filter->ip_protocol = 17;
2195 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2196 		filter->ip_addr_type =
2197 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2198 		filter->src_ipaddr_mask[0] = 0xffffffff;
2199 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2200 		filter->dst_ipaddr_mask[0] = 0xffffffff;
2201 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2202 		filter->ethertype = 0x800;
2203 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2204 		break;
2205 	case RTE_ETH_FLOW_IPV6:
2206 	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2207 		/* FALLTHROUGH */
2208 		filter->ip_addr_type =
2209 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2210 		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2211 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2212 		rte_memcpy(filter->src_ipaddr,
2213 			   fdir->input.flow.ipv6_flow.src_ip, 16);
2214 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2215 		rte_memcpy(filter->dst_ipaddr,
2216 			   fdir->input.flow.ipv6_flow.dst_ip, 16);
2217 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2218 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2219 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2220 		memset(filter->src_ipaddr_mask, 0xff, 16);
2221 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2222 		filter->ethertype = 0x86dd;
2223 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2224 		break;
2225 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2226 		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2227 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2228 		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2229 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2230 		filter->dst_port_mask = 0xffff;
2231 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2232 		filter->src_port_mask = 0xffff;
2233 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2234 		filter->ip_addr_type =
2235 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2236 		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2237 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2238 		rte_memcpy(filter->src_ipaddr,
2239 			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2240 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2241 		rte_memcpy(filter->dst_ipaddr,
2242 			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2243 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2244 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2245 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2246 		memset(filter->src_ipaddr_mask, 0xff, 16);
2247 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2248 		filter->ethertype = 0x86dd;
2249 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2250 		break;
2251 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2252 		filter->src_port = fdir->input.flow.udp6_flow.src_port;
2253 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2254 		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2255 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2256 		filter->dst_port_mask = 0xffff;
2257 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2258 		filter->src_port_mask = 0xffff;
2259 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2260 		filter->ip_addr_type =
2261 			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2262 		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2263 		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2264 		rte_memcpy(filter->src_ipaddr,
2265 			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
2266 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2267 		rte_memcpy(filter->dst_ipaddr,
2268 			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2269 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2270 		memset(filter->dst_ipaddr_mask, 0xff, 16);
2271 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2272 		memset(filter->src_ipaddr_mask, 0xff, 16);
2273 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2274 		filter->ethertype = 0x86dd;
2275 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2276 		break;
2277 	case RTE_ETH_FLOW_L2_PAYLOAD:
2278 		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2279 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2280 		break;
2281 	case RTE_ETH_FLOW_VXLAN:
2282 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2283 			return -EINVAL;
2284 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2285 		filter->tunnel_type =
2286 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2287 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2288 		break;
2289 	case RTE_ETH_FLOW_NVGRE:
2290 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2291 			return -EINVAL;
2292 		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2293 		filter->tunnel_type =
2294 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2295 		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2296 		break;
2297 	case RTE_ETH_FLOW_UNKNOWN:
2298 	case RTE_ETH_FLOW_RAW:
2299 	case RTE_ETH_FLOW_FRAG_IPV4:
2300 	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2301 	case RTE_ETH_FLOW_FRAG_IPV6:
2302 	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2303 	case RTE_ETH_FLOW_IPV6_EX:
2304 	case RTE_ETH_FLOW_IPV6_TCP_EX:
2305 	case RTE_ETH_FLOW_IPV6_UDP_EX:
2306 	case RTE_ETH_FLOW_GENEVE:
2307 		/* FALLTHROUGH */
2308 	default:
2309 		return -EINVAL;
2310 	}
2311 
2312 	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2313 	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2314 	if (vnic == NULL) {
2315 		RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
2316 		return -EINVAL;
2317 	}
2318 
2319 
2320 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2321 		rte_memcpy(filter->dst_macaddr,
2322 			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
2323 		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2324 	}
2325 
2326 	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2327 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2328 		filter1 = STAILQ_FIRST(&vnic0->filter);
2329 		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
2330 	} else {
2331 		filter->dst_id = vnic->fw_vnic_id;
2332 		for (i = 0; i < ETHER_ADDR_LEN; i++)
2333 			if (filter->dst_macaddr[i] == 0x00)
2334 				filter1 = STAILQ_FIRST(&vnic0->filter);
2335 			else
2336 				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
2337 	}
2338 
2339 	if (filter1 == NULL)
2340 		return -EINVAL;
2341 
2342 	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2343 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2344 
2345 	filter->enables = en;
2346 
2347 	return 0;
2348 }
2349 
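/*
 * Look for an already-programmed filter that matches every field of the
 * parsed fdir filter; used to detect duplicate ADDs and to locate the
 * filter to remove on DELETE.
 */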
2350 static struct bnxt_filter_info *
2351 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
2352 {
2353 	struct bnxt_filter_info *mf = NULL;
2354 	int i;
2355 
2356 	for (i = bp->nr_vnics - 1; i >= 0; i--) {
2357 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2358 
2359 		STAILQ_FOREACH(mf, &vnic->filter, next) {
2360 			if (mf->filter_type == nf->filter_type &&
2361 			    mf->flags == nf->flags &&
2362 			    mf->src_port == nf->src_port &&
2363 			    mf->src_port_mask == nf->src_port_mask &&
2364 			    mf->dst_port == nf->dst_port &&
2365 			    mf->dst_port_mask == nf->dst_port_mask &&
2366 			    mf->ip_protocol == nf->ip_protocol &&
2367 			    mf->ip_addr_type == nf->ip_addr_type &&
2368 			    mf->ethertype == nf->ethertype &&
2369 			    mf->vni == nf->vni &&
2370 			    mf->tunnel_type == nf->tunnel_type &&
2371 			    mf->l2_ovlan == nf->l2_ovlan &&
2372 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2373 			    mf->l2_ivlan == nf->l2_ivlan &&
2374 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2375 			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2376 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2377 				    ETHER_ADDR_LEN) &&
2378 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
2379 				    ETHER_ADDR_LEN) &&
2380 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2381 				    ETHER_ADDR_LEN) &&
2382 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2383 				    sizeof(nf->src_ipaddr)) &&
2384 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2385 				    sizeof(nf->src_ipaddr_mask)) &&
2386 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2387 				    sizeof(nf->dst_ipaddr)) &&
2388 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2389 				    sizeof(nf->dst_ipaddr_mask)))
2390 				return mf;
2391 		}
2392 	}
2393 	return NULL;
2394 }
2395 
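/*
 * RTE_ETH_FILTER_FDIR handler: ADD/DELETE program or clear a single HWRM
 * ntuple filter; FLUSH walks every VNIC and clears all ntuple filters.
 * UPDATE/STATS/INFO are not implemented.
 */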
2396 static int
2397 bnxt_fdir_filter(struct rte_eth_dev *dev,
2398 		 enum rte_filter_op filter_op,
2399 		 void *arg)
2400 {
2401 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2402 	struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
2403 	struct bnxt_filter_info *filter, *match;
2404 	struct bnxt_vnic_info *vnic;
2405 	int ret = 0, i;
2406 
2407 	if (filter_op == RTE_ETH_FILTER_NOP)
2408 		return 0;
2409 
2410 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2411 		return -EINVAL;
2412 
2413 	switch (filter_op) {
2414 	case RTE_ETH_FILTER_ADD:
2415 	case RTE_ETH_FILTER_DELETE:
2416 		/* FALLTHROUGH */
2417 		filter = bnxt_get_unused_filter(bp);
2418 		if (filter == NULL) {
2419 			RTE_LOG(ERR, PMD,
2420 				"Not enough resources for a new flow.\n");
2421 			return -ENOMEM;
2422 		}
2423 
2424 		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2425 		if (ret != 0)
2426 			goto free_filter;
2427 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2428 
2429 		match = bnxt_match_fdir(bp, filter);
2430 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2431 			RTE_LOG(ERR, PMD, "Flow already exists.\n");
2432 			ret = -EEXIST;
2433 			goto free_filter;
2434 		}
2435 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2436 			RTE_LOG(ERR, PMD, "Flow does not exist.\n");
2437 			ret = -ENOENT;
2438 			goto free_filter;
2439 		}
2440 
2441 		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2442 			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
2443 		else
2444 			vnic =
2445 			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2446 
2447 		if (filter_op == RTE_ETH_FILTER_ADD) {
2448 			ret = bnxt_hwrm_set_ntuple_filter(bp,
2449 							  filter->dst_id,
2450 							  filter);
2451 			if (ret)
2452 				goto free_filter;
2453 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2454 		} else {
2455 			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2456 			STAILQ_REMOVE(&vnic->filter, match,
2457 				      bnxt_filter_info, next);
2458 			bnxt_free_filter(bp, match);
2459 			filter->fw_l2_filter_id = -1;
2460 			bnxt_free_filter(bp, filter);
2461 		}
2462 		break;
2463 	case RTE_ETH_FILTER_FLUSH:
2464 		for (i = bp->nr_vnics - 1; i >= 0; i--) {
2465 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2466 
2467 			STAILQ_FOREACH(filter, &vnic->filter, next) {
2468 				if (filter->filter_type ==
2469 				    HWRM_CFA_NTUPLE_FILTER) {
2470 					ret =
2471 					bnxt_hwrm_clear_ntuple_filter(bp,
2472 								      filter);
2473 					STAILQ_REMOVE(&vnic->filter, filter,
2474 						      bnxt_filter_info, next);
2475 				}
2476 			}
2477 		}
2478 		return ret;
2479 	case RTE_ETH_FILTER_UPDATE:
2480 	case RTE_ETH_FILTER_STATS:
2481 	case RTE_ETH_FILTER_INFO:
2482 		/* FALLTHROUGH */
2483 		RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
2484 		break;
2485 	default:
2486 		RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
2487 		ret = -EINVAL;
2488 		break;
2489 	}
2490 	return ret;
2491 
2492 free_filter:
2493 	filter->fw_l2_filter_id = -1;
2494 	bnxt_free_filter(bp, filter);
2495 	return ret;
2496 }
2497 
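/*
 * Top-level filter_ctrl dispatcher.  FDIR, ntuple and ethertype requests
 * are handled by the functions above; RTE_ETH_FILTER_GENERIC returns the
 * rte_flow ops (bnxt_flow_ops); tunnel filters are not implemented yet.
 */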
2498 static int
2499 bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
2500 		    enum rte_filter_type filter_type,
2501 		    enum rte_filter_op filter_op, void *arg)
2502 {
2503 	int ret = 0;
2504 
2505 	switch (filter_type) {
2506 	case RTE_ETH_FILTER_TUNNEL:
2507 		RTE_LOG(ERR, PMD,
2508 			"filter type: %d: To be implemented\n", filter_type);
2509 		break;
2510 	case RTE_ETH_FILTER_FDIR:
2511 		ret = bnxt_fdir_filter(dev, filter_op, arg);
2512 		break;
2513 	case RTE_ETH_FILTER_NTUPLE:
2514 		ret = bnxt_ntuple_filter(dev, filter_op, arg);
2515 		break;
2516 	case RTE_ETH_FILTER_ETHERTYPE:
2517 		ret = bnxt_ethertype_filter(dev, filter_op, arg);
2518 		break;
2519 	case RTE_ETH_FILTER_GENERIC:
2520 		if (filter_op != RTE_ETH_FILTER_GET)
2521 			return -EINVAL;
2522 		*(const void **)arg = &bnxt_flow_ops;
2523 		break;
2524 	default:
2525 		RTE_LOG(ERR, PMD,
2526 			"Filter type (%d) not supported", filter_type);
2527 		ret = -EINVAL;
2528 		break;
2529 	}
2530 	return ret;
2531 }
2532 
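/*
 * Advertise the packet types the receive path can report in the mbuf
 * packet_type field; only valid when bnxt_recv_pkts() is the active Rx
 * burst function.
 */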
2533 static const uint32_t *
2534 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2535 {
2536 	static const uint32_t ptypes[] = {
2537 		RTE_PTYPE_L2_ETHER_VLAN,
2538 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2539 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2540 		RTE_PTYPE_L4_ICMP,
2541 		RTE_PTYPE_L4_TCP,
2542 		RTE_PTYPE_L4_UDP,
2543 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2544 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2545 		RTE_PTYPE_INNER_L4_ICMP,
2546 		RTE_PTYPE_INNER_L4_TCP,
2547 		RTE_PTYPE_INNER_L4_UDP,
2548 		RTE_PTYPE_UNKNOWN
2549 	};
2550 
2551 	if (dev->rx_pkt_burst == bnxt_recv_pkts)
2552 		return ptypes;
2553 	return NULL;
2554 }
2555 
2556 
2557 
2558 static int
2559 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2560 {
2561 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2562 	int rc;
2563 	uint32_t dir_entries;
2564 	uint32_t entry_length;
2565 
2566 	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
2567 		__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
2568 		bp->pdev->addr.devid, bp->pdev->addr.function);
2569 
2570 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2571 	if (rc != 0)
2572 		return rc;
2573 
2574 	return dir_entries * entry_length;
2575 }
2576 
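/*
 * Read NVM contents.  An offset of 0 returns the NVM directory itself;
 * otherwise the upper 8 bits of in_eeprom->offset select the 1-based
 * directory entry index and the lower 24 bits the byte offset within it.
 */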
2577 static int
2578 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2579 		struct rte_dev_eeprom_info *in_eeprom)
2580 {
2581 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2582 	uint32_t index;
2583 	uint32_t offset;
2584 
2585 	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
2586 		"len = %d\n", __func__, bp->pdev->addr.domain,
2587 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2588 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2589 
2590 	if (in_eeprom->offset == 0) /* special offset value to get directory */
2591 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
2592 						in_eeprom->data);
2593 
2594 	index = in_eeprom->offset >> 24;
2595 	offset = in_eeprom->offset & 0xffffff;
2596 
2597 	if (index != 0)
2598 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
2599 					   in_eeprom->length, in_eeprom->data);
2600 
2601 	return 0;
2602 }
2603 
2604 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
2605 {
2606 	switch (dir_type) {
2607 	case BNX_DIR_TYPE_CHIMP_PATCH:
2608 	case BNX_DIR_TYPE_BOOTCODE:
2609 	case BNX_DIR_TYPE_BOOTCODE_2:
2610 	case BNX_DIR_TYPE_APE_FW:
2611 	case BNX_DIR_TYPE_APE_PATCH:
2612 	case BNX_DIR_TYPE_KONG_FW:
2613 	case BNX_DIR_TYPE_KONG_PATCH:
2614 	case BNX_DIR_TYPE_BONO_FW:
2615 	case BNX_DIR_TYPE_BONO_PATCH:
2616 		return true;
2617 	}
2618 
2619 	return false;
2620 }
2621 
2622 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
2623 {
2624 	switch (dir_type) {
2625 	case BNX_DIR_TYPE_AVS:
2626 	case BNX_DIR_TYPE_EXP_ROM_MBA:
2627 	case BNX_DIR_TYPE_PCIE:
2628 	case BNX_DIR_TYPE_TSCF_UCODE:
2629 	case BNX_DIR_TYPE_EXT_PHY:
2630 	case BNX_DIR_TYPE_CCM:
2631 	case BNX_DIR_TYPE_ISCSI_BOOT:
2632 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2633 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2634 		return true;
2635 	}
2636 
2637 	return false;
2638 }
2639 
2640 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
2641 {
2642 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2643 		bnxt_dir_type_is_other_exec_format(dir_type);
2644 }
2645 
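/*
 * Write NVM contents (PF only).  A magic with the upper 16 bits set to
 * 0xffff encodes a directory operation (currently only erase, dir_op
 * 0x0e, on the 1-based entry index in the low byte); any other magic
 * creates or rewrites an NVM item of that type, with the ordinal and
 * attributes packed into in_eeprom->offset.  Executable firmware images
 * are refused.
 */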
2646 static int
2647 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
2648 		struct rte_dev_eeprom_info *in_eeprom)
2649 {
2650 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2651 	uint8_t index, dir_op;
2652 	uint16_t type, ext, ordinal, attr;
2653 
2654 	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
2655 		"len = %d\n", __func__, bp->pdev->addr.domain,
2656 		bp->pdev->addr.bus, bp->pdev->addr.devid,
2657 		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2658 
2659 	if (!BNXT_PF(bp)) {
2660 		RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
2661 		return -EINVAL;
2662 	}
2663 
2664 	type = in_eeprom->magic >> 16;
2665 
2666 	if (type == 0xffff) { /* special value for directory operations */
2667 		index = in_eeprom->magic & 0xff;
2668 		dir_op = in_eeprom->magic >> 8;
2669 		if (index == 0)
2670 			return -EINVAL;
2671 		switch (dir_op) {
2672 		case 0x0e: /* erase */
2673 			if (in_eeprom->offset != ~in_eeprom->magic)
2674 				return -EINVAL;
2675 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
2676 		default:
2677 			return -EINVAL;
2678 		}
2679 	}
2680 
2681 	/* Create or re-write an NVM item: */
2682 	if (bnxt_dir_type_is_executable(type) == true)
2683 		return -EOPNOTSUPP;
2684 	ext = in_eeprom->magic & 0xffff;
2685 	ordinal = in_eeprom->offset >> 16;
2686 	attr = in_eeprom->offset & 0xffff;
2687 
2688 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
2689 				     in_eeprom->data, in_eeprom->length);
2691 }
2692 
2693 /*
2694  * Initialization
2695  */
2696 
2697 static const struct eth_dev_ops bnxt_dev_ops = {
2698 	.dev_infos_get = bnxt_dev_info_get_op,
2699 	.dev_close = bnxt_dev_close_op,
2700 	.dev_configure = bnxt_dev_configure_op,
2701 	.dev_start = bnxt_dev_start_op,
2702 	.dev_stop = bnxt_dev_stop_op,
2703 	.dev_set_link_up = bnxt_dev_set_link_up_op,
2704 	.dev_set_link_down = bnxt_dev_set_link_down_op,
2705 	.stats_get = bnxt_stats_get_op,
2706 	.stats_reset = bnxt_stats_reset_op,
2707 	.rx_queue_setup = bnxt_rx_queue_setup_op,
2708 	.rx_queue_release = bnxt_rx_queue_release_op,
2709 	.tx_queue_setup = bnxt_tx_queue_setup_op,
2710 	.tx_queue_release = bnxt_tx_queue_release_op,
2711 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
2712 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
2713 	.reta_update = bnxt_reta_update_op,
2714 	.reta_query = bnxt_reta_query_op,
2715 	.rss_hash_update = bnxt_rss_hash_update_op,
2716 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
2717 	.link_update = bnxt_link_update_op,
2718 	.promiscuous_enable = bnxt_promiscuous_enable_op,
2719 	.promiscuous_disable = bnxt_promiscuous_disable_op,
2720 	.allmulticast_enable = bnxt_allmulticast_enable_op,
2721 	.allmulticast_disable = bnxt_allmulticast_disable_op,
2722 	.mac_addr_add = bnxt_mac_addr_add_op,
2723 	.mac_addr_remove = bnxt_mac_addr_remove_op,
2724 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
2725 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
2726 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
2727 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
2728 	.vlan_filter_set = bnxt_vlan_filter_set_op,
2729 	.vlan_offload_set = bnxt_vlan_offload_set_op,
2730 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
2731 	.mtu_set = bnxt_mtu_set_op,
2732 	.mac_addr_set = bnxt_set_default_mac_addr_op,
2733 	.xstats_get = bnxt_dev_xstats_get_op,
2734 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
2735 	.xstats_reset = bnxt_dev_xstats_reset_op,
2736 	.fw_version_get = bnxt_fw_version_get,
2737 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
2738 	.rxq_info_get = bnxt_rxq_info_get_op,
2739 	.txq_info_get = bnxt_txq_info_get_op,
2740 	.dev_led_on = bnxt_dev_led_on_op,
2741 	.dev_led_off = bnxt_dev_led_off_op,
2742 	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
2743 	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
2744 	.rx_queue_count = bnxt_rx_queue_count_op,
2745 	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
2746 	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
2747 	.filter_ctrl = bnxt_filter_ctrl_op,
2748 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
2749 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
2750 	.get_eeprom           = bnxt_get_eeprom_op,
2751 	.set_eeprom           = bnxt_set_eeprom_op,
2752 };
2753 
2754 static bool bnxt_vf_pciid(uint16_t id)
2755 {
2756 	if (id == BROADCOM_DEV_ID_57304_VF ||
2757 	    id == BROADCOM_DEV_ID_57406_VF ||
2758 	    id == BROADCOM_DEV_ID_5731X_VF ||
2759 	    id == BROADCOM_DEV_ID_5741X_VF ||
2760 	    id == BROADCOM_DEV_ID_57414_VF ||
2761 	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
2762 		return true;
2763 	return false;
2764 }
2765 
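/*
 * Minimal board initialization: record the ethdev/PCI device pointers and
 * map BAR 0 for register access.
 */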
2766 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
2767 {
2768 	struct bnxt *bp = eth_dev->data->dev_private;
2769 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2770 	int rc;
2771 
2772 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
2773 	if (!pci_dev->mem_resource[0].addr) {
2774 		RTE_LOG(ERR, PMD,
2775 			"Cannot find PCI device base address, aborting\n");
2776 		rc = -ENODEV;
2777 		goto init_err_disable;
2778 	}
2779 
2780 	bp->eth_dev = eth_dev;
2781 	bp->pdev = pci_dev;
2782 
2783 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
2784 	if (!bp->bar0) {
2785 		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
2786 		rc = -ENOMEM;
2787 		goto init_err_release;
2788 	}
2789 	return 0;
2790 
2791 init_err_release:
2792 	if (bp->bar0)
2793 		bp->bar0 = NULL;
2794 
2795 init_err_disable:
2796 
2797 	return rc;
2798 }
2799 
2800 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
2801 
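/*
 * ALLOW_FUNC clears the bit for one HWRM command in the PF's vf_req_fwd
 * bitmap, i.e. requests of that type issued by a VF are handled directly
 * by the firmware instead of being forwarded to the PF driver.
 */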
2802 #define ALLOW_FUNC(x)	\
2803 	{ \
2804 		typeof(x) arg = (x); \
2805 		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
2806 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
2807 	}
2808 static int
2809 bnxt_dev_init(struct rte_eth_dev *eth_dev)
2810 {
2811 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2812 	char mz_name[RTE_MEMZONE_NAMESIZE];
2813 	const struct rte_memzone *mz = NULL;
2814 	static int version_printed;
2815 	uint32_t total_alloc_len;
2816 	rte_iova_t mz_phys_addr;
2817 	struct bnxt *bp;
2818 	int rc;
2819 
2820 	if (version_printed++ == 0)
2821 		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
2822 
2823 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2824 
2825 	bp = eth_dev->data->dev_private;
2826 
2827 	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
2828 	bp->dev_stopped = 1;
2829 
2830 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2831 		goto skip_init;
2832 
2833 	if (bnxt_vf_pciid(pci_dev->id.device_id))
2834 		bp->flags |= BNXT_FLAG_VF;
2835 
2836 	rc = bnxt_init_board(eth_dev);
2837 	if (rc) {
2838 		RTE_LOG(ERR, PMD,
2839 			"Board initialization failed rc: %x\n", rc);
2840 		goto error;
2841 	}
2842 skip_init:
2843 	eth_dev->dev_ops = &bnxt_dev_ops;
2844 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2845 		return 0;
2846 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
2847 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
2848 
2849 	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
2850 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
2851 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
2852 			 pci_dev->addr.bus, pci_dev->addr.devid,
2853 			 pci_dev->addr.function, "rx_port_stats");
2854 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
2855 		mz = rte_memzone_lookup(mz_name);
2856 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
2857 				sizeof(struct rx_port_stats) + 512);
2858 		if (!mz) {
2859 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
2860 						 SOCKET_ID_ANY,
2861 						 RTE_MEMZONE_2MB |
2862 						 RTE_MEMZONE_SIZE_HINT_ONLY);
2863 			if (mz == NULL)
2864 				return -ENOMEM;
2865 		}
2866 		memset(mz->addr, 0, mz->len);
2867 		mz_phys_addr = mz->iova;
2868 		if ((unsigned long)mz->addr == mz_phys_addr) {
2869 			RTE_LOG(WARNING, PMD,
2870 				"Memzone physical address same as virtual.\n");
2871 			RTE_LOG(WARNING, PMD,
2872 				"Using rte_mem_virt2iova()\n");
2873 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
2874 			if (mz_phys_addr == 0) {
2875 				RTE_LOG(ERR, PMD,
2876 				"unable to map address to physical memory\n");
2877 				return -ENOMEM;
2878 			}
2879 		}
2880 
2881 		bp->rx_mem_zone = (const void *)mz;
2882 		bp->hw_rx_port_stats = mz->addr;
2883 		bp->hw_rx_port_stats_map = mz_phys_addr;
2884 
2885 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
2886 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
2887 			 pci_dev->addr.bus, pci_dev->addr.devid,
2888 			 pci_dev->addr.function, "tx_port_stats");
2889 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
2890 		mz = rte_memzone_lookup(mz_name);
2891 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
2892 				sizeof(struct tx_port_stats) + 512);
2893 		if (!mz) {
2894 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
2895 						 SOCKET_ID_ANY,
2896 						 RTE_MEMZONE_2MB |
2897 						 RTE_MEMZONE_SIZE_HINT_ONLY);
2898 			if (mz == NULL)
2899 				return -ENOMEM;
2900 		}
2901 		memset(mz->addr, 0, mz->len);
2902 		mz_phys_addr = mz->iova;
2903 		if ((unsigned long)mz->addr == mz_phys_addr) {
2904 			RTE_LOG(WARNING, PMD,
2905 				"Memzone physical address same as virtual.\n");
2906 			RTE_LOG(WARNING, PMD,
2907 				"Using rte_mem_virt2iova()\n");
2908 			mz_phys_addr = rte_mem_virt2iova(mz->addr);
2909 			if (mz_phys_addr == 0) {
2910 				RTE_LOG(ERR, PMD,
2911 				"unable to map address to physical memory\n");
2912 				return -ENOMEM;
2913 			}
2914 		}
2915 
2916 		bp->tx_mem_zone = (const void *)mz;
2917 		bp->hw_tx_port_stats = mz->addr;
2918 		bp->hw_tx_port_stats_map = mz_phys_addr;
2919 
2920 		bp->flags |= BNXT_FLAG_PORT_STATS;
2921 	}
2922 
2923 	rc = bnxt_alloc_hwrm_resources(bp);
2924 	if (rc) {
2925 		RTE_LOG(ERR, PMD,
2926 			"hwrm resource allocation failure rc: %x\n", rc);
2927 		goto error_free;
2928 	}
2929 	rc = bnxt_hwrm_ver_get(bp);
2930 	if (rc)
2931 		goto error_free;
2932 	bnxt_hwrm_queue_qportcfg(bp);
2933 
2934 	bnxt_hwrm_func_qcfg(bp);
2935 
2936 	/* Get the MAX capabilities for this function */
2937 	rc = bnxt_hwrm_func_qcaps(bp);
2938 	if (rc) {
2939 		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
2940 		goto error_free;
2941 	}
2942 	if (bp->max_tx_rings == 0) {
2943 		RTE_LOG(ERR, PMD, "No TX rings available!\n");
2944 		rc = -EBUSY;
2945 		goto error_free;
2946 	}
2947 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
2948 					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
2949 	if (eth_dev->data->mac_addrs == NULL) {
2950 		RTE_LOG(ERR, PMD,
2951 			"Failed to alloc %u bytes needed to store MAC addr tbl",
2952 			ETHER_ADDR_LEN * bp->max_l2_ctx);
2953 		rc = -ENOMEM;
2954 		goto error_free;
2955 	}
2956 	/* Copy the permanent MAC from the qcap response address now. */
2957 	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
2958 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
2959 
2960 	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
2961 		/* 1 ring is for default completion ring */
2962 		RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
2963 		rc = -ENOSPC;
2964 		goto error_free;
2965 	}
2966 
2967 	bp->grp_info = rte_zmalloc("bnxt_grp_info",
2968 				sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
2969 	if (!bp->grp_info) {
2970 		RTE_LOG(ERR, PMD,
2971 			"Failed to alloc %zu bytes to store group info table\n",
2972 			sizeof(*bp->grp_info) * bp->max_ring_grps);
2973 		rc = -ENOMEM;
2974 		goto error_free;
2975 	}
2976 
2977 	/* Forward all requests if firmware is new enough */
2978 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
2979 	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
2980 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
2981 		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
2982 	} else {
2983 		RTE_LOG(WARNING, PMD,
2984 			"Firmware too old for VF mailbox functionality\n");
2985 		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
2986 	}
2987 
2988 	/*
2989 	 * The following are used for driver cleanup.  If we disallow these,
2990 	 * VF drivers can't clean up cleanly.
2991 	 */
2992 	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
2993 	ALLOW_FUNC(HWRM_VNIC_FREE);
2994 	ALLOW_FUNC(HWRM_RING_FREE);
2995 	ALLOW_FUNC(HWRM_RING_GRP_FREE);
2996 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
2997 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
2998 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
2999 	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
3000 	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
3001 	rc = bnxt_hwrm_func_driver_register(bp);
3002 	if (rc) {
3003 		RTE_LOG(ERR, PMD,
3004 			"Failed to register driver");
3005 		rc = -EBUSY;
3006 		goto error_free;
3007 	}
3008 
3009 	RTE_LOG(INFO, PMD,
3010 		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
3011 		pci_dev->mem_resource[0].phys_addr,
3012 		pci_dev->mem_resource[0].addr);
3013 
3014 	rc = bnxt_hwrm_func_reset(bp);
3015 	if (rc) {
3016 		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
3017 		rc = -1;
3018 		goto error_free;
3019 	}
3020 
3021 	if (BNXT_PF(bp)) {
3022 		//if (bp->pf.active_vfs) {
3023 			// TODO: Deallocate VF resources?
3024 		//}
3025 		if (bp->pdev->max_vfs) {
3026 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
3027 			if (rc) {
3028 				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
3029 				goto error_free;
3030 			}
3031 		} else {
3032 			rc = bnxt_hwrm_allocate_pf_only(bp);
3033 			if (rc) {
3034 				RTE_LOG(ERR, PMD,
3035 					"Failed to allocate PF resources\n");
3036 				goto error_free;
3037 			}
3038 		}
3039 	}
3040 
3041 	bnxt_hwrm_port_led_qcaps(bp);
3042 
3043 	rc = bnxt_setup_int(bp);
3044 	if (rc)
3045 		goto error_free;
3046 
3047 	rc = bnxt_alloc_mem(bp);
3048 	if (rc)
3049 		goto error_free_int;
3050 
3051 	rc = bnxt_request_int(bp);
3052 	if (rc)
3053 		goto error_free_int;
3054 
3055 	rc = bnxt_alloc_def_cp_ring(bp);
3056 	if (rc)
3057 		goto error_free_int;
3058 
3059 	bnxt_enable_int(bp);
3060 
3061 	return 0;
3062 
3063 error_free_int:
3064 	bnxt_disable_int(bp);
3065 	bnxt_free_def_cp_ring(bp);
3066 	bnxt_hwrm_func_buf_unrgtr(bp);
3067 	bnxt_free_int(bp);
3068 	bnxt_free_mem(bp);
3069 error_free:
3070 	bnxt_dev_uninit(eth_dev);
3071 error:
3072 	return rc;
3073 }
3074 
3075 static int
3076 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
3077 	struct bnxt *bp = eth_dev->data->dev_private;
3078 	int rc;
3079 
3080 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3081 		return -EPERM;
3082 
3083 	bnxt_disable_int(bp);
3084 	bnxt_free_int(bp);
3085 	bnxt_free_mem(bp);
3086 	if (eth_dev->data->mac_addrs != NULL) {
3087 		rte_free(eth_dev->data->mac_addrs);
3088 		eth_dev->data->mac_addrs = NULL;
3089 	}
3090 	if (bp->grp_info != NULL) {
3091 		rte_free(bp->grp_info);
3092 		bp->grp_info = NULL;
3093 	}
3094 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3095 	bnxt_free_hwrm_resources(bp);
3096 	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3097 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3098 	if (bp->dev_stopped == 0)
3099 		bnxt_dev_close_op(eth_dev);
3100 	if (bp->pf.vf_info)
3101 		rte_free(bp->pf.vf_info);
3102 	eth_dev->dev_ops = NULL;
3103 	eth_dev->rx_pkt_burst = NULL;
3104 	eth_dev->tx_pkt_burst = NULL;
3105 
3106 	return rc;
3107 }
3108 
3109 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3110 	struct rte_pci_device *pci_dev)
3111 {
3112 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3113 		bnxt_dev_init);
3114 }
3115 
3116 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3117 {
3118 	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
3119 }
3120 
3121 static struct rte_pci_driver bnxt_rte_pmd = {
3122 	.id_table = bnxt_pci_id_map,
3123 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3124 		RTE_PCI_DRV_INTR_LSC,
3125 	.probe = bnxt_pci_probe,
3126 	.remove = bnxt_pci_remove,
3127 };
3128 
3129 static bool
3130 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3131 {
3132 	if (strcmp(dev->device->driver->name, drv->driver.name))
3133 		return false;
3134 
3135 	return true;
3136 }
3137 
3138 bool is_bnxt_supported(struct rte_eth_dev *dev)
3139 {
3140 	return is_device_supported(dev, &bnxt_rte_pmd);
3141 }
3142 
3143 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3144 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3145 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
3146