/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_TRUFLOW	"host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT	"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR	"representor"
#define BNXT_DEVARG_REP_BASED_PF  "rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF  "rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F  "rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R  "rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F  "rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R  "rep-fc-f2r"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_TRUFLOW,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	NULL
};

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define	BNXT_DEVARG_TRUFLOW_INVALID(truflow)	((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define	BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define	BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define	BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)
/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define	BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define	BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *         0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

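/*
 * Return a negative errno if the device cannot be used: -EIO once a
 * fatal error has been flagged, -EBUSY while a firmware reset is in
 * progress, 0 otherwise.
 */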
int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

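/*
 * Thor-based controllers split the RSS indirection table across
 * multiple contexts of BNXT_RSS_ENTRIES_PER_CTX_THOR entries each;
 * older chips use a single context.
 */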
static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

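/*
 * Helpers that release the per-device context blocks allocated by the
 * corresponding bnxt_alloc_*() routines below.
 */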
static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings change across fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

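/*
 * Allocate the software state needed before rings and VNICs can be
 * programmed: ring groups, the async completion ring structure, VNIC
 * and filter memory, and (when flow xstats are enabled) flow counter
 * state. Everything is torn down via bnxt_free_mem() on failure.
 */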
static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}

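/*
 * Bring up a single VNIC: allocate its ring group and HWRM VNIC
 * object, allocate RSS contexts when RSS is enabled, apply the VLAN
 * strip setting, program L2 filters, and finish with RSS, placement
 * mode and LRO (TPA) configuration.
 */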
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
	else
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}

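/*
 * Register the four flow-counter DMA tables (rx/tx, in/out) with the
 * firmware and enable counter collection into the "out" tables.
 */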
static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

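/*
 * Allocate a zeroed, page-locked buffer for firmware DMA and record
 * its size and IOVA in the context descriptor.
 */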
static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc(type, size, 0);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}

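/*
 * Re-read the link configuration from firmware and, if the link is
 * down (or the adapter is a BCM957508-N2100, which always reports
 * link up), push the PHY settings back to firmware.
 */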
static int bnxt_update_phy_setting(struct bnxt *bp)
{
	struct rte_eth_link new;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
		return rc;
	}

	/*
	 * On BCM957508-N2100 adapters, FW will not allow any user other
	 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
	 * always returns link up. Force phy update always in that case.
	 */
	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

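/*
 * Program the chip for operation: allocate stat contexts, rings and
 * ring groups through HWRM, set up the VNICs, configure the L2 rx
 * mask, wire up the rx queue interrupt vectors, and finally update
 * the PHY settings and allocate the mark table.
 */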
static int bnxt_init_chip(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_setup_one_vnic(bp, i);
		if (rc)
			goto err_out;
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
					bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_disable;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			 intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] =
							vec + BNXT_RX_VEC_START;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_free;
#endif

	rc = bnxt_update_phy_setting(bp);
	if (rc)
		goto err_free;

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_free:
	rte_free(intr_handle->intr_vec);
err_disable:
	rte_intr_efd_disable(intr_handle);
err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

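/*
 * Translate the firmware's supported-speed bitmap (falling back to the
 * PAM4 speeds when no NRZ speeds are reported) into ETH_LINK_SPEED_*
 * capability flags, plus fixed vs. autoneg.
 */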
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = bp->link_info->support_speeds;
	uint32_t speed_capa = 0;

	/* If PAM4 is configured, use PAM4 supported speed */
	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
		link_speed = bp->link_info->support_pam4_speeds;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
		speed_capa |= ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;
	else
		speed_capa |= ETH_LINK_SPEED_AUTONEG;

	return speed_capa;
}

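/*
 * Report device limits and capabilities (queue, MAC and VNIC counts,
 * supported offloads, descriptor limits, default ring thresholds and
 * VMDq resources) to the ethdev layer.
 */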
bnxt_dev_info_get_op(struct rte_eth_dev * eth_dev,struct rte_eth_dev_info * dev_info)8914418919fSjohnjiang static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
892a9643ea8Slogwang 				struct rte_eth_dev_info *dev_info)
893a9643ea8Slogwang {
8944418919fSjohnjiang 	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
8954b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
896a9643ea8Slogwang 	uint16_t max_vnics, i, j, vpool, vrxq;
8972bfe3f2eSlogwang 	unsigned int max_rx_rings;
8984418919fSjohnjiang 	int rc;
8994418919fSjohnjiang 
9004418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
9014418919fSjohnjiang 	if (rc)
9024418919fSjohnjiang 		return rc;
9032bfe3f2eSlogwang 
904a9643ea8Slogwang 	/* MAC Specifics */
9052bfe3f2eSlogwang 	dev_info->max_mac_addrs = bp->max_l2_ctx;
906a9643ea8Slogwang 	dev_info->max_hash_mac_addrs = 0;
907a9643ea8Slogwang 
908a9643ea8Slogwang 	/* PF/VF specifics */
9092bfe3f2eSlogwang 	if (BNXT_PF(bp))
9104418919fSjohnjiang 		dev_info->max_vfs = pdev->max_vfs;
9114418919fSjohnjiang 
9124418919fSjohnjiang 	max_rx_rings = BNXT_MAX_RINGS(bp);
9132bfe3f2eSlogwang 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
9142bfe3f2eSlogwang 	dev_info->max_rx_queues = max_rx_rings;
9152bfe3f2eSlogwang 	dev_info->max_tx_queues = max_rx_rings;
9164418919fSjohnjiang 	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
9172bfe3f2eSlogwang 	dev_info->hash_key_size = 40;
9182bfe3f2eSlogwang 	max_vnics = bp->max_vnics;
919a9643ea8Slogwang 
9204418919fSjohnjiang 	/* MTU specifics */
9214418919fSjohnjiang 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
9224418919fSjohnjiang 	dev_info->max_mtu = BNXT_MAX_MTU;
9234418919fSjohnjiang 
924a9643ea8Slogwang 	/* Fast path specifics */
925a9643ea8Slogwang 	dev_info->min_rx_bufsize = 1;
9264418919fSjohnjiang 	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
927d30ea906Sjfb8856606 
928d30ea906Sjfb8856606 	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
929d30ea906Sjfb8856606 	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
930d30ea906Sjfb8856606 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
931*2d9fd380Sjfb8856606 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
932*2d9fd380Sjfb8856606 	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
933*2d9fd380Sjfb8856606 				    dev_info->tx_queue_offload_capa;
934d30ea906Sjfb8856606 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
935a9643ea8Slogwang 
936*2d9fd380Sjfb8856606 	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
937*2d9fd380Sjfb8856606 
938a9643ea8Slogwang 	/* *INDENT-OFF* */
939a9643ea8Slogwang 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
940a9643ea8Slogwang 		.rx_thresh = {
941a9643ea8Slogwang 			.pthresh = 8,
942a9643ea8Slogwang 			.hthresh = 8,
943a9643ea8Slogwang 			.wthresh = 0,
944a9643ea8Slogwang 		},
945a9643ea8Slogwang 		.rx_free_thresh = 32,
9460c6bd470Sfengbojiang 		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
947a9643ea8Slogwang 	};
948a9643ea8Slogwang 
949a9643ea8Slogwang 	dev_info->default_txconf = (struct rte_eth_txconf) {
950a9643ea8Slogwang 		.tx_thresh = {
951a9643ea8Slogwang 			.pthresh = 32,
952a9643ea8Slogwang 			.hthresh = 0,
953a9643ea8Slogwang 			.wthresh = 0,
954a9643ea8Slogwang 		},
955a9643ea8Slogwang 		.tx_free_thresh = 32,
956a9643ea8Slogwang 		.tx_rs_thresh = 32,
957a9643ea8Slogwang 	};
9582bfe3f2eSlogwang 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
9592bfe3f2eSlogwang 
9602bfe3f2eSlogwang 	eth_dev->data->dev_conf.intr_conf.rxq = 1;
961d30ea906Sjfb8856606 	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
962d30ea906Sjfb8856606 	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
963d30ea906Sjfb8856606 	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
964d30ea906Sjfb8856606 	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
9652bfe3f2eSlogwang 
966*2d9fd380Sjfb8856606 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
967*2d9fd380Sjfb8856606 		dev_info->switch_info.name = eth_dev->device->name;
968*2d9fd380Sjfb8856606 		dev_info->switch_info.domain_id = bp->switch_domain_id;
969*2d9fd380Sjfb8856606 		dev_info->switch_info.port_id =
970*2d9fd380Sjfb8856606 				BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
971*2d9fd380Sjfb8856606 				    BNXT_SWITCH_PORT_ID_TRUSTED_VF;
972*2d9fd380Sjfb8856606 	}
973*2d9fd380Sjfb8856606 
974a9643ea8Slogwang 	/* *INDENT-ON* */
975a9643ea8Slogwang 
976a9643ea8Slogwang 	/*
977a9643ea8Slogwang 	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
978a9643ea8Slogwang 	 *       need further investigation.
979a9643ea8Slogwang 	 */
980a9643ea8Slogwang 
981a9643ea8Slogwang 	/* VMDq resources */
982a9643ea8Slogwang 	vpool = 64; /* ETH_64_POOLS */
983a9643ea8Slogwang 	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
984a9643ea8Slogwang 	for (i = 0; i < 4; vpool >>= 1, i++) {
985a9643ea8Slogwang 		if (max_vnics > vpool) {
986a9643ea8Slogwang 			for (j = 0; j < 5; vrxq >>= 1, j++) {
987a9643ea8Slogwang 				if (dev_info->max_rx_queues > vrxq) {
988a9643ea8Slogwang 					if (vpool > vrxq)
989a9643ea8Slogwang 						vpool = vrxq;
990a9643ea8Slogwang 					goto found;
991a9643ea8Slogwang 				}
992a9643ea8Slogwang 			}
993a9643ea8Slogwang 			/* Not enough resources to support VMDq */
994a9643ea8Slogwang 			break;
995a9643ea8Slogwang 		}
996a9643ea8Slogwang 	}
997a9643ea8Slogwang 	/* Not enough resources to support VMDq */
998a9643ea8Slogwang 	vpool = 0;
999a9643ea8Slogwang 	vrxq = 0;
1000a9643ea8Slogwang found:
1001a9643ea8Slogwang 	dev_info->max_vmdq_pools = vpool;
1002a9643ea8Slogwang 	dev_info->vmdq_queue_num = vrxq;
1003a9643ea8Slogwang 
1004a9643ea8Slogwang 	dev_info->vmdq_pool_base = 0;
1005a9643ea8Slogwang 	dev_info->vmdq_queue_base = 0;
10064418919fSjohnjiang 
10074418919fSjohnjiang 	return 0;
1008a9643ea8Slogwang }
1009a9643ea8Slogwang 
1010a9643ea8Slogwang /* Configure the device based on the configuration provided */
bnxt_dev_configure_op(struct rte_eth_dev * eth_dev)1011a9643ea8Slogwang static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
1012a9643ea8Slogwang {
10134b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1014d30ea906Sjfb8856606 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
1015d30ea906Sjfb8856606 	int rc;
1016a9643ea8Slogwang 
1017a9643ea8Slogwang 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
1018a9643ea8Slogwang 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
1019d30ea906Sjfb8856606 	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
1020d30ea906Sjfb8856606 	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
1021d30ea906Sjfb8856606 
10224418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
10234418919fSjohnjiang 	if (rc)
10244418919fSjohnjiang 		return rc;
10254418919fSjohnjiang 
1026d30ea906Sjfb8856606 	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
1027d30ea906Sjfb8856606 		rc = bnxt_hwrm_check_vf_rings(bp);
1028d30ea906Sjfb8856606 		if (rc) {
1029d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
1030d30ea906Sjfb8856606 			return -ENOSPC;
1031d30ea906Sjfb8856606 		}
1032d30ea906Sjfb8856606 
10334418919fSjohnjiang 		/* If a resource has already been allocated - in this case
10344418919fSjohnjiang 		 * it is the async completion ring, free it. Reallocate it after
10354418919fSjohnjiang 		 * resource reservation. This will ensure the resource counts
10364418919fSjohnjiang 		 * are calculated correctly.
10374418919fSjohnjiang 		 */
10384418919fSjohnjiang 
10394418919fSjohnjiang 		pthread_mutex_lock(&bp->def_cp_lock);
10404418919fSjohnjiang 
10414418919fSjohnjiang 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
10424418919fSjohnjiang 			bnxt_disable_int(bp);
10434418919fSjohnjiang 			bnxt_free_cp_ring(bp, bp->async_cp_ring);
10444418919fSjohnjiang 		}
10454418919fSjohnjiang 
1046d30ea906Sjfb8856606 		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
1047d30ea906Sjfb8856606 		if (rc) {
1048d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
10494418919fSjohnjiang 			pthread_mutex_unlock(&bp->def_cp_lock);
1050d30ea906Sjfb8856606 			return -ENOSPC;
1051d30ea906Sjfb8856606 		}
10524418919fSjohnjiang 
10534418919fSjohnjiang 		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
10544418919fSjohnjiang 			rc = bnxt_alloc_async_cp_ring(bp);
10554418919fSjohnjiang 			if (rc) {
10564418919fSjohnjiang 				pthread_mutex_unlock(&bp->def_cp_lock);
10574418919fSjohnjiang 				return rc;
10584418919fSjohnjiang 			}
10594418919fSjohnjiang 			bnxt_enable_int(bp);
10604418919fSjohnjiang 		}
10614418919fSjohnjiang 
10624418919fSjohnjiang 		pthread_mutex_unlock(&bp->def_cp_lock);
1063d30ea906Sjfb8856606 	} else {
1064d30ea906Sjfb8856606 		/* legacy driver needs to get updated values */
1065d30ea906Sjfb8856606 		rc = bnxt_hwrm_func_qcaps(bp);
1066d30ea906Sjfb8856606 		if (rc) {
1067d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
1068d30ea906Sjfb8856606 			return rc;
1069d30ea906Sjfb8856606 		}
1070d30ea906Sjfb8856606 	}
1071a9643ea8Slogwang 
1072a9643ea8Slogwang 	/* Inherit new configurations */
1073d30ea906Sjfb8856606 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
1074d30ea906Sjfb8856606 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
10754418919fSjohnjiang 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
10764418919fSjohnjiang 		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
1077d30ea906Sjfb8856606 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
10784418919fSjohnjiang 	    bp->max_stat_ctx)
10794418919fSjohnjiang 		goto resource_error;
10804418919fSjohnjiang 
10814418919fSjohnjiang 	if (BNXT_HAS_RING_GRPS(bp) &&
10824418919fSjohnjiang 	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
10834418919fSjohnjiang 		goto resource_error;
10844418919fSjohnjiang 
10854418919fSjohnjiang 	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
10864418919fSjohnjiang 	    bp->max_vnics < eth_dev->data->nb_rx_queues)
10874418919fSjohnjiang 		goto resource_error;
10884418919fSjohnjiang 
10894418919fSjohnjiang 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
10904418919fSjohnjiang 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
10914418919fSjohnjiang 
10924418919fSjohnjiang 	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
10934418919fSjohnjiang 		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
10944418919fSjohnjiang 	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
10954418919fSjohnjiang 
10964418919fSjohnjiang 	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
10974418919fSjohnjiang 		eth_dev->data->mtu =
10984418919fSjohnjiang 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
10994418919fSjohnjiang 			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
11004418919fSjohnjiang 			BNXT_NUM_VLANS;
11014418919fSjohnjiang 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
11024418919fSjohnjiang 	}
11034418919fSjohnjiang 	return 0;
11044418919fSjohnjiang 
11054418919fSjohnjiang resource_error:
1106d30ea906Sjfb8856606 	PMD_DRV_LOG(ERR,
1107d30ea906Sjfb8856606 		    "Insufficient resources to support requested config\n");
1108d30ea906Sjfb8856606 	PMD_DRV_LOG(ERR,
1109d30ea906Sjfb8856606 		    "Num Queues Requested: Tx %d, Rx %d\n",
1110d30ea906Sjfb8856606 		    eth_dev->data->nb_tx_queues,
1111d30ea906Sjfb8856606 		    eth_dev->data->nb_rx_queues);
1112d30ea906Sjfb8856606 	PMD_DRV_LOG(ERR,
1113d30ea906Sjfb8856606 		    "MAX: TxQ %d, RxQ %d, CQ %d, Stat %d, Grp %d, Vnic %d\n",
1114d30ea906Sjfb8856606 		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
1115d30ea906Sjfb8856606 		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
1116d30ea906Sjfb8856606 	return -ENOSPC;
1117d30ea906Sjfb8856606 }
1118d30ea906Sjfb8856606 
1119*2d9fd380Sjfb8856606 void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
11202bfe3f2eSlogwang {
11212bfe3f2eSlogwang 	struct rte_eth_link *link = &eth_dev->data->dev_link;
11222bfe3f2eSlogwang 
11232bfe3f2eSlogwang 	if (link->link_status)
1124d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
11252bfe3f2eSlogwang 			eth_dev->data->port_id,
11262bfe3f2eSlogwang 			(uint32_t)link->link_speed,
11272bfe3f2eSlogwang 			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
11282bfe3f2eSlogwang 			("full-duplex") : ("half-duplex"));
11292bfe3f2eSlogwang 	else
1130d30ea906Sjfb8856606 		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
11312bfe3f2eSlogwang 			eth_dev->data->port_id);
11322bfe3f2eSlogwang }
11332bfe3f2eSlogwang 
11344418919fSjohnjiang /*
11354418919fSjohnjiang  * Determine whether the current configuration requires support for scattered
11364418919fSjohnjiang  * receive; return 1 if scattered receive is required and 0 if not.
11374418919fSjohnjiang  */
11384418919fSjohnjiang static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
11394418919fSjohnjiang {
11404418919fSjohnjiang 	uint16_t buf_size;
11414418919fSjohnjiang 	int i;
11424418919fSjohnjiang 
11434418919fSjohnjiang 	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
11444418919fSjohnjiang 		return 1;
11454418919fSjohnjiang 
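	/*
	 * Scattered Rx is also required when the maximum allowed Rx frame
	 * cannot fit in the usable data area of a single mbuf from any Rx
	 * queue's mempool.
	 */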
11464418919fSjohnjiang 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
11474418919fSjohnjiang 		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
11484418919fSjohnjiang 
11494418919fSjohnjiang 		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
11504418919fSjohnjiang 				      RTE_PKTMBUF_HEADROOM);
11514418919fSjohnjiang 		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
11524418919fSjohnjiang 			return 1;
11534418919fSjohnjiang 	}
11544418919fSjohnjiang 	return 0;
11554418919fSjohnjiang }
11564418919fSjohnjiang 
11574418919fSjohnjiang static eth_rx_burst_t
1158*2d9fd380Sjfb8856606 bnxt_receive_function(struct rte_eth_dev *eth_dev)
11594418919fSjohnjiang {
1160*2d9fd380Sjfb8856606 	struct bnxt *bp = eth_dev->data->dev_private;
1161*2d9fd380Sjfb8856606 
1162*2d9fd380Sjfb8856606 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
11634418919fSjohnjiang #ifndef RTE_LIBRTE_IEEE1588
11644418919fSjohnjiang 	/*
11654418919fSjohnjiang 	 * Vector mode receive can be enabled only if scatter rx and TRUFLOW
11664418919fSjohnjiang 	 * are not in use and the rx offloads are limited to VLAN strip/filter,
11674418919fSjohnjiang 	 * CRC keep, jumbo frame, checksums and the RSS hash offload.
11684418919fSjohnjiang 	 */
11694418919fSjohnjiang 	if (!eth_dev->data->scattered_rx &&
11704418919fSjohnjiang 	    !(eth_dev->data->dev_conf.rxmode.offloads &
11714418919fSjohnjiang 	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
11724418919fSjohnjiang 		DEV_RX_OFFLOAD_KEEP_CRC |
11734418919fSjohnjiang 		DEV_RX_OFFLOAD_JUMBO_FRAME |
11744418919fSjohnjiang 		DEV_RX_OFFLOAD_IPV4_CKSUM |
11754418919fSjohnjiang 		DEV_RX_OFFLOAD_UDP_CKSUM |
11764418919fSjohnjiang 		DEV_RX_OFFLOAD_TCP_CKSUM |
11774418919fSjohnjiang 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
11784418919fSjohnjiang 		DEV_RX_OFFLOAD_RSS_HASH |
1179*2d9fd380Sjfb8856606 		DEV_RX_OFFLOAD_VLAN_FILTER)) &&
1180*2d9fd380Sjfb8856606 	    !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) &&
1181*2d9fd380Sjfb8856606 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
11824418919fSjohnjiang 		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
11834418919fSjohnjiang 			    eth_dev->data->port_id);
1184*2d9fd380Sjfb8856606 		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
11854418919fSjohnjiang 		return bnxt_recv_pkts_vec;
11864418919fSjohnjiang 	}
11874418919fSjohnjiang 	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
11884418919fSjohnjiang 		    eth_dev->data->port_id);
11894418919fSjohnjiang 	PMD_DRV_LOG(INFO,
11904418919fSjohnjiang 		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
11914418919fSjohnjiang 		    eth_dev->data->port_id,
11924418919fSjohnjiang 		    eth_dev->data->scattered_rx,
11934418919fSjohnjiang 		    eth_dev->data->dev_conf.rxmode.offloads);
11944418919fSjohnjiang #endif
11954418919fSjohnjiang #endif
1196*2d9fd380Sjfb8856606 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
11974418919fSjohnjiang 	return bnxt_recv_pkts;
11984418919fSjohnjiang }
11994418919fSjohnjiang 
12004418919fSjohnjiang static eth_tx_burst_t
12014418919fSjohnjiang bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
12024418919fSjohnjiang {
1203*2d9fd380Sjfb8856606 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
12044418919fSjohnjiang #ifndef RTE_LIBRTE_IEEE1588
1205*2d9fd380Sjfb8856606 	uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
1206*2d9fd380Sjfb8856606 	struct bnxt *bp = eth_dev->data->dev_private;
1207*2d9fd380Sjfb8856606 
12084418919fSjohnjiang 	/*
12094418919fSjohnjiang 	 * Vector mode transmit can be enabled only if not using scatter rx,
12104418919fSjohnjiang 	 * TRUFLOW, or any tx offload other than mbuf fast free.
12114418919fSjohnjiang 	 */
12124418919fSjohnjiang 	if (!eth_dev->data->scattered_rx &&
1213*2d9fd380Sjfb8856606 	    !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
1214*2d9fd380Sjfb8856606 	    !BNXT_TRUFLOW_EN(bp) &&
1215*2d9fd380Sjfb8856606 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
12164418919fSjohnjiang 		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
12174418919fSjohnjiang 			    eth_dev->data->port_id);
12184418919fSjohnjiang 		return bnxt_xmit_pkts_vec;
12194418919fSjohnjiang 	}
12204418919fSjohnjiang 	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
12214418919fSjohnjiang 		    eth_dev->data->port_id);
12224418919fSjohnjiang 	PMD_DRV_LOG(INFO,
12234418919fSjohnjiang 		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
12244418919fSjohnjiang 		    eth_dev->data->port_id,
12254418919fSjohnjiang 		    eth_dev->data->scattered_rx,
1226*2d9fd380Sjfb8856606 		    offloads);
12274418919fSjohnjiang #endif
12284418919fSjohnjiang #endif
12294418919fSjohnjiang 	return bnxt_xmit_pkts;
12304418919fSjohnjiang }
12314418919fSjohnjiang 
12324418919fSjohnjiang static int bnxt_handle_if_change_status(struct bnxt *bp)
12334418919fSjohnjiang {
12344418919fSjohnjiang 	int rc;
12354418919fSjohnjiang 
12364418919fSjohnjiang 	/* Since fw has undergone a reset and lost all contexts,
12374418919fSjohnjiang 	 * set fatal flag to not issue hwrm during cleanup
12384418919fSjohnjiang 	 */
12394418919fSjohnjiang 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
12404418919fSjohnjiang 	bnxt_uninit_resources(bp, true);
12414418919fSjohnjiang 
12424418919fSjohnjiang 	/* clear fatal flag so that re-init happens */
12434418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
12444418919fSjohnjiang 	rc = bnxt_init_resources(bp, true);
12454418919fSjohnjiang 
12464418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
12474418919fSjohnjiang 
12484418919fSjohnjiang 	return rc;
12494418919fSjohnjiang }
12504418919fSjohnjiang 
1251a9643ea8Slogwang static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
1252a9643ea8Slogwang {
12534b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1254d30ea906Sjfb8856606 	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
12552bfe3f2eSlogwang 	int vlan_mask = 0;
12560c6bd470Sfengbojiang 	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;
1257a9643ea8Slogwang 
12584418919fSjohnjiang 	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
12594418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
12604418919fSjohnjiang 		return -EINVAL;
12614418919fSjohnjiang 	}
12624418919fSjohnjiang 
12632bfe3f2eSlogwang 	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1264d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
1265*2d9fd380Sjfb8856606 			"RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
12662bfe3f2eSlogwang 			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1267a9643ea8Slogwang 	}
1268a9643ea8Slogwang 
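	/*
	 * Notify the firmware that the interface is being brought up; retry
	 * while the firmware is busy and keeps returning -EAGAIN (e.g. a
	 * reset is still in progress).
	 */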
12690c6bd470Sfengbojiang 	do {
12700c6bd470Sfengbojiang 		rc = bnxt_hwrm_if_change(bp, true);
12710c6bd470Sfengbojiang 		if (rc == 0 || rc != -EAGAIN)
12720c6bd470Sfengbojiang 			break;
12730c6bd470Sfengbojiang 
12740c6bd470Sfengbojiang 		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
12750c6bd470Sfengbojiang 	} while (retry_cnt--);
12760c6bd470Sfengbojiang 
12770c6bd470Sfengbojiang 	if (rc)
12780c6bd470Sfengbojiang 		return rc;
12790c6bd470Sfengbojiang 
12804418919fSjohnjiang 	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
12814418919fSjohnjiang 		rc = bnxt_handle_if_change_status(bp);
12824418919fSjohnjiang 		if (rc)
12834418919fSjohnjiang 			return rc;
12844418919fSjohnjiang 	}
12850c6bd470Sfengbojiang 
12864418919fSjohnjiang 	bnxt_enable_int(bp);
12874418919fSjohnjiang 
1288579bf1e2Sjfb8856606 	rc = bnxt_init_chip(bp);
1289a9643ea8Slogwang 	if (rc)
1290a9643ea8Slogwang 		goto error;
1291a9643ea8Slogwang 
12924418919fSjohnjiang 	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
12934418919fSjohnjiang 	eth_dev->data->dev_started = 1;
12944418919fSjohnjiang 
12950c6bd470Sfengbojiang 	bnxt_link_update_op(eth_dev, 1);
12962bfe3f2eSlogwang 
1297d30ea906Sjfb8856606 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
12982bfe3f2eSlogwang 		vlan_mask |= ETH_VLAN_FILTER_MASK;
1299d30ea906Sjfb8856606 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
13002bfe3f2eSlogwang 		vlan_mask |= ETH_VLAN_STRIP_MASK;
13012bfe3f2eSlogwang 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
1302a9643ea8Slogwang 	if (rc)
1303a9643ea8Slogwang 		goto error;
1304a9643ea8Slogwang 
1305*2d9fd380Sjfb8856606 	/* Initialize bnxt ULP port details */
1306*2d9fd380Sjfb8856606 	rc = bnxt_ulp_port_init(bp);
1307*2d9fd380Sjfb8856606 	if (rc)
1308*2d9fd380Sjfb8856606 		goto error;
1309*2d9fd380Sjfb8856606 
13104418919fSjohnjiang 	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
13114418919fSjohnjiang 	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
13124418919fSjohnjiang 
13134418919fSjohnjiang 	bnxt_schedule_fw_health_check(bp);
13140c6bd470Sfengbojiang 
1315a9643ea8Slogwang 	return 0;
1316a9643ea8Slogwang 
1317a9643ea8Slogwang error:
1318a9643ea8Slogwang 	bnxt_shutdown_nic(bp);
1319a9643ea8Slogwang 	bnxt_free_tx_mbufs(bp);
1320a9643ea8Slogwang 	bnxt_free_rx_mbufs(bp);
13210c6bd470Sfengbojiang 	bnxt_hwrm_if_change(bp, false);
13224418919fSjohnjiang 	eth_dev->data->dev_started = 0;
1323a9643ea8Slogwang 	return rc;
1324a9643ea8Slogwang }
1325a9643ea8Slogwang 
1326a9643ea8Slogwang static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
1327a9643ea8Slogwang {
13284b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
13292bfe3f2eSlogwang 	int rc = 0;
1330a9643ea8Slogwang 
1331*2d9fd380Sjfb8856606 	if (!bp->link_info->link_up)
13322bfe3f2eSlogwang 		rc = bnxt_set_hwrm_link_config(bp, true);
13332bfe3f2eSlogwang 	if (!rc)
1334a9643ea8Slogwang 		eth_dev->data->dev_link.link_status = 1;
13352bfe3f2eSlogwang 
13362bfe3f2eSlogwang 	bnxt_print_link_info(eth_dev);
13374418919fSjohnjiang 	return rc;
1338a9643ea8Slogwang }
1339a9643ea8Slogwang 
1340a9643ea8Slogwang static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
1341a9643ea8Slogwang {
13424b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1343a9643ea8Slogwang 
1344a9643ea8Slogwang 	eth_dev->data->dev_link.link_status = 0;
1345a9643ea8Slogwang 	bnxt_set_hwrm_link_config(bp, false);
1346*2d9fd380Sjfb8856606 	bp->link_info->link_up = 0;
13472bfe3f2eSlogwang 
1348a9643ea8Slogwang 	return 0;
1349a9643ea8Slogwang }
1350a9643ea8Slogwang 
1351*2d9fd380Sjfb8856606 static void bnxt_free_switch_domain(struct bnxt *bp)
1352*2d9fd380Sjfb8856606 {
1353*2d9fd380Sjfb8856606 	int rc = 0;
1354*2d9fd380Sjfb8856606 
1355*2d9fd380Sjfb8856606 	if (bp->switch_domain_id) {
1356*2d9fd380Sjfb8856606 		rc = rte_eth_switch_domain_free(bp->switch_domain_id);
1357*2d9fd380Sjfb8856606 		if (rc)
1358*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
1359*2d9fd380Sjfb8856606 				    bp->switch_domain_id, rc);
1360*2d9fd380Sjfb8856606 	}
1361*2d9fd380Sjfb8856606 }
1362*2d9fd380Sjfb8856606 
1363a9643ea8Slogwang /* Unload the driver, release resources */
1364*2d9fd380Sjfb8856606 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
1365a9643ea8Slogwang {
13664b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
13674b05018fSfengbojiang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
13684b05018fSfengbojiang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
13690c6bd470Sfengbojiang 	struct rte_eth_link link;
1370*2d9fd380Sjfb8856606 	int ret;
13714b05018fSfengbojiang 
13724418919fSjohnjiang 	eth_dev->data->dev_started = 0;
1373*2d9fd380Sjfb8856606 	eth_dev->data->scattered_rx = 0;
1374*2d9fd380Sjfb8856606 
13754418919fSjohnjiang 	/* Prevent crashes when queues are still in use */
13764418919fSjohnjiang 	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
13774418919fSjohnjiang 	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
13784418919fSjohnjiang 
13794b05018fSfengbojiang 	bnxt_disable_int(bp);
13804b05018fSfengbojiang 
13814b05018fSfengbojiang 	/* disable uio/vfio intr/eventfd mapping */
13824b05018fSfengbojiang 	rte_intr_disable(intr_handle);
1383a9643ea8Slogwang 
1384*2d9fd380Sjfb8856606 	/* Stop the child representors for this device */
1385*2d9fd380Sjfb8856606 	ret = bnxt_rep_stop_all(bp);
1386*2d9fd380Sjfb8856606 	if (ret != 0)
1387*2d9fd380Sjfb8856606 		return ret;
1388*2d9fd380Sjfb8856606 
1389*2d9fd380Sjfb8856606 	/* delete the bnxt ULP port details */
1390*2d9fd380Sjfb8856606 	bnxt_ulp_port_deinit(bp);
1391*2d9fd380Sjfb8856606 
13924418919fSjohnjiang 	bnxt_cancel_fw_health_check(bp);
13934418919fSjohnjiang 
13940c6bd470Sfengbojiang 	/* Do not bring link down during reset recovery */
13950c6bd470Sfengbojiang 	if (!is_bnxt_in_error(bp)) {
13964418919fSjohnjiang 		bnxt_dev_set_link_down_op(eth_dev);
13970c6bd470Sfengbojiang 		/* Wait for link to be reset */
13980c6bd470Sfengbojiang 		if (BNXT_SINGLE_PF(bp))
13990c6bd470Sfengbojiang 			rte_delay_ms(500);
14000c6bd470Sfengbojiang 		/* clear the recorded link status */
14010c6bd470Sfengbojiang 		memset(&link, 0, sizeof(link));
14020c6bd470Sfengbojiang 		rte_eth_linkstatus_set(eth_dev, &link);
14030c6bd470Sfengbojiang 	}
14044b05018fSfengbojiang 
14054b05018fSfengbojiang 	/* Clean queue intr-vector mapping */
14064b05018fSfengbojiang 	rte_intr_efd_disable(intr_handle);
14074b05018fSfengbojiang 	if (intr_handle->intr_vec != NULL) {
14084b05018fSfengbojiang 		rte_free(intr_handle->intr_vec);
14094b05018fSfengbojiang 		intr_handle->intr_vec = NULL;
14104b05018fSfengbojiang 	}
14114b05018fSfengbojiang 
14122bfe3f2eSlogwang 	bnxt_hwrm_port_clr_stats(bp);
1413579bf1e2Sjfb8856606 	bnxt_free_tx_mbufs(bp);
1414579bf1e2Sjfb8856606 	bnxt_free_rx_mbufs(bp);
14154418919fSjohnjiang 	/* Process any remaining notifications in default completion queue */
14164418919fSjohnjiang 	bnxt_int_handler(eth_dev);
1417a9643ea8Slogwang 	bnxt_shutdown_nic(bp);
14180c6bd470Sfengbojiang 	bnxt_hwrm_if_change(bp, false);
1419*2d9fd380Sjfb8856606 
1420*2d9fd380Sjfb8856606 	rte_free(bp->mark_table);
1421*2d9fd380Sjfb8856606 	bp->mark_table = NULL;
1422*2d9fd380Sjfb8856606 
1423*2d9fd380Sjfb8856606 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
14244418919fSjohnjiang 	bp->rx_cosq_cnt = 0;
1425*2d9fd380Sjfb8856606 	/* All filters are deleted on a port stop. */
1426*2d9fd380Sjfb8856606 	if (BNXT_FLOW_XSTATS_EN(bp))
1427*2d9fd380Sjfb8856606 		bp->flow_stat->flow_count = 0;
1428*2d9fd380Sjfb8856606 
1429*2d9fd380Sjfb8856606 	return 0;
1430a9643ea8Slogwang }
1431a9643ea8Slogwang 
1432*2d9fd380Sjfb8856606 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
1433a9643ea8Slogwang {
14344b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1435*2d9fd380Sjfb8856606 	int ret = 0;
1436*2d9fd380Sjfb8856606 
1437*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1438*2d9fd380Sjfb8856606 		return 0;
1439a9643ea8Slogwang 
14404418919fSjohnjiang 	/* cancel the recovery handler before remove dev */
14414418919fSjohnjiang 	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
14424418919fSjohnjiang 	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
1443*2d9fd380Sjfb8856606 	bnxt_cancel_fc_thread(bp);
14444418919fSjohnjiang 
14454418919fSjohnjiang 	if (eth_dev->data->dev_started)
1446*2d9fd380Sjfb8856606 		ret = bnxt_dev_stop_op(eth_dev);
1447a9643ea8Slogwang 
1448*2d9fd380Sjfb8856606 	bnxt_free_switch_domain(bp);
1449*2d9fd380Sjfb8856606 
1450*2d9fd380Sjfb8856606 	bnxt_uninit_resources(bp, false);
1451*2d9fd380Sjfb8856606 
1452*2d9fd380Sjfb8856606 	bnxt_free_leds_info(bp);
1453*2d9fd380Sjfb8856606 	bnxt_free_cos_queues(bp);
1454*2d9fd380Sjfb8856606 	bnxt_free_link_info(bp);
1455*2d9fd380Sjfb8856606 	bnxt_free_pf_info(bp);
1456*2d9fd380Sjfb8856606 	bnxt_free_parent_info(bp);
1457*2d9fd380Sjfb8856606 
1458*2d9fd380Sjfb8856606 	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
1459*2d9fd380Sjfb8856606 	bp->tx_mem_zone = NULL;
1460*2d9fd380Sjfb8856606 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
1461*2d9fd380Sjfb8856606 	bp->rx_mem_zone = NULL;
1462*2d9fd380Sjfb8856606 
1463*2d9fd380Sjfb8856606 	bnxt_hwrm_free_vf_info(bp);
1464*2d9fd380Sjfb8856606 
14652bfe3f2eSlogwang 	rte_free(bp->grp_info);
14662bfe3f2eSlogwang 	bp->grp_info = NULL;
1467d30ea906Sjfb8856606 
1468*2d9fd380Sjfb8856606 	return ret;
1469a9643ea8Slogwang }
1470a9643ea8Slogwang 
1471a9643ea8Slogwang static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
1472a9643ea8Slogwang 				    uint32_t index)
1473a9643ea8Slogwang {
14744b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1475a9643ea8Slogwang 	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
1476a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
1477a9643ea8Slogwang 	struct bnxt_filter_info *filter, *temp_filter;
14782bfe3f2eSlogwang 	uint32_t i;
1479a9643ea8Slogwang 
14804418919fSjohnjiang 	if (is_bnxt_in_error(bp))
14814418919fSjohnjiang 		return;
14824418919fSjohnjiang 
1483a9643ea8Slogwang 	/*
1484a9643ea8Slogwang 	 * Loop through all VNICs from the specified filter flow pools to
1485a9643ea8Slogwang 	 * remove the corresponding MAC addr filter
1486a9643ea8Slogwang 	 */
1487d30ea906Sjfb8856606 	for (i = 0; i < bp->nr_vnics; i++) {
1488a9643ea8Slogwang 		if (!(pool_mask & (1ULL << i)))
1489a9643ea8Slogwang 			continue;
1490a9643ea8Slogwang 
1491d30ea906Sjfb8856606 		vnic = &bp->vnic_info[i];
1492a9643ea8Slogwang 		filter = STAILQ_FIRST(&vnic->filter);
1493a9643ea8Slogwang 		while (filter) {
1494a9643ea8Slogwang 			temp_filter = STAILQ_NEXT(filter, next);
1495a9643ea8Slogwang 			if (filter->mac_index == index) {
1496a9643ea8Slogwang 				STAILQ_REMOVE(&vnic->filter, filter,
1497a9643ea8Slogwang 						bnxt_filter_info, next);
14982bfe3f2eSlogwang 				bnxt_hwrm_clear_l2_filter(bp, filter);
14994418919fSjohnjiang 				bnxt_free_filter(bp, filter);
1500a9643ea8Slogwang 			}
1501a9643ea8Slogwang 			filter = temp_filter;
1502a9643ea8Slogwang 		}
1503a9643ea8Slogwang 	}
1504a9643ea8Slogwang }
1505a9643ea8Slogwang 
15064418919fSjohnjiang static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
15074418919fSjohnjiang 			       struct rte_ether_addr *mac_addr, uint32_t index,
15084418919fSjohnjiang 			       uint32_t pool)
15094418919fSjohnjiang {
15104418919fSjohnjiang 	struct bnxt_filter_info *filter;
15114418919fSjohnjiang 	int rc = 0;
15124418919fSjohnjiang 
15134418919fSjohnjiang 	/* Attach requested MAC address to the new l2_filter */
15144418919fSjohnjiang 	STAILQ_FOREACH(filter, &vnic->filter, next) {
15154418919fSjohnjiang 		if (filter->mac_index == index) {
15164418919fSjohnjiang 			PMD_DRV_LOG(DEBUG,
15174418919fSjohnjiang 				    "MAC addr already exists for pool %d\n",
15184418919fSjohnjiang 				    pool);
15194418919fSjohnjiang 			return 0;
15204418919fSjohnjiang 		}
15214418919fSjohnjiang 	}
15224418919fSjohnjiang 
15234418919fSjohnjiang 	filter = bnxt_alloc_filter(bp);
15244418919fSjohnjiang 	if (!filter) {
15254418919fSjohnjiang 		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
15264418919fSjohnjiang 		return -ENODEV;
15274418919fSjohnjiang 	}
15284418919fSjohnjiang 
15294418919fSjohnjiang 	/* bnxt_alloc_filter copies the default MAC to filter->l2_addr.
15304418919fSjohnjiang 	 * If a different MAC is being programmed now, copy that address
15314418919fSjohnjiang 	 * into filter->l2_addr instead.
15324418919fSjohnjiang 	 */
15334418919fSjohnjiang 	if (mac_addr)
15344418919fSjohnjiang 		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
15354418919fSjohnjiang 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
15364418919fSjohnjiang 
15374418919fSjohnjiang 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
15384418919fSjohnjiang 	if (!rc) {
15394418919fSjohnjiang 		filter->mac_index = index;
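		/* Keep the filter for the default MAC (index 0) at the head of the list. */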
15404418919fSjohnjiang 		if (filter->mac_index == 0)
15414418919fSjohnjiang 			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
15424418919fSjohnjiang 		else
15434418919fSjohnjiang 			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
15444418919fSjohnjiang 	} else {
15454418919fSjohnjiang 		bnxt_free_filter(bp, filter);
15464418919fSjohnjiang 	}
15474418919fSjohnjiang 
15484418919fSjohnjiang 	return rc;
15494418919fSjohnjiang }
15504418919fSjohnjiang 
15512bfe3f2eSlogwang static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
15524418919fSjohnjiang 				struct rte_ether_addr *mac_addr,
1553a9643ea8Slogwang 				uint32_t index, uint32_t pool)
1554a9643ea8Slogwang {
15554b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1556d30ea906Sjfb8856606 	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
15574b05018fSfengbojiang 	int rc = 0;
1558a9643ea8Slogwang 
15594418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
15604418919fSjohnjiang 	if (rc)
15614418919fSjohnjiang 		return rc;
15624418919fSjohnjiang 
15630c6bd470Sfengbojiang 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1564d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
15652bfe3f2eSlogwang 		return -ENOTSUP;
15662bfe3f2eSlogwang 	}
15672bfe3f2eSlogwang 
1568a9643ea8Slogwang 	if (!vnic) {
1569d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
15702bfe3f2eSlogwang 		return -EINVAL;
1571a9643ea8Slogwang 	}
15724418919fSjohnjiang 
15734418919fSjohnjiang 	/* Filter settings will get applied when port is started */
15744418919fSjohnjiang 	if (!eth_dev->data->dev_started)
15752bfe3f2eSlogwang 		return 0;
15764b05018fSfengbojiang 
15774418919fSjohnjiang 	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
15784b05018fSfengbojiang 
15794b05018fSfengbojiang 	return rc;
1580a9643ea8Slogwang }
1581a9643ea8Slogwang 
15820c6bd470Sfengbojiang int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
1583a9643ea8Slogwang {
1584a9643ea8Slogwang 	int rc = 0;
15854b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1586a9643ea8Slogwang 	struct rte_eth_link new;
15870c6bd470Sfengbojiang 	int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
15880c6bd470Sfengbojiang 			BNXT_MIN_LINK_WAIT_CNT;
15894418919fSjohnjiang 
15904418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
15914418919fSjohnjiang 	if (rc)
15924418919fSjohnjiang 		return rc;
1593a9643ea8Slogwang 
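	/*
	 * Poll the firmware for the current link state; when wait_to_complete
	 * is set, keep polling until the link reports up or the wait count
	 * expires.
	 */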
1594a9643ea8Slogwang 	memset(&new, 0, sizeof(new));
1595a9643ea8Slogwang 	do {
1596a9643ea8Slogwang 		/* Retrieve link info from hardware */
1597a9643ea8Slogwang 		rc = bnxt_get_hwrm_link_config(bp, &new);
1598a9643ea8Slogwang 		if (rc) {
1599a9643ea8Slogwang 			new.link_speed = ETH_LINK_SPEED_100M;
1600a9643ea8Slogwang 			new.link_duplex = ETH_LINK_FULL_DUPLEX;
1601d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR,
16022bfe3f2eSlogwang 				"Failed to retrieve link rc = 0x%x!\n", rc);
1603a9643ea8Slogwang 			goto out;
1604a9643ea8Slogwang 		}
1605a9643ea8Slogwang 
16060c6bd470Sfengbojiang 		if (!wait_to_complete || new.link_status)
16072bfe3f2eSlogwang 			break;
16084b05018fSfengbojiang 
16094b05018fSfengbojiang 		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
16104b05018fSfengbojiang 	} while (cnt--);
1611a9643ea8Slogwang 
16120c6bd470Sfengbojiang 	/* Only single function PF can bring phy down.
16130c6bd470Sfengbojiang 	 * When port is stopped, report link down for VF/MH/NPAR functions.
16140c6bd470Sfengbojiang 	 */
16150c6bd470Sfengbojiang 	if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
16160c6bd470Sfengbojiang 		memset(&new, 0, sizeof(new));
16170c6bd470Sfengbojiang 
1618a9643ea8Slogwang out:
16192bfe3f2eSlogwang 	/* Timed out or success */
16202bfe3f2eSlogwang 	if (new.link_status != eth_dev->data->dev_link.link_status ||
16212bfe3f2eSlogwang 	new.link_speed != eth_dev->data->dev_link.link_speed) {
16224418919fSjohnjiang 		rte_eth_linkstatus_set(eth_dev, &new);
1623d30ea906Sjfb8856606 
1624*2d9fd380Sjfb8856606 		rte_eth_dev_callback_process(eth_dev,
1625d30ea906Sjfb8856606 					     RTE_ETH_EVENT_INTR_LSC,
1626d30ea906Sjfb8856606 					     NULL);
1627d30ea906Sjfb8856606 
16282bfe3f2eSlogwang 		bnxt_print_link_info(eth_dev);
16292bfe3f2eSlogwang 	}
16302bfe3f2eSlogwang 
1631a9643ea8Slogwang 	return rc;
1632a9643ea8Slogwang }
1633a9643ea8Slogwang 
16344418919fSjohnjiang static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1635a9643ea8Slogwang {
16364b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1637a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
16384418919fSjohnjiang 	uint32_t old_flags;
16394418919fSjohnjiang 	int rc;
16404418919fSjohnjiang 
16414418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
16424418919fSjohnjiang 	if (rc)
16434418919fSjohnjiang 		return rc;
16444418919fSjohnjiang 
16454418919fSjohnjiang 	/* Filter settings will get applied when port is started */
16464418919fSjohnjiang 	if (!eth_dev->data->dev_started)
16474418919fSjohnjiang 		return 0;
1648a9643ea8Slogwang 
1649a9643ea8Slogwang 	if (bp->vnic_info == NULL)
16504418919fSjohnjiang 		return 0;
1651a9643ea8Slogwang 
16524418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1653a9643ea8Slogwang 
16544418919fSjohnjiang 	old_flags = vnic->flags;
1655a9643ea8Slogwang 	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
16564418919fSjohnjiang 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
16574418919fSjohnjiang 	if (rc != 0)
16584418919fSjohnjiang 		vnic->flags = old_flags;
16594418919fSjohnjiang 
16604418919fSjohnjiang 	return rc;
1661a9643ea8Slogwang }
1662a9643ea8Slogwang 
16634418919fSjohnjiang static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1664a9643ea8Slogwang {
16654b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1666a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
16674418919fSjohnjiang 	uint32_t old_flags;
16684418919fSjohnjiang 	int rc;
16694418919fSjohnjiang 
16704418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
16714418919fSjohnjiang 	if (rc)
16724418919fSjohnjiang 		return rc;
16734418919fSjohnjiang 
16744418919fSjohnjiang 	/* Filter settings will get applied when port is started */
16754418919fSjohnjiang 	if (!eth_dev->data->dev_started)
16764418919fSjohnjiang 		return 0;
1677a9643ea8Slogwang 
1678a9643ea8Slogwang 	if (bp->vnic_info == NULL)
16794418919fSjohnjiang 		return 0;
1680a9643ea8Slogwang 
16814418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1682a9643ea8Slogwang 
16834418919fSjohnjiang 	old_flags = vnic->flags;
1684a9643ea8Slogwang 	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
16854418919fSjohnjiang 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
16864418919fSjohnjiang 	if (rc != 0)
16874418919fSjohnjiang 		vnic->flags = old_flags;
16884418919fSjohnjiang 
16894418919fSjohnjiang 	return rc;
1690a9643ea8Slogwang }
1691a9643ea8Slogwang 
16924418919fSjohnjiang static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1693a9643ea8Slogwang {
16944b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1695a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
16964418919fSjohnjiang 	uint32_t old_flags;
16974418919fSjohnjiang 	int rc;
16984418919fSjohnjiang 
16994418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
17004418919fSjohnjiang 	if (rc)
17014418919fSjohnjiang 		return rc;
17024418919fSjohnjiang 
17034418919fSjohnjiang 	/* Filter settings will get applied when port is started */
17044418919fSjohnjiang 	if (!eth_dev->data->dev_started)
17054418919fSjohnjiang 		return 0;
1706a9643ea8Slogwang 
1707a9643ea8Slogwang 	if (bp->vnic_info == NULL)
17084418919fSjohnjiang 		return 0;
1709a9643ea8Slogwang 
17104418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1711a9643ea8Slogwang 
17124418919fSjohnjiang 	old_flags = vnic->flags;
1713a9643ea8Slogwang 	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
17144418919fSjohnjiang 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
17154418919fSjohnjiang 	if (rc != 0)
17164418919fSjohnjiang 		vnic->flags = old_flags;
17174418919fSjohnjiang 
17184418919fSjohnjiang 	return rc;
1719a9643ea8Slogwang }
1720a9643ea8Slogwang 
17214418919fSjohnjiang static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1722a9643ea8Slogwang {
17234b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1724a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
17254418919fSjohnjiang 	uint32_t old_flags;
17264418919fSjohnjiang 	int rc;
17274418919fSjohnjiang 
17284418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
17294418919fSjohnjiang 	if (rc)
17304418919fSjohnjiang 		return rc;
17314418919fSjohnjiang 
17324418919fSjohnjiang 	/* Filter settings will get applied when port is started */
17334418919fSjohnjiang 	if (!eth_dev->data->dev_started)
17344418919fSjohnjiang 		return 0;
1735a9643ea8Slogwang 
1736a9643ea8Slogwang 	if (bp->vnic_info == NULL)
17374418919fSjohnjiang 		return 0;
1738a9643ea8Slogwang 
17394418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
1740a9643ea8Slogwang 
17414418919fSjohnjiang 	old_flags = vnic->flags;
1742a9643ea8Slogwang 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
17434418919fSjohnjiang 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
17444418919fSjohnjiang 	if (rc != 0)
17454418919fSjohnjiang 		vnic->flags = old_flags;
17464418919fSjohnjiang 
17474418919fSjohnjiang 	return rc;
1748a9643ea8Slogwang }
1749a9643ea8Slogwang 
17504b05018fSfengbojiang /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
17514b05018fSfengbojiang static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
17524b05018fSfengbojiang {
17534b05018fSfengbojiang 	if (qid >= bp->rx_nr_rings)
17544b05018fSfengbojiang 		return NULL;
17554b05018fSfengbojiang 
17564b05018fSfengbojiang 	return bp->eth_dev->data->rx_queues[qid];
17574b05018fSfengbojiang }
17584b05018fSfengbojiang 
17594b05018fSfengbojiang /* Return rxq corresponding to a given rss table ring/group ID. */
17604b05018fSfengbojiang static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
17614b05018fSfengbojiang {
17624418919fSjohnjiang 	struct bnxt_rx_queue *rxq;
17634b05018fSfengbojiang 	unsigned int i;
17644b05018fSfengbojiang 
17654418919fSjohnjiang 	if (!BNXT_HAS_RING_GRPS(bp)) {
17664418919fSjohnjiang 		for (i = 0; i < bp->rx_nr_rings; i++) {
17674418919fSjohnjiang 			rxq = bp->eth_dev->data->rx_queues[i];
17684418919fSjohnjiang 			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
17694418919fSjohnjiang 				return rxq->index;
17704418919fSjohnjiang 		}
17714418919fSjohnjiang 	} else {
17724b05018fSfengbojiang 		for (i = 0; i < bp->rx_nr_rings; i++) {
17734b05018fSfengbojiang 			if (bp->grp_info[i].fw_grp_id == fwr)
17744b05018fSfengbojiang 				return i;
17754b05018fSfengbojiang 		}
17764418919fSjohnjiang 	}
17774b05018fSfengbojiang 
17784b05018fSfengbojiang 	return INVALID_HW_RING_ID;
17794b05018fSfengbojiang }
17804b05018fSfengbojiang 
1781a9643ea8Slogwang static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1782a9643ea8Slogwang 			    struct rte_eth_rss_reta_entry64 *reta_conf,
1783a9643ea8Slogwang 			    uint16_t reta_size)
1784a9643ea8Slogwang {
17854b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1786a9643ea8Slogwang 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1787*2d9fd380Sjfb8856606 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
17884418919fSjohnjiang 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
17894b05018fSfengbojiang 	uint16_t idx, sft;
17904418919fSjohnjiang 	int i, rc;
17914418919fSjohnjiang 
17924418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
17934418919fSjohnjiang 	if (rc)
17944418919fSjohnjiang 		return rc;
1795a9643ea8Slogwang 
17964b05018fSfengbojiang 	if (!vnic->rss_table)
17974b05018fSfengbojiang 		return -EINVAL;
17984b05018fSfengbojiang 
1799a9643ea8Slogwang 	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1800a9643ea8Slogwang 		return -EINVAL;
1801a9643ea8Slogwang 
18024b05018fSfengbojiang 	if (reta_size != tbl_size) {
1803d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1804a9643ea8Slogwang 			"(%d) must equal the size supported by the hardware "
18054b05018fSfengbojiang 			"(%d)\n", reta_size, tbl_size);
1806a9643ea8Slogwang 		return -EINVAL;
1807a9643ea8Slogwang 	}
18084b05018fSfengbojiang 
18094b05018fSfengbojiang 	for (i = 0; i < reta_size; i++) {
18104b05018fSfengbojiang 		struct bnxt_rx_queue *rxq;
18114b05018fSfengbojiang 
18124b05018fSfengbojiang 		idx = i / RTE_RETA_GROUP_SIZE;
18134b05018fSfengbojiang 		sft = i % RTE_RETA_GROUP_SIZE;
18144b05018fSfengbojiang 
18154b05018fSfengbojiang 		if (!(reta_conf[idx].mask & (1ULL << sft)))
18164b05018fSfengbojiang 			continue;
18174b05018fSfengbojiang 
18184b05018fSfengbojiang 		rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
18194b05018fSfengbojiang 		if (!rxq) {
18204b05018fSfengbojiang 			PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
18214b05018fSfengbojiang 			return -EINVAL;
1822a9643ea8Slogwang 		}
18234b05018fSfengbojiang 
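		/*
		 * Thor-based chips have no ring groups: each RSS table slot
		 * holds a pair of FW ids (Rx ring, completion ring), so two
		 * entries are written per RETA slot. Older chips store the
		 * FW ring-group id of the selected queue instead.
		 */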
18244418919fSjohnjiang 		if (BNXT_CHIP_THOR(bp)) {
18254418919fSjohnjiang 			vnic->rss_table[i * 2] =
18264418919fSjohnjiang 				rxq->rx_ring->rx_ring_struct->fw_ring_id;
18274418919fSjohnjiang 			vnic->rss_table[i * 2 + 1] =
18284418919fSjohnjiang 				rxq->cp_ring->cp_ring_struct->fw_ring_id;
18294418919fSjohnjiang 		} else {
18304b05018fSfengbojiang 			vnic->rss_table[i] =
18314b05018fSfengbojiang 			    vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
18324b05018fSfengbojiang 		}
18334418919fSjohnjiang 	}
18344b05018fSfengbojiang 
18354b05018fSfengbojiang 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1836a9643ea8Slogwang 	return 0;
1837a9643ea8Slogwang }
1838a9643ea8Slogwang 
1839a9643ea8Slogwang static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1840a9643ea8Slogwang 			      struct rte_eth_rss_reta_entry64 *reta_conf,
1841a9643ea8Slogwang 			      uint16_t reta_size)
1842a9643ea8Slogwang {
18434b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1844*2d9fd380Sjfb8856606 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
18454418919fSjohnjiang 	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
18464b05018fSfengbojiang 	uint16_t idx, sft, i;
18474418919fSjohnjiang 	int rc;
18484418919fSjohnjiang 
18494418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
18504418919fSjohnjiang 	if (rc)
18514418919fSjohnjiang 		return rc;
1852a9643ea8Slogwang 
1853a9643ea8Slogwang 	/* Retrieve from the default VNIC */
1854a9643ea8Slogwang 	if (!vnic)
1855a9643ea8Slogwang 		return -EINVAL;
1856a9643ea8Slogwang 	if (!vnic->rss_table)
1857a9643ea8Slogwang 		return -EINVAL;
1858a9643ea8Slogwang 
18594b05018fSfengbojiang 	if (reta_size != tbl_size) {
1860d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1861a9643ea8Slogwang 			"(%d) must equal the size supported by the hardware "
18624b05018fSfengbojiang 			"(%d)\n", reta_size, tbl_size);
1863a9643ea8Slogwang 		return -EINVAL;
1864a9643ea8Slogwang 	}
1865a9643ea8Slogwang 
18664b05018fSfengbojiang 	for (idx = 0, i = 0; i < reta_size; i++) {
18674b05018fSfengbojiang 		idx = i / RTE_RETA_GROUP_SIZE;
18684b05018fSfengbojiang 		sft = i % RTE_RETA_GROUP_SIZE;
18694b05018fSfengbojiang 
18704b05018fSfengbojiang 		if (reta_conf[idx].mask & (1ULL << sft)) {
18714b05018fSfengbojiang 			uint16_t qid;
18724b05018fSfengbojiang 
18734418919fSjohnjiang 			if (BNXT_CHIP_THOR(bp))
18744418919fSjohnjiang 				qid = bnxt_rss_to_qid(bp,
18754418919fSjohnjiang 						      vnic->rss_table[i * 2]);
18764418919fSjohnjiang 			else
18774b05018fSfengbojiang 				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
18784b05018fSfengbojiang 
18794b05018fSfengbojiang 			if (qid == INVALID_HW_RING_ID) {
18804b05018fSfengbojiang 				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
18814b05018fSfengbojiang 				return -EINVAL;
18824b05018fSfengbojiang 			}
18834b05018fSfengbojiang 			reta_conf[idx].reta[sft] = qid;
18844b05018fSfengbojiang 		}
18852bfe3f2eSlogwang 	}
18862bfe3f2eSlogwang 
1887a9643ea8Slogwang 	return 0;
1888a9643ea8Slogwang }
1889a9643ea8Slogwang 
1890a9643ea8Slogwang static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1891a9643ea8Slogwang 				   struct rte_eth_rss_conf *rss_conf)
1892a9643ea8Slogwang {
18934b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1894a9643ea8Slogwang 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1895a9643ea8Slogwang 	struct bnxt_vnic_info *vnic;
18964418919fSjohnjiang 	int rc;
18974418919fSjohnjiang 
18984418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
18994418919fSjohnjiang 	if (rc)
19004418919fSjohnjiang 		return rc;
1901a9643ea8Slogwang 
1902a9643ea8Slogwang 	/*
1903a9643ea8Slogwang 	 * If the requested RSS enablement differs from what was set in
1904a9643ea8Slogwang 	 * dev_configure, return -EINVAL.
1905a9643ea8Slogwang 	 */
1906a9643ea8Slogwang 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1907a9643ea8Slogwang 		if (!rss_conf->rss_hf)
1908d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "Hash type NONE\n");
1909a9643ea8Slogwang 	} else {
1910a9643ea8Slogwang 		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1911a9643ea8Slogwang 			return -EINVAL;
1912a9643ea8Slogwang 	}
19132bfe3f2eSlogwang 
19142bfe3f2eSlogwang 	bp->flags |= BNXT_FLAG_UPDATE_HASH;
19150c6bd470Sfengbojiang 	memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
19160c6bd470Sfengbojiang 	       rss_conf,
19170c6bd470Sfengbojiang 	       sizeof(*rss_conf));
19182bfe3f2eSlogwang 
19194418919fSjohnjiang 	/* Update the default RSS VNIC(s) */
1920*2d9fd380Sjfb8856606 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
19214418919fSjohnjiang 	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
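	/* The RSS level bits request outer vs. inner header hashing for tunnelled traffic. */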
1922*2d9fd380Sjfb8856606 	vnic->hash_mode =
1923*2d9fd380Sjfb8856606 		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
1924*2d9fd380Sjfb8856606 					    ETH_RSS_LEVEL(rss_conf->rss_hf));
1925a9643ea8Slogwang 
1926a9643ea8Slogwang 	/*
19274418919fSjohnjiang 	 * If hashkey is not specified, use the previously configured
19284418919fSjohnjiang 	 * hashkey
1929a9643ea8Slogwang 	 */
19304418919fSjohnjiang 	if (!rss_conf->rss_key)
19314418919fSjohnjiang 		goto rss_config;
1932a9643ea8Slogwang 
19334418919fSjohnjiang 	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
19344418919fSjohnjiang 		PMD_DRV_LOG(ERR,
19354418919fSjohnjiang 			    "Invalid hashkey length, should be 16 bytes\n");
19364418919fSjohnjiang 		return -EINVAL;
1937a9643ea8Slogwang 	}
19384418919fSjohnjiang 	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
19394418919fSjohnjiang 
19404418919fSjohnjiang rss_config:
19414418919fSjohnjiang 	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1942a9643ea8Slogwang 	return 0;
1943a9643ea8Slogwang }
1944a9643ea8Slogwang 
1945a9643ea8Slogwang static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1946a9643ea8Slogwang 				     struct rte_eth_rss_conf *rss_conf)
1947a9643ea8Slogwang {
19484b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
1949*2d9fd380Sjfb8856606 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
19504418919fSjohnjiang 	int len, rc;
1951a9643ea8Slogwang 	uint32_t hash_types;
1952a9643ea8Slogwang 
19534418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
19544418919fSjohnjiang 	if (rc)
19554418919fSjohnjiang 		return rc;
19564418919fSjohnjiang 
1957a9643ea8Slogwang 	/* RSS configuration is the same for all VNICs */
1958a9643ea8Slogwang 	if (vnic && vnic->rss_hash_key) {
1959a9643ea8Slogwang 		if (rss_conf->rss_key) {
1960a9643ea8Slogwang 			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1961a9643ea8Slogwang 			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1962a9643ea8Slogwang 			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1963a9643ea8Slogwang 		}
1964a9643ea8Slogwang 
1965a9643ea8Slogwang 		hash_types = vnic->hash_type;
1966a9643ea8Slogwang 		rss_conf->rss_hf = 0;
1967a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1968a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_IPV4;
1969a9643ea8Slogwang 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1970a9643ea8Slogwang 		}
1971a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1972a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1973a9643ea8Slogwang 			hash_types &=
1974a9643ea8Slogwang 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1975a9643ea8Slogwang 		}
1976a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1977a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1978a9643ea8Slogwang 			hash_types &=
1979a9643ea8Slogwang 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1980a9643ea8Slogwang 		}
1981a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1982a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_IPV6;
1983a9643ea8Slogwang 			hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1984a9643ea8Slogwang 		}
1985a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1986a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1987a9643ea8Slogwang 			hash_types &=
1988a9643ea8Slogwang 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1989a9643ea8Slogwang 		}
1990a9643ea8Slogwang 		if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1991a9643ea8Slogwang 			rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1992a9643ea8Slogwang 			hash_types &=
1993a9643ea8Slogwang 				~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1994a9643ea8Slogwang 		}
1995*2d9fd380Sjfb8856606 
1996*2d9fd380Sjfb8856606 		rss_conf->rss_hf |=
1997*2d9fd380Sjfb8856606 			bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode);
1998*2d9fd380Sjfb8856606 
1999a9643ea8Slogwang 		if (hash_types) {
2000d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR,
20010c6bd470Sfengbojiang 				"Unknown RSS config from firmware (%08x), RSS disabled",
2002a9643ea8Slogwang 				vnic->hash_type);
2003a9643ea8Slogwang 			return -ENOTSUP;
2004a9643ea8Slogwang 		}
2005a9643ea8Slogwang 	} else {
2006a9643ea8Slogwang 		rss_conf->rss_hf = 0;
2007a9643ea8Slogwang 	}
2008a9643ea8Slogwang 	return 0;
2009a9643ea8Slogwang }
2010a9643ea8Slogwang 
2011a9643ea8Slogwang static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
20122bfe3f2eSlogwang 			       struct rte_eth_fc_conf *fc_conf)
2013a9643ea8Slogwang {
20144b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
2015a9643ea8Slogwang 	struct rte_eth_link link_info;
2016a9643ea8Slogwang 	int rc;
2017a9643ea8Slogwang 
20184418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
20194418919fSjohnjiang 	if (rc)
20204418919fSjohnjiang 		return rc;
20214418919fSjohnjiang 
2022a9643ea8Slogwang 	rc = bnxt_get_hwrm_link_config(bp, &link_info);
2023a9643ea8Slogwang 	if (rc)
2024a9643ea8Slogwang 		return rc;
2025a9643ea8Slogwang 
2026a9643ea8Slogwang 	memset(fc_conf, 0, sizeof(*fc_conf));
2027*2d9fd380Sjfb8856606 	if (bp->link_info->auto_pause)
2028a9643ea8Slogwang 		fc_conf->autoneg = 1;
2029*2d9fd380Sjfb8856606 	switch (bp->link_info->pause) {
2030a9643ea8Slogwang 	case 0:
2031a9643ea8Slogwang 		fc_conf->mode = RTE_FC_NONE;
2032a9643ea8Slogwang 		break;
2033a9643ea8Slogwang 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
2034a9643ea8Slogwang 		fc_conf->mode = RTE_FC_TX_PAUSE;
2035a9643ea8Slogwang 		break;
2036a9643ea8Slogwang 	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
2037a9643ea8Slogwang 		fc_conf->mode = RTE_FC_RX_PAUSE;
2038a9643ea8Slogwang 		break;
2039a9643ea8Slogwang 	case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
2040a9643ea8Slogwang 			HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
2041a9643ea8Slogwang 		fc_conf->mode = RTE_FC_FULL;
2042a9643ea8Slogwang 		break;
2043a9643ea8Slogwang 	}
2044a9643ea8Slogwang 	return 0;
2045a9643ea8Slogwang }
2046a9643ea8Slogwang 
2047a9643ea8Slogwang static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
2048a9643ea8Slogwang 			       struct rte_eth_fc_conf *fc_conf)
2049a9643ea8Slogwang {
20504b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
20514418919fSjohnjiang 	int rc;
20524418919fSjohnjiang 
20534418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
20544418919fSjohnjiang 	if (rc)
20554418919fSjohnjiang 		return rc;
2056a9643ea8Slogwang 
2057d30ea906Sjfb8856606 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2058d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
20592bfe3f2eSlogwang 		return -ENOTSUP;
20602bfe3f2eSlogwang 	}
20612bfe3f2eSlogwang 
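	/*
	 * Map the requested flow control mode to HWRM pause settings: with
	 * autoneg, advertise the pause abilities (auto_pause); otherwise
	 * force them directly (force_pause).
	 */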
2062a9643ea8Slogwang 	switch (fc_conf->mode) {
2063a9643ea8Slogwang 	case RTE_FC_NONE:
2064*2d9fd380Sjfb8856606 		bp->link_info->auto_pause = 0;
2065*2d9fd380Sjfb8856606 		bp->link_info->force_pause = 0;
2066a9643ea8Slogwang 		break;
2067a9643ea8Slogwang 	case RTE_FC_RX_PAUSE:
2068a9643ea8Slogwang 		if (fc_conf->autoneg) {
2069*2d9fd380Sjfb8856606 			bp->link_info->auto_pause =
2070a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2071*2d9fd380Sjfb8856606 			bp->link_info->force_pause = 0;
2072a9643ea8Slogwang 		} else {
2073*2d9fd380Sjfb8856606 			bp->link_info->auto_pause = 0;
2074*2d9fd380Sjfb8856606 			bp->link_info->force_pause =
2075a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2076a9643ea8Slogwang 		}
2077a9643ea8Slogwang 		break;
2078a9643ea8Slogwang 	case RTE_FC_TX_PAUSE:
2079a9643ea8Slogwang 		if (fc_conf->autoneg) {
2080*2d9fd380Sjfb8856606 			bp->link_info->auto_pause =
2081a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
2082*2d9fd380Sjfb8856606 			bp->link_info->force_pause = 0;
2083a9643ea8Slogwang 		} else {
2084*2d9fd380Sjfb8856606 			bp->link_info->auto_pause = 0;
2085*2d9fd380Sjfb8856606 			bp->link_info->force_pause =
2086a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
2087a9643ea8Slogwang 		}
2088a9643ea8Slogwang 		break;
2089a9643ea8Slogwang 	case RTE_FC_FULL:
2090a9643ea8Slogwang 		if (fc_conf->autoneg) {
2091*2d9fd380Sjfb8856606 			bp->link_info->auto_pause =
2092a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
2093a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
2094*2d9fd380Sjfb8856606 			bp->link_info->force_pause = 0;
2095a9643ea8Slogwang 		} else {
2096*2d9fd380Sjfb8856606 			bp->link_info->auto_pause = 0;
2097*2d9fd380Sjfb8856606 			bp->link_info->force_pause =
2098a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
2099a9643ea8Slogwang 					HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
2100a9643ea8Slogwang 		}
2101a9643ea8Slogwang 		break;
2102a9643ea8Slogwang 	}
2103a9643ea8Slogwang 	return bnxt_set_hwrm_link_config(bp, true);
2104a9643ea8Slogwang }
2105a9643ea8Slogwang 
21062bfe3f2eSlogwang /* Add UDP tunneling port */
21072bfe3f2eSlogwang static int
21082bfe3f2eSlogwang bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
21092bfe3f2eSlogwang 			 struct rte_eth_udp_tunnel *udp_tunnel)
21102bfe3f2eSlogwang {
21114b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
21122bfe3f2eSlogwang 	uint16_t tunnel_type = 0;
21132bfe3f2eSlogwang 	int rc = 0;
21142bfe3f2eSlogwang 
21154418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
21164418919fSjohnjiang 	if (rc)
21174418919fSjohnjiang 		return rc;
21184418919fSjohnjiang 
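	/*
	 * Only one VXLAN and one Geneve destination UDP port can be
	 * programmed; the per-type counters track how many times each port
	 * has been added so it is freed only on the last delete.
	 */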
21192bfe3f2eSlogwang 	switch (udp_tunnel->prot_type) {
21202bfe3f2eSlogwang 	case RTE_TUNNEL_TYPE_VXLAN:
21212bfe3f2eSlogwang 		if (bp->vxlan_port_cnt) {
2122d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
21232bfe3f2eSlogwang 				udp_tunnel->udp_port);
21242bfe3f2eSlogwang 			if (bp->vxlan_port != udp_tunnel->udp_port) {
2125d30ea906Sjfb8856606 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
21262bfe3f2eSlogwang 				return -ENOSPC;
21272bfe3f2eSlogwang 			}
21282bfe3f2eSlogwang 			bp->vxlan_port_cnt++;
21292bfe3f2eSlogwang 			return 0;
21302bfe3f2eSlogwang 		}
21312bfe3f2eSlogwang 		tunnel_type =
21322bfe3f2eSlogwang 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
21332bfe3f2eSlogwang 		bp->vxlan_port_cnt++;
21342bfe3f2eSlogwang 		break;
21352bfe3f2eSlogwang 	case RTE_TUNNEL_TYPE_GENEVE:
21362bfe3f2eSlogwang 		if (bp->geneve_port_cnt) {
2137d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
21382bfe3f2eSlogwang 				udp_tunnel->udp_port);
21392bfe3f2eSlogwang 			if (bp->geneve_port != udp_tunnel->udp_port) {
2140d30ea906Sjfb8856606 				PMD_DRV_LOG(ERR, "Only one port allowed\n");
21412bfe3f2eSlogwang 				return -ENOSPC;
21422bfe3f2eSlogwang 			}
21432bfe3f2eSlogwang 			bp->geneve_port_cnt++;
21442bfe3f2eSlogwang 			return 0;
21452bfe3f2eSlogwang 		}
21462bfe3f2eSlogwang 		tunnel_type =
21472bfe3f2eSlogwang 			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
21482bfe3f2eSlogwang 		bp->geneve_port_cnt++;
21492bfe3f2eSlogwang 		break;
21502bfe3f2eSlogwang 	default:
2151d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
21522bfe3f2eSlogwang 		return -ENOTSUP;
21532bfe3f2eSlogwang 	}
21542bfe3f2eSlogwang 	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
21552bfe3f2eSlogwang 					     tunnel_type);
21562bfe3f2eSlogwang 	return rc;
21572bfe3f2eSlogwang }
21582bfe3f2eSlogwang 
21592bfe3f2eSlogwang static int
21602bfe3f2eSlogwang bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
21612bfe3f2eSlogwang 			 struct rte_eth_udp_tunnel *udp_tunnel)
21622bfe3f2eSlogwang {
21634b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
21642bfe3f2eSlogwang 	uint16_t tunnel_type = 0;
21652bfe3f2eSlogwang 	uint16_t port = 0;
21662bfe3f2eSlogwang 	int rc = 0;
21672bfe3f2eSlogwang 
21684418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
21694418919fSjohnjiang 	if (rc)
21704418919fSjohnjiang 		return rc;
21714418919fSjohnjiang 
21722bfe3f2eSlogwang 	switch (udp_tunnel->prot_type) {
21732bfe3f2eSlogwang 	case RTE_TUNNEL_TYPE_VXLAN:
21742bfe3f2eSlogwang 		if (!bp->vxlan_port_cnt) {
2175d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
21762bfe3f2eSlogwang 			return -EINVAL;
21772bfe3f2eSlogwang 		}
21782bfe3f2eSlogwang 		if (bp->vxlan_port != udp_tunnel->udp_port) {
2179d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
21802bfe3f2eSlogwang 				udp_tunnel->udp_port, bp->vxlan_port);
21812bfe3f2eSlogwang 			return -EINVAL;
21822bfe3f2eSlogwang 		}
21832bfe3f2eSlogwang 		if (--bp->vxlan_port_cnt)
21842bfe3f2eSlogwang 			return 0;
21852bfe3f2eSlogwang 
21862bfe3f2eSlogwang 		tunnel_type =
21872bfe3f2eSlogwang 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
21882bfe3f2eSlogwang 		port = bp->vxlan_fw_dst_port_id;
21892bfe3f2eSlogwang 		break;
21902bfe3f2eSlogwang 	case RTE_TUNNEL_TYPE_GENEVE:
21912bfe3f2eSlogwang 		if (!bp->geneve_port_cnt) {
2192d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
21932bfe3f2eSlogwang 			return -EINVAL;
21942bfe3f2eSlogwang 		}
21952bfe3f2eSlogwang 		if (bp->geneve_port != udp_tunnel->udp_port) {
2196d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
21972bfe3f2eSlogwang 				udp_tunnel->udp_port, bp->geneve_port);
21982bfe3f2eSlogwang 			return -EINVAL;
21992bfe3f2eSlogwang 		}
22002bfe3f2eSlogwang 		if (--bp->geneve_port_cnt)
22012bfe3f2eSlogwang 			return 0;
22022bfe3f2eSlogwang 
22032bfe3f2eSlogwang 		tunnel_type =
22042bfe3f2eSlogwang 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
22052bfe3f2eSlogwang 		port = bp->geneve_fw_dst_port_id;
22062bfe3f2eSlogwang 		break;
22072bfe3f2eSlogwang 	default:
2208d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
22092bfe3f2eSlogwang 		return -ENOTSUP;
22102bfe3f2eSlogwang 	}
22112bfe3f2eSlogwang 
22122bfe3f2eSlogwang 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
22132bfe3f2eSlogwang 	return rc;
22142bfe3f2eSlogwang }
22152bfe3f2eSlogwang 
22162bfe3f2eSlogwang static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
22172bfe3f2eSlogwang {
22184418919fSjohnjiang 	struct bnxt_filter_info *filter;
22192bfe3f2eSlogwang 	struct bnxt_vnic_info *vnic;
22202bfe3f2eSlogwang 	int rc = 0;
22214418919fSjohnjiang 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
22222bfe3f2eSlogwang 
22234418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
22242bfe3f2eSlogwang 	filter = STAILQ_FIRST(&vnic->filter);
22252bfe3f2eSlogwang 	while (filter) {
22264418919fSjohnjiang 		/* Search for this matching MAC+VLAN filter */
22274418919fSjohnjiang 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
22284418919fSjohnjiang 			/* Delete the filter */
22294418919fSjohnjiang 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
22304418919fSjohnjiang 			if (rc)
22314418919fSjohnjiang 				return rc;
22322bfe3f2eSlogwang 			STAILQ_REMOVE(&vnic->filter, filter,
22332bfe3f2eSlogwang 				      bnxt_filter_info, next);
22344418919fSjohnjiang 			bnxt_free_filter(bp, filter);
2235d30ea906Sjfb8856606 			PMD_DRV_LOG(INFO,
22364418919fSjohnjiang 				    "Deleted vlan filter for %d\n",
22372bfe3f2eSlogwang 				    vlan_id);
22384418919fSjohnjiang 			return 0;
22392bfe3f2eSlogwang 		}
22404418919fSjohnjiang 		filter = STAILQ_NEXT(filter, next);
22412bfe3f2eSlogwang 	}
22424418919fSjohnjiang 	return -ENOENT;
22432bfe3f2eSlogwang }
22442bfe3f2eSlogwang 
22452bfe3f2eSlogwang static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
22462bfe3f2eSlogwang {
22474418919fSjohnjiang 	struct bnxt_filter_info *filter;
22482bfe3f2eSlogwang 	struct bnxt_vnic_info *vnic;
22492bfe3f2eSlogwang 	int rc = 0;
2250579bf1e2Sjfb8856606 	uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2251579bf1e2Sjfb8856606 		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2252579bf1e2Sjfb8856606 	uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
22532bfe3f2eSlogwang 
22544418919fSjohnjiang 	/* Implementation notes on the use of VNIC in this command:
22554418919fSjohnjiang 	 *
22564418919fSjohnjiang 	 * By default, these filters belong to default vnic for the function.
22574418919fSjohnjiang 	 * Once these filters are set up, only destination VNIC can be modified.
22584418919fSjohnjiang 	 * If the destination VNIC is not specified in this command,
22594418919fSjohnjiang 	 * then the HWRM shall only create an l2 context id.
22602bfe3f2eSlogwang 	 */
22612bfe3f2eSlogwang 
22624418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
22634418919fSjohnjiang 	filter = STAILQ_FIRST(&vnic->filter);
22644418919fSjohnjiang 	/* Check if the VLAN has already been added */
22654418919fSjohnjiang 	while (filter) {
22664418919fSjohnjiang 		if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
22674418919fSjohnjiang 			return -EEXIST;
22684418919fSjohnjiang 
22694418919fSjohnjiang 		filter = STAILQ_NEXT(filter, next);
22702bfe3f2eSlogwang 	}
22714418919fSjohnjiang 
22724418919fSjohnjiang 	/* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
22734418919fSjohnjiang 	 * command to create MAC+VLAN filter with the right flags, enables set.
22744418919fSjohnjiang 	 */
22754418919fSjohnjiang 	filter = bnxt_alloc_filter(bp);
22764418919fSjohnjiang 	if (!filter) {
2277d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
22782bfe3f2eSlogwang 			    "MAC/VLAN filter alloc failed\n");
22794418919fSjohnjiang 		return -ENOMEM;
22802bfe3f2eSlogwang 	}
22812bfe3f2eSlogwang 	/* MAC + VLAN ID filter */
22824418919fSjohnjiang 	/* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
22834418919fSjohnjiang 	 * untagged packets are received
22844418919fSjohnjiang 	 *
22854418919fSjohnjiang 	 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged
22864418919fSjohnjiang 	 * packets and only the programmed vlan's packets are received
22874418919fSjohnjiang 	 */
22884418919fSjohnjiang 	filter->l2_ivlan = vlan_id;
22894418919fSjohnjiang 	filter->l2_ivlan_mask = 0x0FFF;
22904418919fSjohnjiang 	filter->enables |= en;
22914418919fSjohnjiang 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
22924418919fSjohnjiang 
22934418919fSjohnjiang 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
22944418919fSjohnjiang 	if (rc) {
22954418919fSjohnjiang 		/* Free the newly allocated filter as we were
22964418919fSjohnjiang 		 * not able to create the filter in hardware.
22974418919fSjohnjiang 		 */
22984418919fSjohnjiang 		bnxt_free_filter(bp, filter);
22994418919fSjohnjiang 		return rc;
23004418919fSjohnjiang 	}
23014418919fSjohnjiang 
23024418919fSjohnjiang 	filter->mac_index = 0;
23034418919fSjohnjiang 	/* Add this new filter to the list */
23044418919fSjohnjiang 	if (vlan_id == 0)
23054418919fSjohnjiang 		STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
23064418919fSjohnjiang 	else
23074418919fSjohnjiang 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
23084418919fSjohnjiang 
2309d30ea906Sjfb8856606 	PMD_DRV_LOG(INFO,
23102bfe3f2eSlogwang 		    "Added Vlan filter for %d\n", vlan_id);
23112bfe3f2eSlogwang 	return rc;
23122bfe3f2eSlogwang }
23132bfe3f2eSlogwang 
23142bfe3f2eSlogwang static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
23152bfe3f2eSlogwang 		uint16_t vlan_id, int on)
23162bfe3f2eSlogwang {
23174b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
23184418919fSjohnjiang 	int rc;
23194418919fSjohnjiang 
23204418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
23214418919fSjohnjiang 	if (rc)
23224418919fSjohnjiang 		return rc;
23232bfe3f2eSlogwang 
23240c6bd470Sfengbojiang 	if (!eth_dev->data->dev_started) {
23250c6bd470Sfengbojiang 		PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
23260c6bd470Sfengbojiang 		return -EINVAL;
23270c6bd470Sfengbojiang 	}
23280c6bd470Sfengbojiang 
23292bfe3f2eSlogwang 	/* These operations apply to ALL existing MAC/VLAN filters */
23302bfe3f2eSlogwang 	if (on)
23312bfe3f2eSlogwang 		return bnxt_add_vlan_filter(bp, vlan_id);
23322bfe3f2eSlogwang 	else
23332bfe3f2eSlogwang 		return bnxt_del_vlan_filter(bp, vlan_id);
23342bfe3f2eSlogwang }
23352bfe3f2eSlogwang 
23364418919fSjohnjiang static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
23374418919fSjohnjiang 				    struct bnxt_vnic_info *vnic)
23382bfe3f2eSlogwang {
23394418919fSjohnjiang 	struct bnxt_filter_info *filter;
23404418919fSjohnjiang 	int rc;
23412bfe3f2eSlogwang 
23424418919fSjohnjiang 	filter = STAILQ_FIRST(&vnic->filter);
23434418919fSjohnjiang 	while (filter) {
23444418919fSjohnjiang 		if (filter->mac_index == 0 &&
23454418919fSjohnjiang 		    !memcmp(filter->l2_addr, bp->mac_addr,
23464418919fSjohnjiang 			    RTE_ETHER_ADDR_LEN)) {
23474418919fSjohnjiang 			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
23484418919fSjohnjiang 			if (!rc) {
23494418919fSjohnjiang 				STAILQ_REMOVE(&vnic->filter, filter,
23504418919fSjohnjiang 					      bnxt_filter_info, next);
23514418919fSjohnjiang 				bnxt_free_filter(bp, filter);
23524418919fSjohnjiang 			}
23534418919fSjohnjiang 			return rc;
23544418919fSjohnjiang 		}
23554418919fSjohnjiang 		filter = STAILQ_NEXT(filter, next);
23564418919fSjohnjiang 	}
23574418919fSjohnjiang 	return 0;
23584418919fSjohnjiang }
23594418919fSjohnjiang 
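/* Apply the requested VLAN filtering state.  With the offload disabled,
 * every per-VLAN filter is removed and the plain MAC filter is restored;
 * with it enabled, the default MAC filter is replaced by a VLAN 0
 * (untagged-only) filter.
 */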
23604418919fSjohnjiang static int
23614418919fSjohnjiang bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
23624418919fSjohnjiang {
23634418919fSjohnjiang 	struct bnxt_vnic_info *vnic;
23644418919fSjohnjiang 	unsigned int i;
23654418919fSjohnjiang 	int rc;
23664418919fSjohnjiang 
23674418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
2368d30ea906Sjfb8856606 	if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
23692bfe3f2eSlogwang 		/* Remove any VLAN filters programmed */
23704418919fSjohnjiang 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
23712bfe3f2eSlogwang 			bnxt_del_vlan_filter(bp, i);
23724418919fSjohnjiang 
23734418919fSjohnjiang 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
23744418919fSjohnjiang 		if (rc)
23754418919fSjohnjiang 			return rc;
23764418919fSjohnjiang 	} else {
23774418919fSjohnjiang 		/* Default filter will allow packets that match the
23784418919fSjohnjiang 		 * dest mac. So, it has to be deleted, otherwise, we
23794418919fSjohnjiang 		 * will endup receiving vlan packets for which the
23804418919fSjohnjiang 		 * will end up receiving vlan packets for which the
23814418919fSjohnjiang 		 * configuration is ON
23824418919fSjohnjiang 		 */
23834418919fSjohnjiang 		bnxt_del_dflt_mac_filter(bp, vnic);
23844418919fSjohnjiang 		/* This filter will allow only untagged packets */
23854418919fSjohnjiang 		bnxt_add_vlan_filter(bp, 0);
23862bfe3f2eSlogwang 	}
2387d30ea906Sjfb8856606 	PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2388d30ea906Sjfb8856606 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
23894418919fSjohnjiang 
23904418919fSjohnjiang 	return 0;
23914418919fSjohnjiang }
23924418919fSjohnjiang 
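/* Tear down one VNIC: drop its VLAN and default MAC filters, free the
 * VNIC in firmware and release its ring group table.
 */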
23934418919fSjohnjiang static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
23944418919fSjohnjiang {
23954418919fSjohnjiang 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
23964418919fSjohnjiang 	unsigned int i;
23974418919fSjohnjiang 	int rc;
23984418919fSjohnjiang 
23994418919fSjohnjiang 	/* Destroy vnic filters and vnic */
24004418919fSjohnjiang 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
24014418919fSjohnjiang 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
24024418919fSjohnjiang 		for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
24034418919fSjohnjiang 			bnxt_del_vlan_filter(bp, i);
24044418919fSjohnjiang 	}
24054418919fSjohnjiang 	bnxt_del_dflt_mac_filter(bp, vnic);
24064418919fSjohnjiang 
24074418919fSjohnjiang 	rc = bnxt_hwrm_vnic_free(bp, vnic);
24084418919fSjohnjiang 	if (rc)
24094418919fSjohnjiang 		return rc;
24104418919fSjohnjiang 
24114418919fSjohnjiang 	rte_free(vnic->fw_grp_ids);
24124418919fSjohnjiang 	vnic->fw_grp_ids = NULL;
24134418919fSjohnjiang 
24140c6bd470Sfengbojiang 	vnic->rx_queue_cnt = 0;
24150c6bd470Sfengbojiang 
24164418919fSjohnjiang 	return 0;
24174418919fSjohnjiang }
24184418919fSjohnjiang 
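/* Reconfigure VLAN stripping by destroying and recreating default VNIC 0,
 * then reprogramming the appropriate filters and the Rx mask.
 */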
24194418919fSjohnjiang static int
24204418919fSjohnjiang bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
24214418919fSjohnjiang {
24224418919fSjohnjiang 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
24234418919fSjohnjiang 	int rc;
24244418919fSjohnjiang 
24254418919fSjohnjiang 	/* Destroy, recreate and reconfigure the default vnic */
24264418919fSjohnjiang 	rc = bnxt_free_one_vnic(bp, 0);
24274418919fSjohnjiang 	if (rc)
24284418919fSjohnjiang 		return rc;
24294418919fSjohnjiang 
24304418919fSjohnjiang 	/* default vnic 0 */
24314418919fSjohnjiang 	rc = bnxt_setup_one_vnic(bp, 0);
24324418919fSjohnjiang 	if (rc)
24334418919fSjohnjiang 		return rc;
24344418919fSjohnjiang 
24354418919fSjohnjiang 	if (bp->eth_dev->data->dev_conf.rxmode.offloads &
24364418919fSjohnjiang 	    DEV_RX_OFFLOAD_VLAN_FILTER) {
24374418919fSjohnjiang 		rc = bnxt_add_vlan_filter(bp, 0);
24384418919fSjohnjiang 		if (rc)
24394418919fSjohnjiang 			return rc;
24404418919fSjohnjiang 		rc = bnxt_restore_vlan_filters(bp);
24414418919fSjohnjiang 		if (rc)
24424418919fSjohnjiang 			return rc;
24434418919fSjohnjiang 	} else {
24444418919fSjohnjiang 		rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
24454418919fSjohnjiang 		if (rc)
24464418919fSjohnjiang 			return rc;
24474418919fSjohnjiang 	}
24484418919fSjohnjiang 
24494418919fSjohnjiang 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
24504418919fSjohnjiang 	if (rc)
24514418919fSjohnjiang 		return rc;
24524418919fSjohnjiang 
24534418919fSjohnjiang 	PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
24544418919fSjohnjiang 		    !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
24554418919fSjohnjiang 
24564418919fSjohnjiang 	return rc;
24574418919fSjohnjiang }
24584418919fSjohnjiang 
24594418919fSjohnjiang static int
24604418919fSjohnjiang bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
24614418919fSjohnjiang {
24624418919fSjohnjiang 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
24634418919fSjohnjiang 	struct bnxt *bp = dev->data->dev_private;
24644418919fSjohnjiang 	int rc;
24654418919fSjohnjiang 
24664418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
24674418919fSjohnjiang 	if (rc)
24684418919fSjohnjiang 		return rc;
24694418919fSjohnjiang 
24704418919fSjohnjiang 	/* Filter settings will get applied when port is started */
24714418919fSjohnjiang 	if (!dev->data->dev_started)
24724418919fSjohnjiang 		return 0;
24734418919fSjohnjiang 
24744418919fSjohnjiang 	if (mask & ETH_VLAN_FILTER_MASK) {
24754418919fSjohnjiang 		/* Enable or disable VLAN filtering */
24764418919fSjohnjiang 		rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
24774418919fSjohnjiang 		if (rc)
24784418919fSjohnjiang 			return rc;
24792bfe3f2eSlogwang 	}
24802bfe3f2eSlogwang 
24812bfe3f2eSlogwang 	if (mask & ETH_VLAN_STRIP_MASK) {
24822bfe3f2eSlogwang 		/* Enable or disable VLAN stripping */
24834418919fSjohnjiang 		rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
24844418919fSjohnjiang 		if (rc)
24854418919fSjohnjiang 			return rc;
24862bfe3f2eSlogwang 	}
24872bfe3f2eSlogwang 
24884418919fSjohnjiang 	if (mask & ETH_VLAN_EXTEND_MASK) {
24894418919fSjohnjiang 		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
24904418919fSjohnjiang 			PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
24914418919fSjohnjiang 		else
24924418919fSjohnjiang 			PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
24934418919fSjohnjiang 	}
24942bfe3f2eSlogwang 
24952bfe3f2eSlogwang 	return 0;
24962bfe3f2eSlogwang }
24972bfe3f2eSlogwang 
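/* Set the TPID used for QinQ acceleration.  Only the outer TPID can be
 * changed, and only while the VLAN-extend (QinQ) Rx offload is enabled;
 * the requested TPID is mapped to the matching Tx BD meta VLAN TPID
 * encoding.
 */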
2498d30ea906Sjfb8856606 static int
24994418919fSjohnjiang bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
25004418919fSjohnjiang 		      uint16_t tpid)
25014418919fSjohnjiang {
25024418919fSjohnjiang 	struct bnxt *bp = dev->data->dev_private;
25034418919fSjohnjiang 	int qinq = dev->data->dev_conf.rxmode.offloads &
25044418919fSjohnjiang 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
25054418919fSjohnjiang 
25064418919fSjohnjiang 	if (vlan_type != ETH_VLAN_TYPE_INNER &&
25074418919fSjohnjiang 	    vlan_type != ETH_VLAN_TYPE_OUTER) {
25084418919fSjohnjiang 		PMD_DRV_LOG(ERR,
25094418919fSjohnjiang 			    "Unsupported vlan type.");
25104418919fSjohnjiang 		return -EINVAL;
25114418919fSjohnjiang 	}
25124418919fSjohnjiang 	if (!qinq) {
25134418919fSjohnjiang 		PMD_DRV_LOG(ERR,
25144418919fSjohnjiang 			    "QinQ not enabled. Needs to be ON as we can "
25154418919fSjohnjiang 			    "accelerate only outer vlan\n");
25164418919fSjohnjiang 		return -EINVAL;
25174418919fSjohnjiang 	}
25184418919fSjohnjiang 
25194418919fSjohnjiang 	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
25204418919fSjohnjiang 		switch (tpid) {
25214418919fSjohnjiang 		case RTE_ETHER_TYPE_QINQ:
25224418919fSjohnjiang 			bp->outer_tpid_bd =
25234418919fSjohnjiang 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
25244418919fSjohnjiang 			break;
25254418919fSjohnjiang 		case RTE_ETHER_TYPE_VLAN:
25264418919fSjohnjiang 			bp->outer_tpid_bd =
25274418919fSjohnjiang 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
25284418919fSjohnjiang 			break;
2529*2d9fd380Sjfb8856606 		case RTE_ETHER_TYPE_QINQ1:
25304418919fSjohnjiang 			bp->outer_tpid_bd =
25314418919fSjohnjiang 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
25324418919fSjohnjiang 			break;
2533*2d9fd380Sjfb8856606 		case RTE_ETHER_TYPE_QINQ2:
25344418919fSjohnjiang 			bp->outer_tpid_bd =
25354418919fSjohnjiang 				TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
25364418919fSjohnjiang 			break;
2537*2d9fd380Sjfb8856606 		case RTE_ETHER_TYPE_QINQ3:
25384418919fSjohnjiang 			bp->outer_tpid_bd =
25394418919fSjohnjiang 				 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
25404418919fSjohnjiang 			break;
25414418919fSjohnjiang 		default:
25424418919fSjohnjiang 			PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
25434418919fSjohnjiang 			return -EINVAL;
25444418919fSjohnjiang 		}
25454418919fSjohnjiang 		bp->outer_tpid_bd |= tpid;
25464418919fSjohnjiang 		PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
25474418919fSjohnjiang 	} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
25484418919fSjohnjiang 		PMD_DRV_LOG(ERR,
25494418919fSjohnjiang 			    "Can accelerate only outer vlan in QinQ\n");
25504418919fSjohnjiang 		return -EINVAL;
25514418919fSjohnjiang 	}
25524418919fSjohnjiang 
25534418919fSjohnjiang 	return 0;
25544418919fSjohnjiang }
25554418919fSjohnjiang 
25564418919fSjohnjiang static int
25574418919fSjohnjiang bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
25584418919fSjohnjiang 			     struct rte_ether_addr *addr)
25592bfe3f2eSlogwang {
25604b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
25612bfe3f2eSlogwang 	/* Default Filter is tied to VNIC 0 */
2562*2d9fd380Sjfb8856606 	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
25632bfe3f2eSlogwang 	int rc;
25642bfe3f2eSlogwang 
25654418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
25664418919fSjohnjiang 	if (rc)
25674418919fSjohnjiang 		return rc;
25684418919fSjohnjiang 
2569d30ea906Sjfb8856606 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2570d30ea906Sjfb8856606 		return -EPERM;
25712bfe3f2eSlogwang 
25724418919fSjohnjiang 	if (rte_is_zero_ether_addr(addr))
25734b05018fSfengbojiang 		return -EINVAL;
25742bfe3f2eSlogwang 
25754418919fSjohnjiang 	/* Filter settings will get applied when port is started */
25764418919fSjohnjiang 	if (!dev->data->dev_started)
25774b05018fSfengbojiang 		return 0;
25784418919fSjohnjiang 
25794418919fSjohnjiang 	/* Check if the requested MAC is already added */
25804418919fSjohnjiang 	if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
25814418919fSjohnjiang 		return 0;
25824418919fSjohnjiang 
25834418919fSjohnjiang 	/* Destroy filter and re-create it */
25844418919fSjohnjiang 	bnxt_del_dflt_mac_filter(bp, vnic);
25854418919fSjohnjiang 
25864418919fSjohnjiang 	memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
25874418919fSjohnjiang 	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
25884418919fSjohnjiang 		/* This filter will allow only untagged packets */
25894418919fSjohnjiang 		rc = bnxt_add_vlan_filter(bp, 0);
25904418919fSjohnjiang 	} else {
25914418919fSjohnjiang 		rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
25922bfe3f2eSlogwang 	}
2593d30ea906Sjfb8856606 
25944418919fSjohnjiang 	PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
25954418919fSjohnjiang 	return rc;
25962bfe3f2eSlogwang }
25972bfe3f2eSlogwang 
25982bfe3f2eSlogwang static int
25992bfe3f2eSlogwang bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
26004418919fSjohnjiang 			  struct rte_ether_addr *mc_addr_set,
26012bfe3f2eSlogwang 			  uint32_t nb_mc_addr)
26022bfe3f2eSlogwang {
26034b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
26042bfe3f2eSlogwang 	char *mc_addr_list = (char *)mc_addr_set;
26052bfe3f2eSlogwang 	struct bnxt_vnic_info *vnic;
26062bfe3f2eSlogwang 	uint32_t off = 0, i = 0;
26074418919fSjohnjiang 	int rc;
26082bfe3f2eSlogwang 
26094418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
26104418919fSjohnjiang 	if (rc)
26114418919fSjohnjiang 		return rc;
26124418919fSjohnjiang 
26134418919fSjohnjiang 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
26142bfe3f2eSlogwang 
26152bfe3f2eSlogwang 	if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
26162bfe3f2eSlogwang 		vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
26172bfe3f2eSlogwang 		goto allmulti;
26182bfe3f2eSlogwang 	}
26192bfe3f2eSlogwang 
26202bfe3f2eSlogwang 	/* TODO Check for Duplicate mcast addresses */
26212bfe3f2eSlogwang 	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
26222bfe3f2eSlogwang 	for (i = 0; i < nb_mc_addr; i++) {
26234418919fSjohnjiang 		memcpy(vnic->mc_list + off, &mc_addr_list[i],
26244418919fSjohnjiang 			RTE_ETHER_ADDR_LEN);
26254418919fSjohnjiang 		off += RTE_ETHER_ADDR_LEN;
26262bfe3f2eSlogwang 	}
26272bfe3f2eSlogwang 
26282bfe3f2eSlogwang 	vnic->mc_addr_cnt = i;
26294418919fSjohnjiang 	if (vnic->mc_addr_cnt)
26304418919fSjohnjiang 		vnic->flags |= BNXT_VNIC_INFO_MCAST;
26314418919fSjohnjiang 	else
26324418919fSjohnjiang 		vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
26332bfe3f2eSlogwang 
26342bfe3f2eSlogwang allmulti:
26352bfe3f2eSlogwang 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
26362bfe3f2eSlogwang }
26372bfe3f2eSlogwang 
26382bfe3f2eSlogwang static int
26392bfe3f2eSlogwang bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
26402bfe3f2eSlogwang {
26414b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
26422bfe3f2eSlogwang 	uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
26432bfe3f2eSlogwang 	uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
26442bfe3f2eSlogwang 	uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
26450c6bd470Sfengbojiang 	uint8_t fw_rsvd = bp->fw_ver & 0xff;
26462bfe3f2eSlogwang 	int ret;
26472bfe3f2eSlogwang 
26480c6bd470Sfengbojiang 	ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
26490c6bd470Sfengbojiang 			fw_major, fw_minor, fw_updt, fw_rsvd);
26502bfe3f2eSlogwang 
26512bfe3f2eSlogwang 	ret += 1; /* add the size of '\0' */
26522bfe3f2eSlogwang 	if (fw_size < (uint32_t)ret)
26532bfe3f2eSlogwang 		return ret;
26542bfe3f2eSlogwang 	else
26552bfe3f2eSlogwang 		return 0;
26562bfe3f2eSlogwang }
26572bfe3f2eSlogwang 
26582bfe3f2eSlogwang static void
26592bfe3f2eSlogwang bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
26602bfe3f2eSlogwang 	struct rte_eth_rxq_info *qinfo)
26612bfe3f2eSlogwang {
26624418919fSjohnjiang 	struct bnxt *bp = dev->data->dev_private;
26632bfe3f2eSlogwang 	struct bnxt_rx_queue *rxq;
26642bfe3f2eSlogwang 
26654418919fSjohnjiang 	if (is_bnxt_in_error(bp))
26664418919fSjohnjiang 		return;
26674418919fSjohnjiang 
26682bfe3f2eSlogwang 	rxq = dev->data->rx_queues[queue_id];
26692bfe3f2eSlogwang 
26702bfe3f2eSlogwang 	qinfo->mp = rxq->mb_pool;
26712bfe3f2eSlogwang 	qinfo->scattered_rx = dev->data->scattered_rx;
26722bfe3f2eSlogwang 	qinfo->nb_desc = rxq->nb_rx_desc;
26732bfe3f2eSlogwang 
26742bfe3f2eSlogwang 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
26750c6bd470Sfengbojiang 	qinfo->conf.rx_drop_en = rxq->drop_en;
26764418919fSjohnjiang 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
26770c6bd470Sfengbojiang 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
26782bfe3f2eSlogwang }
26792bfe3f2eSlogwang 
26802bfe3f2eSlogwang static void
26812bfe3f2eSlogwang bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
26822bfe3f2eSlogwang 	struct rte_eth_txq_info *qinfo)
26832bfe3f2eSlogwang {
26844418919fSjohnjiang 	struct bnxt *bp = dev->data->dev_private;
26852bfe3f2eSlogwang 	struct bnxt_tx_queue *txq;
26862bfe3f2eSlogwang 
26874418919fSjohnjiang 	if (is_bnxt_in_error(bp))
26884418919fSjohnjiang 		return;
26894418919fSjohnjiang 
26902bfe3f2eSlogwang 	txq = dev->data->tx_queues[queue_id];
26912bfe3f2eSlogwang 
26922bfe3f2eSlogwang 	qinfo->nb_desc = txq->nb_tx_desc;
26932bfe3f2eSlogwang 
26942bfe3f2eSlogwang 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
26952bfe3f2eSlogwang 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
26962bfe3f2eSlogwang 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
26972bfe3f2eSlogwang 
26982bfe3f2eSlogwang 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
26992bfe3f2eSlogwang 	qinfo->conf.tx_rs_thresh = 0;
27002bfe3f2eSlogwang 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2701*2d9fd380Sjfb8856606 	qinfo->conf.offloads = txq->offloads;
2702*2d9fd380Sjfb8856606 }
2703*2d9fd380Sjfb8856606 
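/* Lookup tables mapping the active Rx/Tx burst function pointers to the
 * human-readable mode strings reported by the burst_mode_get callbacks
 * below.
 */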
2704*2d9fd380Sjfb8856606 static const struct {
2705*2d9fd380Sjfb8856606 	eth_rx_burst_t pkt_burst;
2706*2d9fd380Sjfb8856606 	const char *info;
2707*2d9fd380Sjfb8856606 } bnxt_rx_burst_info[] = {
2708*2d9fd380Sjfb8856606 	{bnxt_recv_pkts,	"Scalar"},
2709*2d9fd380Sjfb8856606 #if defined(RTE_ARCH_X86)
2710*2d9fd380Sjfb8856606 	{bnxt_recv_pkts_vec,	"Vector SSE"},
2711*2d9fd380Sjfb8856606 #elif defined(RTE_ARCH_ARM64)
2712*2d9fd380Sjfb8856606 	{bnxt_recv_pkts_vec,	"Vector Neon"},
2713*2d9fd380Sjfb8856606 #endif
2714*2d9fd380Sjfb8856606 };
2715*2d9fd380Sjfb8856606 
2716*2d9fd380Sjfb8856606 static int
2717*2d9fd380Sjfb8856606 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2718*2d9fd380Sjfb8856606 		       struct rte_eth_burst_mode *mode)
2719*2d9fd380Sjfb8856606 {
2720*2d9fd380Sjfb8856606 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2721*2d9fd380Sjfb8856606 	size_t i;
2722*2d9fd380Sjfb8856606 
2723*2d9fd380Sjfb8856606 	for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) {
2724*2d9fd380Sjfb8856606 		if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) {
2725*2d9fd380Sjfb8856606 			snprintf(mode->info, sizeof(mode->info), "%s",
2726*2d9fd380Sjfb8856606 				 bnxt_rx_burst_info[i].info);
2727*2d9fd380Sjfb8856606 			return 0;
2728*2d9fd380Sjfb8856606 		}
2729*2d9fd380Sjfb8856606 	}
2730*2d9fd380Sjfb8856606 
2731*2d9fd380Sjfb8856606 	return -EINVAL;
2732*2d9fd380Sjfb8856606 }
2733*2d9fd380Sjfb8856606 
2734*2d9fd380Sjfb8856606 static const struct {
2735*2d9fd380Sjfb8856606 	eth_tx_burst_t pkt_burst;
2736*2d9fd380Sjfb8856606 	const char *info;
2737*2d9fd380Sjfb8856606 } bnxt_tx_burst_info[] = {
2738*2d9fd380Sjfb8856606 	{bnxt_xmit_pkts,	"Scalar"},
2739*2d9fd380Sjfb8856606 #if defined(RTE_ARCH_X86)
2740*2d9fd380Sjfb8856606 	{bnxt_xmit_pkts_vec,	"Vector SSE"},
2741*2d9fd380Sjfb8856606 #elif defined(RTE_ARCH_ARM64)
2742*2d9fd380Sjfb8856606 	{bnxt_xmit_pkts_vec,	"Vector Neon"},
2743*2d9fd380Sjfb8856606 #endif
2744*2d9fd380Sjfb8856606 };
2745*2d9fd380Sjfb8856606 
2746*2d9fd380Sjfb8856606 static int
2747*2d9fd380Sjfb8856606 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2748*2d9fd380Sjfb8856606 		       struct rte_eth_burst_mode *mode)
2749*2d9fd380Sjfb8856606 {
2750*2d9fd380Sjfb8856606 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
2751*2d9fd380Sjfb8856606 	size_t i;
2752*2d9fd380Sjfb8856606 
2753*2d9fd380Sjfb8856606 	for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) {
2754*2d9fd380Sjfb8856606 		if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) {
2755*2d9fd380Sjfb8856606 			snprintf(mode->info, sizeof(mode->info), "%s",
2756*2d9fd380Sjfb8856606 				 bnxt_tx_burst_info[i].info);
2757*2d9fd380Sjfb8856606 			return 0;
2758*2d9fd380Sjfb8856606 		}
2759*2d9fd380Sjfb8856606 	}
2760*2d9fd380Sjfb8856606 
2761*2d9fd380Sjfb8856606 	return -EINVAL;
27622bfe3f2eSlogwang }
27632bfe3f2eSlogwang 
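/* Change the MTU.  While the port is running, the new frame size must fit
 * in a single Rx buffer unless scattered Rx is already enabled; the jumbo
 * flag and offload are toggled accordingly and the MRU of every VNIC is
 * updated to match.
 */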
27644418919fSjohnjiang int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
27652bfe3f2eSlogwang {
27662bfe3f2eSlogwang 	struct bnxt *bp = eth_dev->data->dev_private;
27674418919fSjohnjiang 	uint32_t new_pkt_size;
27682bfe3f2eSlogwang 	uint32_t rc = 0;
27692bfe3f2eSlogwang 	uint32_t i;
27702bfe3f2eSlogwang 
27714418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
27724418919fSjohnjiang 	if (rc)
27734418919fSjohnjiang 		return rc;
27742bfe3f2eSlogwang 
27754418919fSjohnjiang 	/* Exit if receive queues are not configured yet */
27764418919fSjohnjiang 	if (!eth_dev->data->nb_rx_queues)
27774418919fSjohnjiang 		return rc;
27784418919fSjohnjiang 
27794418919fSjohnjiang 	new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
27804418919fSjohnjiang 		       VLAN_TAG_SIZE * BNXT_NUM_VLANS;
27814418919fSjohnjiang 
27824418919fSjohnjiang 	/*
2783*2d9fd380Sjfb8856606 	 * Disallow any MTU change that would require scattered receive support
2784*2d9fd380Sjfb8856606 	 * if it is not already enabled.
27854418919fSjohnjiang 	 */
27864418919fSjohnjiang 	if (eth_dev->data->dev_started &&
2787*2d9fd380Sjfb8856606 	    !eth_dev->data->scattered_rx &&
27884418919fSjohnjiang 	    (new_pkt_size >
27894418919fSjohnjiang 	     eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
27904418919fSjohnjiang 		PMD_DRV_LOG(ERR,
27914418919fSjohnjiang 			    "MTU change would require scattered rx support. ");
27924418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
27932bfe3f2eSlogwang 		return -EINVAL;
27942bfe3f2eSlogwang 	}
27952bfe3f2eSlogwang 
27964418919fSjohnjiang 	if (new_mtu > RTE_ETHER_MTU) {
27972bfe3f2eSlogwang 		bp->flags |= BNXT_FLAG_JUMBO;
2798d30ea906Sjfb8856606 		bp->eth_dev->data->dev_conf.rxmode.offloads |=
2799d30ea906Sjfb8856606 			DEV_RX_OFFLOAD_JUMBO_FRAME;
28002bfe3f2eSlogwang 	} else {
2801d30ea906Sjfb8856606 		bp->eth_dev->data->dev_conf.rxmode.offloads &=
2802d30ea906Sjfb8856606 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
28032bfe3f2eSlogwang 		bp->flags &= ~BNXT_FLAG_JUMBO;
28042bfe3f2eSlogwang 	}
28052bfe3f2eSlogwang 
28064418919fSjohnjiang 	/* Is there a change in mtu setting? */
28074418919fSjohnjiang 	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
28084418919fSjohnjiang 		return rc;
28092bfe3f2eSlogwang 
28102bfe3f2eSlogwang 	for (i = 0; i < bp->nr_vnics; i++) {
28112bfe3f2eSlogwang 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2812579bf1e2Sjfb8856606 		uint16_t size = 0;
28132bfe3f2eSlogwang 
28144418919fSjohnjiang 		vnic->mru = BNXT_VNIC_MRU(new_mtu);
28152bfe3f2eSlogwang 		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
28162bfe3f2eSlogwang 		if (rc)
28172bfe3f2eSlogwang 			break;
28182bfe3f2eSlogwang 
2819579bf1e2Sjfb8856606 		size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2820579bf1e2Sjfb8856606 		size -= RTE_PKTMBUF_HEADROOM;
2821579bf1e2Sjfb8856606 
2822579bf1e2Sjfb8856606 		if (size < new_mtu) {
28232bfe3f2eSlogwang 			rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
28242bfe3f2eSlogwang 			if (rc)
28252bfe3f2eSlogwang 				return rc;
28262bfe3f2eSlogwang 		}
2827579bf1e2Sjfb8856606 	}
28282bfe3f2eSlogwang 
28294418919fSjohnjiang 	if (!rc)
28304418919fSjohnjiang 		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
28314418919fSjohnjiang 
28324418919fSjohnjiang 	PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
28334418919fSjohnjiang 
28342bfe3f2eSlogwang 	return rc;
28352bfe3f2eSlogwang }
28362bfe3f2eSlogwang 
28372bfe3f2eSlogwang static int
28382bfe3f2eSlogwang bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
28392bfe3f2eSlogwang {
28404b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
28412bfe3f2eSlogwang 	uint16_t vlan = bp->vlan;
28422bfe3f2eSlogwang 	int rc;
28432bfe3f2eSlogwang 
28444418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
28454418919fSjohnjiang 	if (rc)
28464418919fSjohnjiang 		return rc;
28474418919fSjohnjiang 
2848d30ea906Sjfb8856606 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2849d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
28502bfe3f2eSlogwang 			"PVID cannot be modified for this function\n");
28512bfe3f2eSlogwang 		return -ENOTSUP;
28522bfe3f2eSlogwang 	}
28532bfe3f2eSlogwang 	bp->vlan = on ? pvid : 0;
28542bfe3f2eSlogwang 
28552bfe3f2eSlogwang 	rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
28562bfe3f2eSlogwang 	if (rc)
28572bfe3f2eSlogwang 		bp->vlan = vlan;
28582bfe3f2eSlogwang 	return rc;
28592bfe3f2eSlogwang }
28602bfe3f2eSlogwang 
28612bfe3f2eSlogwang static int
28622bfe3f2eSlogwang bnxt_dev_led_on_op(struct rte_eth_dev *dev)
28632bfe3f2eSlogwang {
28644b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
28654418919fSjohnjiang 	int rc;
28664418919fSjohnjiang 
28674418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
28684418919fSjohnjiang 	if (rc)
28694418919fSjohnjiang 		return rc;
28702bfe3f2eSlogwang 
28712bfe3f2eSlogwang 	return bnxt_hwrm_port_led_cfg(bp, true);
28722bfe3f2eSlogwang }
28732bfe3f2eSlogwang 
28742bfe3f2eSlogwang static int
28752bfe3f2eSlogwang bnxt_dev_led_off_op(struct rte_eth_dev *dev)
28762bfe3f2eSlogwang {
28774b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
28784418919fSjohnjiang 	int rc;
28794418919fSjohnjiang 
28804418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
28814418919fSjohnjiang 	if (rc)
28824418919fSjohnjiang 		return rc;
28832bfe3f2eSlogwang 
28842bfe3f2eSlogwang 	return bnxt_hwrm_port_led_cfg(bp, false);
28852bfe3f2eSlogwang }
28862bfe3f2eSlogwang 
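/* Count completed but not yet processed Rx descriptors by walking the
 * queue's completion ring from the current raw consumer index.
 */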
28872bfe3f2eSlogwang static uint32_t
28882bfe3f2eSlogwang bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
28892bfe3f2eSlogwang {
28904418919fSjohnjiang 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
28912bfe3f2eSlogwang 	uint32_t desc = 0, raw_cons = 0, cons;
28922bfe3f2eSlogwang 	struct bnxt_cp_ring_info *cpr;
28932bfe3f2eSlogwang 	struct bnxt_rx_queue *rxq;
28942bfe3f2eSlogwang 	struct rx_pkt_cmpl *rxcmp;
28954418919fSjohnjiang 	int rc;
28964418919fSjohnjiang 
28974418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
28984418919fSjohnjiang 	if (rc)
28994418919fSjohnjiang 		return rc;
29002bfe3f2eSlogwang 
29012bfe3f2eSlogwang 	rxq = dev->data->rx_queues[rx_queue_id];
29022bfe3f2eSlogwang 	cpr = rxq->cp_ring;
29034418919fSjohnjiang 	raw_cons = cpr->cp_raw_cons;
29042bfe3f2eSlogwang 
29054418919fSjohnjiang 	while (1) {
29062bfe3f2eSlogwang 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
29074418919fSjohnjiang 		rte_prefetch0(&cpr->cp_desc_ring[cons]);
29082bfe3f2eSlogwang 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
29092bfe3f2eSlogwang 
29104418919fSjohnjiang 		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
29114418919fSjohnjiang 			break;
29122bfe3f2eSlogwang 		} else {
29134418919fSjohnjiang 			raw_cons++;
29144418919fSjohnjiang 			desc++;
29152bfe3f2eSlogwang 		}
29162bfe3f2eSlogwang 	}
29172bfe3f2eSlogwang 
29182bfe3f2eSlogwang 	return desc;
29192bfe3f2eSlogwang }
29202bfe3f2eSlogwang 
29212bfe3f2eSlogwang static int
29222bfe3f2eSlogwang bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
29232bfe3f2eSlogwang {
29242bfe3f2eSlogwang 	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
29252bfe3f2eSlogwang 	struct bnxt_rx_ring_info *rxr;
29262bfe3f2eSlogwang 	struct bnxt_cp_ring_info *cpr;
2927*2d9fd380Sjfb8856606 	struct rte_mbuf *rx_buf;
29282bfe3f2eSlogwang 	struct rx_pkt_cmpl *rxcmp;
29292bfe3f2eSlogwang 	uint32_t cons, cp_cons;
29304418919fSjohnjiang 	int rc;
29312bfe3f2eSlogwang 
29322bfe3f2eSlogwang 	if (!rxq)
29332bfe3f2eSlogwang 		return -EINVAL;
29342bfe3f2eSlogwang 
29354418919fSjohnjiang 	rc = is_bnxt_in_error(rxq->bp);
29364418919fSjohnjiang 	if (rc)
29374418919fSjohnjiang 		return rc;
29384418919fSjohnjiang 
29392bfe3f2eSlogwang 	cpr = rxq->cp_ring;
29402bfe3f2eSlogwang 	rxr = rxq->rx_ring;
29412bfe3f2eSlogwang 
29422bfe3f2eSlogwang 	if (offset >= rxq->nb_rx_desc)
29432bfe3f2eSlogwang 		return -EINVAL;
29442bfe3f2eSlogwang 
29452bfe3f2eSlogwang 	cons = RING_CMP(cpr->cp_ring_struct, offset);
29462bfe3f2eSlogwang 	cp_cons = cpr->cp_raw_cons;
29472bfe3f2eSlogwang 	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
29482bfe3f2eSlogwang 
29492bfe3f2eSlogwang 	if (cons > cp_cons) {
29502bfe3f2eSlogwang 		if (CMPL_VALID(rxcmp, cpr->valid))
29512bfe3f2eSlogwang 			return RTE_ETH_RX_DESC_DONE;
29522bfe3f2eSlogwang 	} else {
29532bfe3f2eSlogwang 		if (CMPL_VALID(rxcmp, !cpr->valid))
29542bfe3f2eSlogwang 			return RTE_ETH_RX_DESC_DONE;
29552bfe3f2eSlogwang 	}
2956*2d9fd380Sjfb8856606 	rx_buf = rxr->rx_buf_ring[cons];
2957*2d9fd380Sjfb8856606 	if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf)
29582bfe3f2eSlogwang 		return RTE_ETH_RX_DESC_UNAVAIL;
29592bfe3f2eSlogwang 
29602bfe3f2eSlogwang 
29612bfe3f2eSlogwang 	return RTE_ETH_RX_DESC_AVAIL;
29622bfe3f2eSlogwang }
29632bfe3f2eSlogwang 
29642bfe3f2eSlogwang static int
29652bfe3f2eSlogwang bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
29662bfe3f2eSlogwang {
29672bfe3f2eSlogwang 	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
29682bfe3f2eSlogwang 	struct bnxt_tx_ring_info *txr;
29692bfe3f2eSlogwang 	struct bnxt_cp_ring_info *cpr;
29702bfe3f2eSlogwang 	struct bnxt_sw_tx_bd *tx_buf;
29712bfe3f2eSlogwang 	struct tx_pkt_cmpl *txcmp;
29722bfe3f2eSlogwang 	uint32_t cons, cp_cons;
29734418919fSjohnjiang 	int rc;
29742bfe3f2eSlogwang 
29752bfe3f2eSlogwang 	if (!txq)
29762bfe3f2eSlogwang 		return -EINVAL;
29772bfe3f2eSlogwang 
29784418919fSjohnjiang 	rc = is_bnxt_in_error(txq->bp);
29794418919fSjohnjiang 	if (rc)
29804418919fSjohnjiang 		return rc;
29814418919fSjohnjiang 
29822bfe3f2eSlogwang 	cpr = txq->cp_ring;
29832bfe3f2eSlogwang 	txr = txq->tx_ring;
29842bfe3f2eSlogwang 
29852bfe3f2eSlogwang 	if (offset >= txq->nb_tx_desc)
29862bfe3f2eSlogwang 		return -EINVAL;
29872bfe3f2eSlogwang 
29882bfe3f2eSlogwang 	cons = RING_CMP(cpr->cp_ring_struct, offset);
29892bfe3f2eSlogwang 	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
29902bfe3f2eSlogwang 	cp_cons = cpr->cp_raw_cons;
29912bfe3f2eSlogwang 
29922bfe3f2eSlogwang 	if (cons > cp_cons) {
29932bfe3f2eSlogwang 		if (CMPL_VALID(txcmp, cpr->valid))
29942bfe3f2eSlogwang 			return RTE_ETH_TX_DESC_UNAVAIL;
29952bfe3f2eSlogwang 	} else {
29962bfe3f2eSlogwang 		if (CMPL_VALID(txcmp, !cpr->valid))
29972bfe3f2eSlogwang 			return RTE_ETH_TX_DESC_UNAVAIL;
29982bfe3f2eSlogwang 	}
29992bfe3f2eSlogwang 	tx_buf = &txr->tx_buf_ring[cons];
30002bfe3f2eSlogwang 	if (tx_buf->mbuf == NULL)
30012bfe3f2eSlogwang 		return RTE_ETH_TX_DESC_DONE;
30022bfe3f2eSlogwang 
30032bfe3f2eSlogwang 	return RTE_ETH_TX_DESC_FULL;
30042bfe3f2eSlogwang }
30052bfe3f2eSlogwang 
3006*2d9fd380Sjfb8856606 int
3007*2d9fd380Sjfb8856606 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
30082bfe3f2eSlogwang 		    enum rte_filter_type filter_type,
30092bfe3f2eSlogwang 		    enum rte_filter_op filter_op, void *arg)
30102bfe3f2eSlogwang {
3011*2d9fd380Sjfb8856606 	struct bnxt *bp = dev->data->dev_private;
30122bfe3f2eSlogwang 	int ret = 0;
30132bfe3f2eSlogwang 
3014*2d9fd380Sjfb8856606 	if (!bp)
3015*2d9fd380Sjfb8856606 		return -EIO;
3016*2d9fd380Sjfb8856606 
3017*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
3018*2d9fd380Sjfb8856606 		struct bnxt_representor *vfr = dev->data->dev_private;
3019*2d9fd380Sjfb8856606 		bp = vfr->parent_dev->data->dev_private;
3020*2d9fd380Sjfb8856606 		/* parent is deleted while children are still valid */
3021*2d9fd380Sjfb8856606 		if (!bp) {
3022*2d9fd380Sjfb8856606 			PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n",
3023*2d9fd380Sjfb8856606 				    dev->data->port_id,
3024*2d9fd380Sjfb8856606 				    filter_type,
3025*2d9fd380Sjfb8856606 				    filter_op);
3026*2d9fd380Sjfb8856606 			return -EIO;
3027*2d9fd380Sjfb8856606 		}
3028*2d9fd380Sjfb8856606 	}
3029*2d9fd380Sjfb8856606 
3030*2d9fd380Sjfb8856606 	ret = is_bnxt_in_error(bp);
30314418919fSjohnjiang 	if (ret)
30324418919fSjohnjiang 		return ret;
30334418919fSjohnjiang 
30342bfe3f2eSlogwang 	switch (filter_type) {
30352bfe3f2eSlogwang 	case RTE_ETH_FILTER_GENERIC:
30362bfe3f2eSlogwang 		if (filter_op != RTE_ETH_FILTER_GET)
30372bfe3f2eSlogwang 			return -EINVAL;
3038*2d9fd380Sjfb8856606 
3039*2d9fd380Sjfb8856606 		/* PMD supports thread-safe flow operations.  rte_flow API
3040*2d9fd380Sjfb8856606 		 * functions can avoid mutex for multi-thread safety.
3041*2d9fd380Sjfb8856606 		 */
3042*2d9fd380Sjfb8856606 		dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
3043*2d9fd380Sjfb8856606 
3044*2d9fd380Sjfb8856606 		if (BNXT_TRUFLOW_EN(bp))
3045*2d9fd380Sjfb8856606 			*(const void **)arg = &bnxt_ulp_rte_flow_ops;
3046*2d9fd380Sjfb8856606 		else
30472bfe3f2eSlogwang 			*(const void **)arg = &bnxt_flow_ops;
30482bfe3f2eSlogwang 		break;
30492bfe3f2eSlogwang 	default:
3050d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
30512bfe3f2eSlogwang 			"Filter type (%d) not supported", filter_type);
30522bfe3f2eSlogwang 		ret = -EINVAL;
30532bfe3f2eSlogwang 		break;
30542bfe3f2eSlogwang 	}
30552bfe3f2eSlogwang 	return ret;
30562bfe3f2eSlogwang }
30572bfe3f2eSlogwang 
30582bfe3f2eSlogwang static const uint32_t *
30592bfe3f2eSlogwang bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
30602bfe3f2eSlogwang {
30612bfe3f2eSlogwang 	static const uint32_t ptypes[] = {
30622bfe3f2eSlogwang 		RTE_PTYPE_L2_ETHER_VLAN,
30632bfe3f2eSlogwang 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
30642bfe3f2eSlogwang 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
30652bfe3f2eSlogwang 		RTE_PTYPE_L4_ICMP,
30662bfe3f2eSlogwang 		RTE_PTYPE_L4_TCP,
30672bfe3f2eSlogwang 		RTE_PTYPE_L4_UDP,
30682bfe3f2eSlogwang 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
30692bfe3f2eSlogwang 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
30702bfe3f2eSlogwang 		RTE_PTYPE_INNER_L4_ICMP,
30712bfe3f2eSlogwang 		RTE_PTYPE_INNER_L4_TCP,
30722bfe3f2eSlogwang 		RTE_PTYPE_INNER_L4_UDP,
30732bfe3f2eSlogwang 		RTE_PTYPE_UNKNOWN
30742bfe3f2eSlogwang 	};
30752bfe3f2eSlogwang 
30764418919fSjohnjiang 	if (!dev->rx_pkt_burst)
30772bfe3f2eSlogwang 		return NULL;
30784418919fSjohnjiang 
30794418919fSjohnjiang 	return ptypes;
30802bfe3f2eSlogwang }
30812bfe3f2eSlogwang 
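/* PTP register access: the Rx/Tx timestamp register blocks are mapped
 * through GRC windows 5 and 6 so that they can be read at fixed 0x5000 and
 * 0x6000 offsets within BAR0.
 */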
3082d30ea906Sjfb8856606 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3083d30ea906Sjfb8856606 			 int reg_win)
3084d30ea906Sjfb8856606 {
3085d30ea906Sjfb8856606 	uint32_t reg_base = *reg_arr & 0xfffff000;
3086d30ea906Sjfb8856606 	uint32_t win_off;
3087d30ea906Sjfb8856606 	int i;
30882bfe3f2eSlogwang 
3089d30ea906Sjfb8856606 	for (i = 0; i < count; i++) {
3090d30ea906Sjfb8856606 		if ((reg_arr[i] & 0xfffff000) != reg_base)
3091d30ea906Sjfb8856606 			return -ERANGE;
3092d30ea906Sjfb8856606 	}
3093d30ea906Sjfb8856606 	win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
30941646932aSjfb8856606 	rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3095d30ea906Sjfb8856606 	return 0;
3096d30ea906Sjfb8856606 }
3097d30ea906Sjfb8856606 
3098d30ea906Sjfb8856606 static int bnxt_map_ptp_regs(struct bnxt *bp)
3099d30ea906Sjfb8856606 {
3100d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3101d30ea906Sjfb8856606 	uint32_t *reg_arr;
3102d30ea906Sjfb8856606 	int rc, i;
3103d30ea906Sjfb8856606 
3104d30ea906Sjfb8856606 	reg_arr = ptp->rx_regs;
3105d30ea906Sjfb8856606 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3106d30ea906Sjfb8856606 	if (rc)
3107d30ea906Sjfb8856606 		return rc;
3108d30ea906Sjfb8856606 
3109d30ea906Sjfb8856606 	reg_arr = ptp->tx_regs;
3110d30ea906Sjfb8856606 	rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3111d30ea906Sjfb8856606 	if (rc)
3112d30ea906Sjfb8856606 		return rc;
3113d30ea906Sjfb8856606 
3114d30ea906Sjfb8856606 	for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3115d30ea906Sjfb8856606 		ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3116d30ea906Sjfb8856606 
3117d30ea906Sjfb8856606 	for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3118d30ea906Sjfb8856606 		ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3119d30ea906Sjfb8856606 
3120d30ea906Sjfb8856606 	return 0;
3121d30ea906Sjfb8856606 }
3122d30ea906Sjfb8856606 
3123d30ea906Sjfb8856606 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3124d30ea906Sjfb8856606 {
31251646932aSjfb8856606 	rte_write32(0, (uint8_t *)bp->bar0 +
31261646932aSjfb8856606 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
31271646932aSjfb8856606 	rte_write32(0, (uint8_t *)bp->bar0 +
31281646932aSjfb8856606 			 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3129d30ea906Sjfb8856606 }
3130d30ea906Sjfb8856606 
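/* Read the 64-bit free-running sync time counter (two 32-bit reads) from
 * BAR0.
 */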
3131d30ea906Sjfb8856606 static uint64_t bnxt_cc_read(struct bnxt *bp)
3132d30ea906Sjfb8856606 {
3133d30ea906Sjfb8856606 	uint64_t ns;
3134d30ea906Sjfb8856606 
3135d30ea906Sjfb8856606 	ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3136d30ea906Sjfb8856606 			      BNXT_GRCPF_REG_SYNC_TIME));
3137d30ea906Sjfb8856606 	ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3138d30ea906Sjfb8856606 					  BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3139d30ea906Sjfb8856606 	return ns;
3140d30ea906Sjfb8856606 }
3141d30ea906Sjfb8856606 
3142d30ea906Sjfb8856606 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3143d30ea906Sjfb8856606 {
3144d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3145d30ea906Sjfb8856606 	uint32_t fifo;
3146d30ea906Sjfb8856606 
3147d30ea906Sjfb8856606 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3148d30ea906Sjfb8856606 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3149d30ea906Sjfb8856606 	if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3150d30ea906Sjfb8856606 		return -EAGAIN;
3151d30ea906Sjfb8856606 
3152d30ea906Sjfb8856606 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3153d30ea906Sjfb8856606 				ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3154d30ea906Sjfb8856606 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3155d30ea906Sjfb8856606 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3156d30ea906Sjfb8856606 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3157d30ea906Sjfb8856606 				ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3158d30ea906Sjfb8856606 
3159d30ea906Sjfb8856606 	return 0;
3160d30ea906Sjfb8856606 }
3161d30ea906Sjfb8856606 
3162d30ea906Sjfb8856606 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3163d30ea906Sjfb8856606 {
3164d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3165*2d9fd380Sjfb8856606 	struct bnxt_pf_info *pf = bp->pf;
3166d30ea906Sjfb8856606 	uint16_t port_id;
3167d30ea906Sjfb8856606 	uint32_t fifo;
3168d30ea906Sjfb8856606 
3169d30ea906Sjfb8856606 	if (!ptp)
3170d30ea906Sjfb8856606 		return -ENODEV;
3171d30ea906Sjfb8856606 
3172d30ea906Sjfb8856606 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3173d30ea906Sjfb8856606 				ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3174d30ea906Sjfb8856606 	if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3175d30ea906Sjfb8856606 		return -EAGAIN;
3176d30ea906Sjfb8856606 
3177d30ea906Sjfb8856606 	port_id = pf->port_id;
31781646932aSjfb8856606 	rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
31791646932aSjfb8856606 	       ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3180d30ea906Sjfb8856606 
3181d30ea906Sjfb8856606 	fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3182d30ea906Sjfb8856606 				   ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3183d30ea906Sjfb8856606 	if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3184d30ea906Sjfb8856606 /*		bnxt_clr_rx_ts(bp);	  TBD  */
3185d30ea906Sjfb8856606 		return -EBUSY;
3186d30ea906Sjfb8856606 	}
3187d30ea906Sjfb8856606 
3188d30ea906Sjfb8856606 	*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3189d30ea906Sjfb8856606 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3190d30ea906Sjfb8856606 	*ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3191d30ea906Sjfb8856606 				ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3192d30ea906Sjfb8856606 
3193d30ea906Sjfb8856606 	return 0;
3194d30ea906Sjfb8856606 }
3195d30ea906Sjfb8856606 
3196d30ea906Sjfb8856606 static int
3197d30ea906Sjfb8856606 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3198d30ea906Sjfb8856606 {
3199d30ea906Sjfb8856606 	uint64_t ns;
32004b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3201d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3202d30ea906Sjfb8856606 
3203d30ea906Sjfb8856606 	if (!ptp)
3204d30ea906Sjfb8856606 		return 0;
3205d30ea906Sjfb8856606 
3206d30ea906Sjfb8856606 	ns = rte_timespec_to_ns(ts);
3207d30ea906Sjfb8856606 	/* Set the timecounters to a new value. */
3208d30ea906Sjfb8856606 	ptp->tc.nsec = ns;
3209d30ea906Sjfb8856606 
3210d30ea906Sjfb8856606 	return 0;
3211d30ea906Sjfb8856606 }
3212d30ea906Sjfb8856606 
3213d30ea906Sjfb8856606 static int
3214d30ea906Sjfb8856606 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3215d30ea906Sjfb8856606 {
32164b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3217d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
32184418919fSjohnjiang 	uint64_t ns, systime_cycles = 0;
32194418919fSjohnjiang 	int rc = 0;
3220d30ea906Sjfb8856606 
3221d30ea906Sjfb8856606 	if (!ptp)
3222d30ea906Sjfb8856606 		return 0;
3223d30ea906Sjfb8856606 
32244418919fSjohnjiang 	if (BNXT_CHIP_THOR(bp))
32254418919fSjohnjiang 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
32264418919fSjohnjiang 					     &systime_cycles);
32274418919fSjohnjiang 	else
3228d30ea906Sjfb8856606 		systime_cycles = bnxt_cc_read(bp);
32294418919fSjohnjiang 
3230d30ea906Sjfb8856606 	ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3231d30ea906Sjfb8856606 	*ts = rte_ns_to_timespec(ns);
3232d30ea906Sjfb8856606 
32334418919fSjohnjiang 	return rc;
3234d30ea906Sjfb8856606 }
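/* Enable PTP timestamping: program Rx/Tx timestamp capture via HWRM, reset
 * the timecounters and, on pre-Thor chips, map the PTP registers through
 * GRC windows.
 */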
3235d30ea906Sjfb8856606 static int
3236d30ea906Sjfb8856606 bnxt_timesync_enable(struct rte_eth_dev *dev)
3237d30ea906Sjfb8856606 {
32384b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3239d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3240d30ea906Sjfb8856606 	uint32_t shift = 0;
32414418919fSjohnjiang 	int rc;
3242d30ea906Sjfb8856606 
3243d30ea906Sjfb8856606 	if (!ptp)
3244d30ea906Sjfb8856606 		return 0;
3245d30ea906Sjfb8856606 
3246d30ea906Sjfb8856606 	ptp->rx_filter = 1;
3247d30ea906Sjfb8856606 	ptp->tx_tstamp_en = 1;
3248d30ea906Sjfb8856606 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3249d30ea906Sjfb8856606 
32504418919fSjohnjiang 	rc = bnxt_hwrm_ptp_cfg(bp);
32514418919fSjohnjiang 	if (rc)
32524418919fSjohnjiang 		return rc;
3253d30ea906Sjfb8856606 
3254d30ea906Sjfb8856606 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3255d30ea906Sjfb8856606 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3256d30ea906Sjfb8856606 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3257d30ea906Sjfb8856606 
3258d30ea906Sjfb8856606 	ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3259d30ea906Sjfb8856606 	ptp->tc.cc_shift = shift;
3260d30ea906Sjfb8856606 	ptp->tc.nsec_mask = (1ULL << shift) - 1;
3261d30ea906Sjfb8856606 
3262d30ea906Sjfb8856606 	ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3263d30ea906Sjfb8856606 	ptp->rx_tstamp_tc.cc_shift = shift;
3264d30ea906Sjfb8856606 	ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3265d30ea906Sjfb8856606 
3266d30ea906Sjfb8856606 	ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3267d30ea906Sjfb8856606 	ptp->tx_tstamp_tc.cc_shift = shift;
3268d30ea906Sjfb8856606 	ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3269d30ea906Sjfb8856606 
32704418919fSjohnjiang 	if (!BNXT_CHIP_THOR(bp))
32714418919fSjohnjiang 		bnxt_map_ptp_regs(bp);
32724418919fSjohnjiang 
3273d30ea906Sjfb8856606 	return 0;
3274d30ea906Sjfb8856606 }
3275d30ea906Sjfb8856606 
3276d30ea906Sjfb8856606 static int
3277d30ea906Sjfb8856606 bnxt_timesync_disable(struct rte_eth_dev *dev)
3278d30ea906Sjfb8856606 {
32794b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3280d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3281d30ea906Sjfb8856606 
3282d30ea906Sjfb8856606 	if (!ptp)
3283d30ea906Sjfb8856606 		return 0;
3284d30ea906Sjfb8856606 
3285d30ea906Sjfb8856606 	ptp->rx_filter = 0;
3286d30ea906Sjfb8856606 	ptp->tx_tstamp_en = 0;
3287d30ea906Sjfb8856606 	ptp->rxctl = 0;
3288d30ea906Sjfb8856606 
3289d30ea906Sjfb8856606 	bnxt_hwrm_ptp_cfg(bp);
3290d30ea906Sjfb8856606 
32914418919fSjohnjiang 	if (!BNXT_CHIP_THOR(bp))
3292d30ea906Sjfb8856606 		bnxt_unmap_ptp_regs(bp);
3293d30ea906Sjfb8856606 
3294d30ea906Sjfb8856606 	return 0;
3295d30ea906Sjfb8856606 }
3296d30ea906Sjfb8856606 
3297d30ea906Sjfb8856606 static int
3298d30ea906Sjfb8856606 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3299d30ea906Sjfb8856606 				 struct timespec *timestamp,
3300d30ea906Sjfb8856606 				 uint32_t flags __rte_unused)
3301d30ea906Sjfb8856606 {
33024b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3303d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3304d30ea906Sjfb8856606 	uint64_t rx_tstamp_cycles = 0;
3305d30ea906Sjfb8856606 	uint64_t ns;
3306d30ea906Sjfb8856606 
3307d30ea906Sjfb8856606 	if (!ptp)
3308d30ea906Sjfb8856606 		return 0;
3309d30ea906Sjfb8856606 
33104418919fSjohnjiang 	if (BNXT_CHIP_THOR(bp))
33114418919fSjohnjiang 		rx_tstamp_cycles = ptp->rx_timestamp;
33124418919fSjohnjiang 	else
3313d30ea906Sjfb8856606 		bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
33144418919fSjohnjiang 
3315d30ea906Sjfb8856606 	ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3316d30ea906Sjfb8856606 	*timestamp = rte_ns_to_timespec(ns);
3317d30ea906Sjfb8856606 	return  0;
3318d30ea906Sjfb8856606 }
3319d30ea906Sjfb8856606 
3320d30ea906Sjfb8856606 static int
3321d30ea906Sjfb8856606 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3322d30ea906Sjfb8856606 				 struct timespec *timestamp)
3323d30ea906Sjfb8856606 {
33244b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3325d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3326d30ea906Sjfb8856606 	uint64_t tx_tstamp_cycles = 0;
3327d30ea906Sjfb8856606 	uint64_t ns;
33284418919fSjohnjiang 	int rc = 0;
3329d30ea906Sjfb8856606 
3330d30ea906Sjfb8856606 	if (!ptp)
3331d30ea906Sjfb8856606 		return 0;
3332d30ea906Sjfb8856606 
33334418919fSjohnjiang 	if (BNXT_CHIP_THOR(bp))
33344418919fSjohnjiang 		rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
33354418919fSjohnjiang 					     &tx_tstamp_cycles);
33364418919fSjohnjiang 	else
33374418919fSjohnjiang 		rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
33384418919fSjohnjiang 
3339d30ea906Sjfb8856606 	ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3340d30ea906Sjfb8856606 	*timestamp = rte_ns_to_timespec(ns);
3341d30ea906Sjfb8856606 
33424418919fSjohnjiang 	return rc;
3343d30ea906Sjfb8856606 }
3344d30ea906Sjfb8856606 
3345d30ea906Sjfb8856606 static int
3346d30ea906Sjfb8856606 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3347d30ea906Sjfb8856606 {
33484b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
3349d30ea906Sjfb8856606 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3350d30ea906Sjfb8856606 
3351d30ea906Sjfb8856606 	if (!ptp)
3352d30ea906Sjfb8856606 		return 0;
3353d30ea906Sjfb8856606 
3354d30ea906Sjfb8856606 	ptp->tc.nsec += delta;
3355d30ea906Sjfb8856606 
3356d30ea906Sjfb8856606 	return 0;
3357d30ea906Sjfb8856606 }
33582bfe3f2eSlogwang 
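/* NVM/EEPROM access: the reported length is directory entries times entry
 * size.  A read at offset 0 returns the NVM directory itself; otherwise the
 * top byte of the offset selects a directory index and the low 24 bits an
 * offset within that item.
 */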
33592bfe3f2eSlogwang static int
33602bfe3f2eSlogwang bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
33612bfe3f2eSlogwang {
33624b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
33632bfe3f2eSlogwang 	int rc;
33642bfe3f2eSlogwang 	uint32_t dir_entries;
33652bfe3f2eSlogwang 	uint32_t entry_length;
33662bfe3f2eSlogwang 
33674418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
33684418919fSjohnjiang 	if (rc)
33694418919fSjohnjiang 		return rc;
33704418919fSjohnjiang 
33714418919fSjohnjiang 	PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3372d30ea906Sjfb8856606 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
33732bfe3f2eSlogwang 		    bp->pdev->addr.devid, bp->pdev->addr.function);
33742bfe3f2eSlogwang 
33752bfe3f2eSlogwang 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
33762bfe3f2eSlogwang 	if (rc != 0)
33772bfe3f2eSlogwang 		return rc;
33782bfe3f2eSlogwang 
33792bfe3f2eSlogwang 	return dir_entries * entry_length;
33802bfe3f2eSlogwang }
33812bfe3f2eSlogwang 
33822bfe3f2eSlogwang static int
33832bfe3f2eSlogwang bnxt_get_eeprom_op(struct rte_eth_dev *dev,
33842bfe3f2eSlogwang 		struct rte_dev_eeprom_info *in_eeprom)
33852bfe3f2eSlogwang {
33864b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
33872bfe3f2eSlogwang 	uint32_t index;
33882bfe3f2eSlogwang 	uint32_t offset;
33894418919fSjohnjiang 	int rc;
33902bfe3f2eSlogwang 
33914418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
33924418919fSjohnjiang 	if (rc)
33934418919fSjohnjiang 		return rc;
33944418919fSjohnjiang 
33954418919fSjohnjiang 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
33964418919fSjohnjiang 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
33974418919fSjohnjiang 		    bp->pdev->addr.devid, bp->pdev->addr.function,
33984418919fSjohnjiang 		    in_eeprom->offset, in_eeprom->length);
33992bfe3f2eSlogwang 
34002bfe3f2eSlogwang 	if (in_eeprom->offset == 0) /* special offset value to get directory */
34012bfe3f2eSlogwang 		return bnxt_get_nvram_directory(bp, in_eeprom->length,
34022bfe3f2eSlogwang 						in_eeprom->data);
34032bfe3f2eSlogwang 
34042bfe3f2eSlogwang 	index = in_eeprom->offset >> 24;
34052bfe3f2eSlogwang 	offset = in_eeprom->offset & 0xffffff;
34062bfe3f2eSlogwang 
34072bfe3f2eSlogwang 	if (index != 0)
34082bfe3f2eSlogwang 		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
34092bfe3f2eSlogwang 					   in_eeprom->length, in_eeprom->data);
34102bfe3f2eSlogwang 
34112bfe3f2eSlogwang 	return 0;
34122bfe3f2eSlogwang }
34132bfe3f2eSlogwang 
34142bfe3f2eSlogwang static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
34152bfe3f2eSlogwang {
34162bfe3f2eSlogwang 	switch (dir_type) {
34172bfe3f2eSlogwang 	case BNX_DIR_TYPE_CHIMP_PATCH:
34182bfe3f2eSlogwang 	case BNX_DIR_TYPE_BOOTCODE:
34192bfe3f2eSlogwang 	case BNX_DIR_TYPE_BOOTCODE_2:
34202bfe3f2eSlogwang 	case BNX_DIR_TYPE_APE_FW:
34212bfe3f2eSlogwang 	case BNX_DIR_TYPE_APE_PATCH:
34222bfe3f2eSlogwang 	case BNX_DIR_TYPE_KONG_FW:
34232bfe3f2eSlogwang 	case BNX_DIR_TYPE_KONG_PATCH:
34242bfe3f2eSlogwang 	case BNX_DIR_TYPE_BONO_FW:
34252bfe3f2eSlogwang 	case BNX_DIR_TYPE_BONO_PATCH:
3426d30ea906Sjfb8856606 		/* FALLTHROUGH */
34272bfe3f2eSlogwang 		return true;
34282bfe3f2eSlogwang 	}
34292bfe3f2eSlogwang 
34302bfe3f2eSlogwang 	return false;
34312bfe3f2eSlogwang }
34322bfe3f2eSlogwang 
34332bfe3f2eSlogwang static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
34342bfe3f2eSlogwang {
34352bfe3f2eSlogwang 	switch (dir_type) {
34362bfe3f2eSlogwang 	case BNX_DIR_TYPE_AVS:
34372bfe3f2eSlogwang 	case BNX_DIR_TYPE_EXP_ROM_MBA:
34382bfe3f2eSlogwang 	case BNX_DIR_TYPE_PCIE:
34392bfe3f2eSlogwang 	case BNX_DIR_TYPE_TSCF_UCODE:
34402bfe3f2eSlogwang 	case BNX_DIR_TYPE_EXT_PHY:
34412bfe3f2eSlogwang 	case BNX_DIR_TYPE_CCM:
34422bfe3f2eSlogwang 	case BNX_DIR_TYPE_ISCSI_BOOT:
34432bfe3f2eSlogwang 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
34442bfe3f2eSlogwang 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3445d30ea906Sjfb8856606 		/* FALLTHROUGH */
34462bfe3f2eSlogwang 		return true;
34472bfe3f2eSlogwang 	}
34482bfe3f2eSlogwang 
34492bfe3f2eSlogwang 	return false;
34502bfe3f2eSlogwang }
34512bfe3f2eSlogwang 
34522bfe3f2eSlogwang static bool bnxt_dir_type_is_executable(uint16_t dir_type)
34532bfe3f2eSlogwang {
34542bfe3f2eSlogwang 	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
34552bfe3f2eSlogwang 		bnxt_dir_type_is_other_exec_format(dir_type);
34562bfe3f2eSlogwang }
34572bfe3f2eSlogwang 
34582bfe3f2eSlogwang static int
34592bfe3f2eSlogwang bnxt_set_eeprom_op(struct rte_eth_dev *dev,
34602bfe3f2eSlogwang 		struct rte_dev_eeprom_info *in_eeprom)
34612bfe3f2eSlogwang {
34624b05018fSfengbojiang 	struct bnxt *bp = dev->data->dev_private;
34632bfe3f2eSlogwang 	uint8_t index, dir_op;
34642bfe3f2eSlogwang 	uint16_t type, ext, ordinal, attr;
34654418919fSjohnjiang 	int rc;
34662bfe3f2eSlogwang 
34674418919fSjohnjiang 	rc = is_bnxt_in_error(bp);
34684418919fSjohnjiang 	if (rc)
34694418919fSjohnjiang 		return rc;
34704418919fSjohnjiang 
34714418919fSjohnjiang 	PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
34724418919fSjohnjiang 		    bp->pdev->addr.domain, bp->pdev->addr.bus,
34734418919fSjohnjiang 		    bp->pdev->addr.devid, bp->pdev->addr.function,
34744418919fSjohnjiang 		    in_eeprom->offset, in_eeprom->length);
34752bfe3f2eSlogwang 
34762bfe3f2eSlogwang 	if (!BNXT_PF(bp)) {
3477d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
34782bfe3f2eSlogwang 		return -EINVAL;
34792bfe3f2eSlogwang 	}
34802bfe3f2eSlogwang 
34812bfe3f2eSlogwang 	type = in_eeprom->magic >> 16;
34822bfe3f2eSlogwang 
34832bfe3f2eSlogwang 	if (type == 0xffff) { /* special value for directory operations */
34842bfe3f2eSlogwang 		index = in_eeprom->magic & 0xff;
34852bfe3f2eSlogwang 		dir_op = in_eeprom->magic >> 8;
34862bfe3f2eSlogwang 		if (index == 0)
34872bfe3f2eSlogwang 			return -EINVAL;
34882bfe3f2eSlogwang 		switch (dir_op) {
34892bfe3f2eSlogwang 		case 0x0e: /* erase */
34902bfe3f2eSlogwang 			if (in_eeprom->offset != ~in_eeprom->magic)
34912bfe3f2eSlogwang 				return -EINVAL;
34922bfe3f2eSlogwang 			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
34932bfe3f2eSlogwang 		default:
34942bfe3f2eSlogwang 			return -EINVAL;
34952bfe3f2eSlogwang 		}
34962bfe3f2eSlogwang 	}
34972bfe3f2eSlogwang 
34982bfe3f2eSlogwang 	/* Create or re-write an NVM item: */
34992bfe3f2eSlogwang 	if (bnxt_dir_type_is_executable(type) == true)
35002bfe3f2eSlogwang 		return -EOPNOTSUPP;
35012bfe3f2eSlogwang 	ext = in_eeprom->magic & 0xffff;
35022bfe3f2eSlogwang 	ordinal = in_eeprom->offset >> 16;
35032bfe3f2eSlogwang 	attr = in_eeprom->offset & 0xffff;
35042bfe3f2eSlogwang 
35052bfe3f2eSlogwang 	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
35062bfe3f2eSlogwang 				     in_eeprom->data, in_eeprom->length);
35072bfe3f2eSlogwang }
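/*
 * Editor's note: bnxt_set_eeprom_op() above reuses the generic magic/offset
 * fields as a small command encoding. When magic >> 16 == 0xffff the request
 * is a directory operation: bits 15:8 of magic carry the opcode (0x0e ==
 * erase) and bits 7:0 the 1-based directory index, with offset required to
 * equal ~magic as a sanity check. Otherwise type = magic >> 16, ext =
 * magic & 0xffff, ordinal = offset >> 16 and attr = offset & 0xffff are
 * handed to bnxt_hwrm_flash_nvram(). Hypothetical erase request for
 * directory entry 2 (caller-side sketch, values illustrative):
 *
 *	info.magic  = (0xffffU << 16) | (0x0e << 8) | 2;
 *	info.offset = ~info.magic;
 *	rte_eth_dev_set_eeprom(port_id, &info);
 */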
35082bfe3f2eSlogwang 
3509a9643ea8Slogwang /*
3510a9643ea8Slogwang  * Initialization
3511a9643ea8Slogwang  */
3512a9643ea8Slogwang 
35132bfe3f2eSlogwang static const struct eth_dev_ops bnxt_dev_ops = {
3514a9643ea8Slogwang 	.dev_infos_get = bnxt_dev_info_get_op,
3515a9643ea8Slogwang 	.dev_close = bnxt_dev_close_op,
3516a9643ea8Slogwang 	.dev_configure = bnxt_dev_configure_op,
3517a9643ea8Slogwang 	.dev_start = bnxt_dev_start_op,
3518a9643ea8Slogwang 	.dev_stop = bnxt_dev_stop_op,
3519a9643ea8Slogwang 	.dev_set_link_up = bnxt_dev_set_link_up_op,
3520a9643ea8Slogwang 	.dev_set_link_down = bnxt_dev_set_link_down_op,
3521a9643ea8Slogwang 	.stats_get = bnxt_stats_get_op,
3522a9643ea8Slogwang 	.stats_reset = bnxt_stats_reset_op,
3523a9643ea8Slogwang 	.rx_queue_setup = bnxt_rx_queue_setup_op,
3524a9643ea8Slogwang 	.rx_queue_release = bnxt_rx_queue_release_op,
3525a9643ea8Slogwang 	.tx_queue_setup = bnxt_tx_queue_setup_op,
3526a9643ea8Slogwang 	.tx_queue_release = bnxt_tx_queue_release_op,
35272bfe3f2eSlogwang 	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
35282bfe3f2eSlogwang 	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3529a9643ea8Slogwang 	.reta_update = bnxt_reta_update_op,
3530a9643ea8Slogwang 	.reta_query = bnxt_reta_query_op,
3531a9643ea8Slogwang 	.rss_hash_update = bnxt_rss_hash_update_op,
3532a9643ea8Slogwang 	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3533a9643ea8Slogwang 	.link_update = bnxt_link_update_op,
3534a9643ea8Slogwang 	.promiscuous_enable = bnxt_promiscuous_enable_op,
3535a9643ea8Slogwang 	.promiscuous_disable = bnxt_promiscuous_disable_op,
3536a9643ea8Slogwang 	.allmulticast_enable = bnxt_allmulticast_enable_op,
3537a9643ea8Slogwang 	.allmulticast_disable = bnxt_allmulticast_disable_op,
3538a9643ea8Slogwang 	.mac_addr_add = bnxt_mac_addr_add_op,
3539a9643ea8Slogwang 	.mac_addr_remove = bnxt_mac_addr_remove_op,
3540a9643ea8Slogwang 	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
3541a9643ea8Slogwang 	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
35422bfe3f2eSlogwang 	.udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
35432bfe3f2eSlogwang 	.udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
35442bfe3f2eSlogwang 	.vlan_filter_set = bnxt_vlan_filter_set_op,
35452bfe3f2eSlogwang 	.vlan_offload_set = bnxt_vlan_offload_set_op,
35464418919fSjohnjiang 	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
35472bfe3f2eSlogwang 	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
35482bfe3f2eSlogwang 	.mtu_set = bnxt_mtu_set_op,
35492bfe3f2eSlogwang 	.mac_addr_set = bnxt_set_default_mac_addr_op,
35502bfe3f2eSlogwang 	.xstats_get = bnxt_dev_xstats_get_op,
35512bfe3f2eSlogwang 	.xstats_get_names = bnxt_dev_xstats_get_names_op,
35522bfe3f2eSlogwang 	.xstats_reset = bnxt_dev_xstats_reset_op,
35532bfe3f2eSlogwang 	.fw_version_get = bnxt_fw_version_get,
35542bfe3f2eSlogwang 	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
35552bfe3f2eSlogwang 	.rxq_info_get = bnxt_rxq_info_get_op,
35562bfe3f2eSlogwang 	.txq_info_get = bnxt_txq_info_get_op,
3557*2d9fd380Sjfb8856606 	.rx_burst_mode_get = bnxt_rx_burst_mode_get,
3558*2d9fd380Sjfb8856606 	.tx_burst_mode_get = bnxt_tx_burst_mode_get,
35592bfe3f2eSlogwang 	.dev_led_on = bnxt_dev_led_on_op,
35602bfe3f2eSlogwang 	.dev_led_off = bnxt_dev_led_off_op,
3561d30ea906Sjfb8856606 	.rx_queue_start = bnxt_rx_queue_start,
3562d30ea906Sjfb8856606 	.rx_queue_stop = bnxt_rx_queue_stop,
3563d30ea906Sjfb8856606 	.tx_queue_start = bnxt_tx_queue_start,
3564d30ea906Sjfb8856606 	.tx_queue_stop = bnxt_tx_queue_stop,
35652bfe3f2eSlogwang 	.filter_ctrl = bnxt_filter_ctrl_op,
35662bfe3f2eSlogwang 	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
35672bfe3f2eSlogwang 	.get_eeprom_length    = bnxt_get_eeprom_length_op,
35682bfe3f2eSlogwang 	.get_eeprom           = bnxt_get_eeprom_op,
35692bfe3f2eSlogwang 	.set_eeprom           = bnxt_set_eeprom_op,
3570d30ea906Sjfb8856606 	.timesync_enable      = bnxt_timesync_enable,
3571d30ea906Sjfb8856606 	.timesync_disable     = bnxt_timesync_disable,
3572d30ea906Sjfb8856606 	.timesync_read_time   = bnxt_timesync_read_time,
3573d30ea906Sjfb8856606 	.timesync_write_time   = bnxt_timesync_write_time,
3574d30ea906Sjfb8856606 	.timesync_adjust_time = bnxt_timesync_adjust_time,
3575d30ea906Sjfb8856606 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3576d30ea906Sjfb8856606 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3577a9643ea8Slogwang };
3578a9643ea8Slogwang 
35794418919fSjohnjiang static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
35804418919fSjohnjiang {
35814418919fSjohnjiang 	uint32_t offset;
35824418919fSjohnjiang 
35834418919fSjohnjiang 	/* Only pre-map the reset GRC registers using window 3 */
35844418919fSjohnjiang 	rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
35854418919fSjohnjiang 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
35864418919fSjohnjiang 
35874418919fSjohnjiang 	offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
35884418919fSjohnjiang 
35894418919fSjohnjiang 	return offset;
35904418919fSjohnjiang }
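/*
 * Editor's note: bnxt_map_reset_regs() programs GRC window 3 so the 4 KB
 * region containing the target register becomes visible through BAR0. The
 * window base is the register address with the low 12 bits cleared, and the
 * returned offset keeps bits 11:2 (bits 1:0 encode the address space and are
 * masked off). For example, reg == 0x12345678 selects window base 0x12345000
 * and yields offset BNXT_GRCP_WINDOW_3_BASE + 0x678.
 */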
35914418919fSjohnjiang 
35924418919fSjohnjiang int bnxt_map_fw_health_status_regs(struct bnxt *bp)
35934418919fSjohnjiang {
35944418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
35954418919fSjohnjiang 	uint32_t reg_base = 0xffffffff;
35964418919fSjohnjiang 	int i;
35974418919fSjohnjiang 
35984418919fSjohnjiang 	/* Only pre-map the monitoring GRC registers using window 2 */
35994418919fSjohnjiang 	for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
36004418919fSjohnjiang 		uint32_t reg = info->status_regs[i];
36014418919fSjohnjiang 
36024418919fSjohnjiang 		if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
36034418919fSjohnjiang 			continue;
36044418919fSjohnjiang 
36054418919fSjohnjiang 		if (reg_base == 0xffffffff)
36064418919fSjohnjiang 			reg_base = reg & 0xfffff000;
36074418919fSjohnjiang 		if ((reg & 0xfffff000) != reg_base)
36084418919fSjohnjiang 			return -ERANGE;
36094418919fSjohnjiang 
36104418919fSjohnjiang 		/* Use mask 0xffc as the lower 2 bits indicate the
36114418919fSjohnjiang 		 * address space location
36124418919fSjohnjiang 		 */
36134418919fSjohnjiang 		info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
36144418919fSjohnjiang 						(reg & 0xffc);
36154418919fSjohnjiang 	}
36164418919fSjohnjiang 
36174418919fSjohnjiang 	if (reg_base == 0xffffffff)
36184418919fSjohnjiang 		return 0;
36194418919fSjohnjiang 
36204418919fSjohnjiang 	rte_write32(reg_base, (uint8_t *)bp->bar0 +
36214418919fSjohnjiang 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
36224418919fSjohnjiang 
36234418919fSjohnjiang 	return 0;
36244418919fSjohnjiang }
36254418919fSjohnjiang 
36264418919fSjohnjiang static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
36274418919fSjohnjiang {
36284418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
36294418919fSjohnjiang 	uint32_t delay = info->delay_after_reset[index];
36304418919fSjohnjiang 	uint32_t val = info->reset_reg_val[index];
36314418919fSjohnjiang 	uint32_t reg = info->reset_reg[index];
36324418919fSjohnjiang 	uint32_t type, offset;
36334418919fSjohnjiang 
36344418919fSjohnjiang 	type = BNXT_FW_STATUS_REG_TYPE(reg);
36354418919fSjohnjiang 	offset = BNXT_FW_STATUS_REG_OFF(reg);
36364418919fSjohnjiang 
36374418919fSjohnjiang 	switch (type) {
36384418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_CFG:
36394418919fSjohnjiang 		rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
36404418919fSjohnjiang 		break;
36414418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_GRC:
36424418919fSjohnjiang 		offset = bnxt_map_reset_regs(bp, offset);
36434418919fSjohnjiang 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
36444418919fSjohnjiang 		break;
36454418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
36464418919fSjohnjiang 		rte_write32(val, (uint8_t *)bp->bar0 + offset);
36474418919fSjohnjiang 		break;
36484418919fSjohnjiang 	}
36494418919fSjohnjiang 	/* Wait for the FW-specified interval until the core reset is complete */
36504418919fSjohnjiang 	if (delay)
36514418919fSjohnjiang 		rte_delay_ms(delay);
36524418919fSjohnjiang }
36534418919fSjohnjiang 
36544418919fSjohnjiang static void bnxt_dev_cleanup(struct bnxt *bp)
36554418919fSjohnjiang {
36560c6bd470Sfengbojiang 	bp->eth_dev->data->dev_link.link_status = 0;
3657*2d9fd380Sjfb8856606 	bp->link_info->link_up = 0;
36584418919fSjohnjiang 	if (bp->eth_dev->data->dev_started)
36594418919fSjohnjiang 		bnxt_dev_stop_op(bp->eth_dev);
36604418919fSjohnjiang 
36614418919fSjohnjiang 	bnxt_uninit_resources(bp, true);
36624418919fSjohnjiang }
36634418919fSjohnjiang 
36644418919fSjohnjiang static int bnxt_restore_vlan_filters(struct bnxt *bp)
36654418919fSjohnjiang {
36664418919fSjohnjiang 	struct rte_eth_dev *dev = bp->eth_dev;
36674418919fSjohnjiang 	struct rte_vlan_filter_conf *vfc;
36684418919fSjohnjiang 	int vidx, vbit, rc;
36694418919fSjohnjiang 	uint16_t vlan_id;
36704418919fSjohnjiang 
36714418919fSjohnjiang 	for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
36724418919fSjohnjiang 		vfc = &dev->data->vlan_filter_conf;
36734418919fSjohnjiang 		vidx = vlan_id / 64;
36744418919fSjohnjiang 		vbit = vlan_id % 64;
36754418919fSjohnjiang 
36764418919fSjohnjiang 		/* Each bit corresponds to a VLAN id */
36774418919fSjohnjiang 		if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
36784418919fSjohnjiang 			rc = bnxt_add_vlan_filter(bp, vlan_id);
36794418919fSjohnjiang 			if (rc)
36804418919fSjohnjiang 				return rc;
36814418919fSjohnjiang 		}
36824418919fSjohnjiang 	}
36834418919fSjohnjiang 
36844418919fSjohnjiang 	return 0;
36854418919fSjohnjiang }
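/*
 * Editor's note: the loop above walks dev->data->vlan_filter_conf, a
 * 4096-bit bitmap stored as 64 uint64_t words. vidx selects the word and
 * vbit the bit within it, e.g. VLAN ID 100 maps to vidx = 1, vbit = 36, so
 * the filter is re-added only if (ids[1] >> 36) & 1 is set.
 */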
36864418919fSjohnjiang 
36874418919fSjohnjiang static int bnxt_restore_mac_filters(struct bnxt *bp)
36884418919fSjohnjiang {
36894418919fSjohnjiang 	struct rte_eth_dev *dev = bp->eth_dev;
36904418919fSjohnjiang 	struct rte_eth_dev_info dev_info;
36914418919fSjohnjiang 	struct rte_ether_addr *addr;
36924418919fSjohnjiang 	uint64_t pool_mask;
36934418919fSjohnjiang 	uint32_t pool = 0;
36944418919fSjohnjiang 	uint16_t i;
36954418919fSjohnjiang 	int rc;
36964418919fSjohnjiang 
36970c6bd470Sfengbojiang 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
36984418919fSjohnjiang 		return 0;
36994418919fSjohnjiang 
37004418919fSjohnjiang 	rc = bnxt_dev_info_get_op(dev, &dev_info);
37014418919fSjohnjiang 	if (rc)
37024418919fSjohnjiang 		return rc;
37034418919fSjohnjiang 
37044418919fSjohnjiang 	/* replay MAC address configuration */
37054418919fSjohnjiang 	for (i = 1; i < dev_info.max_mac_addrs; i++) {
37064418919fSjohnjiang 		addr = &dev->data->mac_addrs[i];
37074418919fSjohnjiang 
37084418919fSjohnjiang 		/* skip zero address */
37094418919fSjohnjiang 		if (rte_is_zero_ether_addr(addr))
37104418919fSjohnjiang 			continue;
37114418919fSjohnjiang 
37124418919fSjohnjiang 		pool = 0;
37134418919fSjohnjiang 		pool_mask = dev->data->mac_pool_sel[i];
37144418919fSjohnjiang 
37154418919fSjohnjiang 		do {
37164418919fSjohnjiang 			if (pool_mask & 1ULL) {
37174418919fSjohnjiang 				rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
37184418919fSjohnjiang 				if (rc)
37194418919fSjohnjiang 					return rc;
37204418919fSjohnjiang 			}
37214418919fSjohnjiang 			pool_mask >>= 1;
37224418919fSjohnjiang 			pool++;
37234418919fSjohnjiang 		} while (pool_mask);
37244418919fSjohnjiang 	}
37254418919fSjohnjiang 
37264418919fSjohnjiang 	return 0;
37274418919fSjohnjiang }
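/*
 * Editor's note: dev->data->mac_pool_sel[i] is a per-address bitmask of the
 * pools the address was added to; the do/while above re-adds the address
 * once per set bit, walking the mask one bit at a time from pool 0 upward.
 * Entry 0 is skipped here, presumably because the default MAC is restored
 * through the separate bnxt_restore_dflt_mac() path below.
 */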
37284418919fSjohnjiang 
37294418919fSjohnjiang static int bnxt_restore_filters(struct bnxt *bp)
37304418919fSjohnjiang {
37314418919fSjohnjiang 	struct rte_eth_dev *dev = bp->eth_dev;
37324418919fSjohnjiang 	int ret = 0;
37334418919fSjohnjiang 
37344418919fSjohnjiang 	if (dev->data->all_multicast) {
37354418919fSjohnjiang 		ret = bnxt_allmulticast_enable_op(dev);
37364418919fSjohnjiang 		if (ret)
37374418919fSjohnjiang 			return ret;
37384418919fSjohnjiang 	}
37394418919fSjohnjiang 	if (dev->data->promiscuous) {
37404418919fSjohnjiang 		ret = bnxt_promiscuous_enable_op(dev);
37414418919fSjohnjiang 		if (ret)
37424418919fSjohnjiang 			return ret;
37434418919fSjohnjiang 	}
37444418919fSjohnjiang 
37454418919fSjohnjiang 	ret = bnxt_restore_mac_filters(bp);
37464418919fSjohnjiang 	if (ret)
37474418919fSjohnjiang 		return ret;
37484418919fSjohnjiang 
37494418919fSjohnjiang 	ret = bnxt_restore_vlan_filters(bp);
37504418919fSjohnjiang 	/* TODO restore other filters as well */
37514418919fSjohnjiang 	return ret;
37524418919fSjohnjiang }
37534418919fSjohnjiang 
37544418919fSjohnjiang static void bnxt_dev_recover(void *arg)
37554418919fSjohnjiang {
37564418919fSjohnjiang 	struct bnxt *bp = arg;
37574418919fSjohnjiang 	int timeout = bp->fw_reset_max_msecs;
37584418919fSjohnjiang 	int rc = 0;
37594418919fSjohnjiang 
37604418919fSjohnjiang 	/* Clear the error flag so that device re-init can happen */
37614418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
37624418919fSjohnjiang 
37634418919fSjohnjiang 	do {
37644418919fSjohnjiang 		rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
37654418919fSjohnjiang 		if (rc == 0)
37664418919fSjohnjiang 			break;
37674418919fSjohnjiang 		rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
37684418919fSjohnjiang 		timeout -= BNXT_FW_READY_WAIT_INTERVAL;
37694418919fSjohnjiang 	} while (rc && timeout);
37704418919fSjohnjiang 
37714418919fSjohnjiang 	if (rc) {
37724418919fSjohnjiang 		PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
37734418919fSjohnjiang 		goto err;
37744418919fSjohnjiang 	}
37754418919fSjohnjiang 
37764418919fSjohnjiang 	rc = bnxt_init_resources(bp, true);
37774418919fSjohnjiang 	if (rc) {
37784418919fSjohnjiang 		PMD_DRV_LOG(ERR,
37794418919fSjohnjiang 			    "Failed to initialize resources after reset\n");
37804418919fSjohnjiang 		goto err;
37814418919fSjohnjiang 	}
37824418919fSjohnjiang 	/* clear reset flag as the device is initialized now */
37834418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_FW_RESET;
37844418919fSjohnjiang 
37854418919fSjohnjiang 	rc = bnxt_dev_start_op(bp->eth_dev);
37864418919fSjohnjiang 	if (rc) {
37874418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
37884418919fSjohnjiang 		goto err_start;
37894418919fSjohnjiang 	}
37904418919fSjohnjiang 
37914418919fSjohnjiang 	rc = bnxt_restore_filters(bp);
37924418919fSjohnjiang 	if (rc)
37934418919fSjohnjiang 		goto err_start;
37944418919fSjohnjiang 
37954418919fSjohnjiang 	PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
37964418919fSjohnjiang 	return;
37974418919fSjohnjiang err_start:
37984418919fSjohnjiang 	bnxt_dev_stop_op(bp->eth_dev);
37994418919fSjohnjiang err:
38004418919fSjohnjiang 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
38014418919fSjohnjiang 	bnxt_uninit_resources(bp, false);
38024418919fSjohnjiang 	PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
38034418919fSjohnjiang }
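/*
 * Editor's note: recovery above is gated on the firmware answering
 * HWRM_VER_GET again. The loop retries with the short command timeout every
 * BNXT_FW_READY_WAIT_INTERVAL milliseconds until either the FW responds or
 * roughly fw_reset_max_msecs has elapsed; only then are resources
 * re-initialized, the port restarted and the filter configuration replayed.
 */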
38044418919fSjohnjiang 
38054418919fSjohnjiang void bnxt_dev_reset_and_resume(void *arg)
38064418919fSjohnjiang {
38074418919fSjohnjiang 	struct bnxt *bp = arg;
38084418919fSjohnjiang 	int rc;
38094418919fSjohnjiang 
38104418919fSjohnjiang 	bnxt_dev_cleanup(bp);
38114418919fSjohnjiang 
38124418919fSjohnjiang 	bnxt_wait_for_device_shutdown(bp);
38134418919fSjohnjiang 
38144418919fSjohnjiang 	rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
38154418919fSjohnjiang 			       bnxt_dev_recover, (void *)bp);
38164418919fSjohnjiang 	if (rc)
38174418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Error setting recovery alarm");
38184418919fSjohnjiang }
38194418919fSjohnjiang 
38204418919fSjohnjiang uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
38214418919fSjohnjiang {
38224418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
38234418919fSjohnjiang 	uint32_t reg = info->status_regs[index];
38244418919fSjohnjiang 	uint32_t type, offset, val = 0;
38254418919fSjohnjiang 
38264418919fSjohnjiang 	type = BNXT_FW_STATUS_REG_TYPE(reg);
38274418919fSjohnjiang 	offset = BNXT_FW_STATUS_REG_OFF(reg);
38284418919fSjohnjiang 
38294418919fSjohnjiang 	switch (type) {
38304418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_CFG:
38314418919fSjohnjiang 		rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
38324418919fSjohnjiang 		break;
38334418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_GRC:
38344418919fSjohnjiang 		offset = info->mapped_status_regs[index];
38354418919fSjohnjiang 		/* FALLTHROUGH */
38364418919fSjohnjiang 	case BNXT_FW_STATUS_REG_TYPE_BAR0:
38374418919fSjohnjiang 		val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
38384418919fSjohnjiang 				       offset));
38394418919fSjohnjiang 		break;
38404418919fSjohnjiang 	}
38414418919fSjohnjiang 
38424418919fSjohnjiang 	return val;
38434418919fSjohnjiang }
38444418919fSjohnjiang 
38454418919fSjohnjiang static int bnxt_fw_reset_all(struct bnxt *bp)
38464418919fSjohnjiang {
38474418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
38484418919fSjohnjiang 	uint32_t i;
38494418919fSjohnjiang 	int rc = 0;
38504418919fSjohnjiang 
38514418919fSjohnjiang 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
38524418919fSjohnjiang 		/* Reset through master function driver */
38534418919fSjohnjiang 		for (i = 0; i < info->reg_array_cnt; i++)
38544418919fSjohnjiang 			bnxt_write_fw_reset_reg(bp, i);
38554418919fSjohnjiang 		/* Wait for time specified by FW after triggering reset */
38564418919fSjohnjiang 		rte_delay_ms(info->master_func_wait_period_after_reset);
38574418919fSjohnjiang 	} else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
38584418919fSjohnjiang 		/* Reset with the help of Kong processor */
38594418919fSjohnjiang 		rc = bnxt_hwrm_fw_reset(bp);
38604418919fSjohnjiang 		if (rc)
38614418919fSjohnjiang 			PMD_DRV_LOG(ERR, "Failed to reset FW\n");
38624418919fSjohnjiang 	}
38634418919fSjohnjiang 
38644418919fSjohnjiang 	return rc;
38654418919fSjohnjiang }
38664418919fSjohnjiang 
38674418919fSjohnjiang static void bnxt_fw_reset_cb(void *arg)
38684418919fSjohnjiang {
38694418919fSjohnjiang 	struct bnxt *bp = arg;
38704418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
38714418919fSjohnjiang 	int rc = 0;
38724418919fSjohnjiang 
38734418919fSjohnjiang 	/* Only Master function can do FW reset */
38744418919fSjohnjiang 	if (bnxt_is_master_func(bp) &&
38754418919fSjohnjiang 	    bnxt_is_recovery_enabled(bp)) {
38764418919fSjohnjiang 		rc = bnxt_fw_reset_all(bp);
38774418919fSjohnjiang 		if (rc) {
38784418919fSjohnjiang 			PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
38794418919fSjohnjiang 			return;
38804418919fSjohnjiang 		}
38814418919fSjohnjiang 	}
38824418919fSjohnjiang 
38834418919fSjohnjiang 	/* If the recovery method is ERROR_RECOVERY_CO_CPU, KONG sends the
38844418919fSjohnjiang 	 * EXCEPTION_FATAL_ASYNC event to all the functions
38854418919fSjohnjiang 	 * (including the MASTER FUNC). After receiving this async event, all
38864418919fSjohnjiang 	 * active drivers should treat this case as FW-initiated recovery.
38874418919fSjohnjiang 	 */
38884418919fSjohnjiang 	if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
38894418919fSjohnjiang 		bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
38904418919fSjohnjiang 		bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
38914418919fSjohnjiang 
38924418919fSjohnjiang 		/* To recover from error */
38934418919fSjohnjiang 		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
38944418919fSjohnjiang 				  (void *)bp);
38954418919fSjohnjiang 	}
38964418919fSjohnjiang }
38974418919fSjohnjiang 
38984418919fSjohnjiang /* The driver should poll the FW heartbeat and reset_counter registers at
38994418919fSjohnjiang  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
39004418919fSjohnjiang  * When the driver detects a heartbeat stop or a change in reset_counter,
39014418919fSjohnjiang  * it must trigger a reset to recover from the error condition.
39024418919fSjohnjiang  * A "master PF" is the function that has the privilege to initiate the
39034418919fSjohnjiang  * chimp reset; the master PF is elected by the firmware and notified
39044418919fSjohnjiang  * through an async message.
39054418919fSjohnjiang  */
39064418919fSjohnjiang static void bnxt_check_fw_health(void *arg)
39074418919fSjohnjiang {
39084418919fSjohnjiang 	struct bnxt *bp = arg;
39094418919fSjohnjiang 	struct bnxt_error_recovery_info *info = bp->recovery_info;
39104418919fSjohnjiang 	uint32_t val = 0, wait_msec;
39114418919fSjohnjiang 
39124418919fSjohnjiang 	if (!info || !bnxt_is_recovery_enabled(bp) ||
39134418919fSjohnjiang 	    is_bnxt_in_error(bp))
39144418919fSjohnjiang 		return;
39154418919fSjohnjiang 
39164418919fSjohnjiang 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
39174418919fSjohnjiang 	if (val == info->last_heart_beat)
39184418919fSjohnjiang 		goto reset;
39194418919fSjohnjiang 
39204418919fSjohnjiang 	info->last_heart_beat = val;
39214418919fSjohnjiang 
39224418919fSjohnjiang 	val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
39234418919fSjohnjiang 	if (val != info->last_reset_counter)
39244418919fSjohnjiang 		goto reset;
39254418919fSjohnjiang 
39264418919fSjohnjiang 	info->last_reset_counter = val;
39274418919fSjohnjiang 
39284418919fSjohnjiang 	rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
39294418919fSjohnjiang 			  bnxt_check_fw_health, (void *)bp);
39304418919fSjohnjiang 
39314418919fSjohnjiang 	return;
39324418919fSjohnjiang reset:
39334418919fSjohnjiang 	/* Stop DMA to/from device */
39344418919fSjohnjiang 	bp->flags |= BNXT_FLAG_FATAL_ERROR;
39354418919fSjohnjiang 	bp->flags |= BNXT_FLAG_FW_RESET;
39364418919fSjohnjiang 
39374418919fSjohnjiang 	PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
39384418919fSjohnjiang 
39394418919fSjohnjiang 	if (bnxt_is_master_func(bp))
39404418919fSjohnjiang 		wait_msec = info->master_func_wait_period;
39414418919fSjohnjiang 	else
39424418919fSjohnjiang 		wait_msec = info->normal_func_wait_period;
39434418919fSjohnjiang 
39444418919fSjohnjiang 	rte_eal_alarm_set(US_PER_MS * wait_msec,
39454418919fSjohnjiang 			  bnxt_fw_reset_cb, (void *)bp);
39464418919fSjohnjiang }
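/*
 * Editor's note: the health check above re-arms itself every
 * driver_polling_freq milliseconds while the FW looks alive. Two conditions
 * are treated as a dead or resetting firmware: the heartbeat counter not
 * advancing between polls, or the reset counter changing. In either case the
 * FATAL and FW_RESET flags are set to stop DMA to/from the device, and
 * bnxt_fw_reset_cb() is scheduled after the FW-advertised wait period
 * (master vs. normal function).
 */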
39474418919fSjohnjiang 
39484418919fSjohnjiang void bnxt_schedule_fw_health_check(struct bnxt *bp)
39494418919fSjohnjiang {
39504418919fSjohnjiang 	uint32_t polling_freq;
39514418919fSjohnjiang 
39520c6bd470Sfengbojiang 	pthread_mutex_lock(&bp->health_check_lock);
39530c6bd470Sfengbojiang 
39544418919fSjohnjiang 	if (!bnxt_is_recovery_enabled(bp))
39550c6bd470Sfengbojiang 		goto done;
39564418919fSjohnjiang 
39574418919fSjohnjiang 	if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
39580c6bd470Sfengbojiang 		goto done;
39594418919fSjohnjiang 
39604418919fSjohnjiang 	polling_freq = bp->recovery_info->driver_polling_freq;
39614418919fSjohnjiang 
39624418919fSjohnjiang 	rte_eal_alarm_set(US_PER_MS * polling_freq,
39634418919fSjohnjiang 			  bnxt_check_fw_health, (void *)bp);
39644418919fSjohnjiang 	bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
39650c6bd470Sfengbojiang 
39660c6bd470Sfengbojiang done:
39670c6bd470Sfengbojiang 	pthread_mutex_unlock(&bp->health_check_lock);
39684418919fSjohnjiang }
39694418919fSjohnjiang 
39704418919fSjohnjiang static void bnxt_cancel_fw_health_check(struct bnxt *bp)
39714418919fSjohnjiang {
39724418919fSjohnjiang 	if (!bnxt_is_recovery_enabled(bp))
39734418919fSjohnjiang 		return;
39744418919fSjohnjiang 
39754418919fSjohnjiang 	rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
39764418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
39774418919fSjohnjiang }
39784418919fSjohnjiang 
3979*2d9fd380Sjfb8856606 static bool bnxt_vf_pciid(uint16_t device_id)
3980a9643ea8Slogwang {
3981*2d9fd380Sjfb8856606 	switch (device_id) {
3982*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57304_VF:
3983*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57406_VF:
3984*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_5731X_VF:
3985*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_5741X_VF:
3986*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57414_VF:
3987*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
3988*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
3989*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_58802_VF:
3990*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57500_VF1:
3991*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57500_VF2:
3992*2d9fd380Sjfb8856606 		/* FALLTHROUGH */
3993d30ea906Sjfb8856606 		return true;
3994*2d9fd380Sjfb8856606 	default:
3995d30ea906Sjfb8856606 		return false;
3996d30ea906Sjfb8856606 	}
3997*2d9fd380Sjfb8856606 }
3998d30ea906Sjfb8856606 
3999*2d9fd380Sjfb8856606 static bool bnxt_thor_device(uint16_t device_id)
40004418919fSjohnjiang {
4001*2d9fd380Sjfb8856606 	switch (device_id) {
4002*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57508:
4003*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57504:
4004*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57502:
4005*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57508_MF1:
4006*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57504_MF1:
4007*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57502_MF1:
4008*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57508_MF2:
4009*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57504_MF2:
4010*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57502_MF2:
4011*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57500_VF1:
4012*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_57500_VF2:
4013*2d9fd380Sjfb8856606 		/* FALLTHROUGH */
40144418919fSjohnjiang 		return true;
4015*2d9fd380Sjfb8856606 	default:
40164418919fSjohnjiang 		return false;
40174418919fSjohnjiang 	}
4018*2d9fd380Sjfb8856606 }
40194418919fSjohnjiang 
4020d30ea906Sjfb8856606 bool bnxt_stratus_device(struct bnxt *bp)
4021d30ea906Sjfb8856606 {
4022*2d9fd380Sjfb8856606 	uint16_t device_id = bp->pdev->id.device_id;
4023d30ea906Sjfb8856606 
4024*2d9fd380Sjfb8856606 	switch (device_id) {
4025*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_STRATUS_NIC:
4026*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4027*2d9fd380Sjfb8856606 	case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4028*2d9fd380Sjfb8856606 		/* FALLTHROUGH */
4029a9643ea8Slogwang 		return true;
4030*2d9fd380Sjfb8856606 	default:
4031a9643ea8Slogwang 		return false;
4032a9643ea8Slogwang 	}
4033*2d9fd380Sjfb8856606 }
4034a9643ea8Slogwang 
4035a9643ea8Slogwang static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4036a9643ea8Slogwang {
40372bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
40384b05018fSfengbojiang 	struct bnxt *bp = eth_dev->data->dev_private;
4039a9643ea8Slogwang 
4040a9643ea8Slogwang 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
40414b05018fSfengbojiang 	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
40424b05018fSfengbojiang 	bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
40434b05018fSfengbojiang 	if (!bp->bar0 || !bp->doorbell_base) {
40444b05018fSfengbojiang 		PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
40454b05018fSfengbojiang 		return -ENODEV;
4046a9643ea8Slogwang 	}
4047a9643ea8Slogwang 
4048a9643ea8Slogwang 	bp->eth_dev = eth_dev;
40492bfe3f2eSlogwang 	bp->pdev = pci_dev;
4050a9643ea8Slogwang 
4051a9643ea8Slogwang 	return 0;
4052a9643ea8Slogwang }
4053a9643ea8Slogwang 
4054*2d9fd380Sjfb8856606 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
40554418919fSjohnjiang 				  struct bnxt_ctx_pg_info *ctx_pg,
40564418919fSjohnjiang 				  uint32_t mem_size,
40574418919fSjohnjiang 				  const char *suffix,
40584418919fSjohnjiang 				  uint16_t idx)
4059a9643ea8Slogwang {
40604418919fSjohnjiang 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
40612bfe3f2eSlogwang 	const struct rte_memzone *mz = NULL;
40624418919fSjohnjiang 	char mz_name[RTE_MEMZONE_NAMESIZE];
40632bfe3f2eSlogwang 	rte_iova_t mz_phys_addr;
40644418919fSjohnjiang 	uint64_t valid_bits = 0;
40654418919fSjohnjiang 	uint32_t sz;
40664418919fSjohnjiang 	int i;
4067a9643ea8Slogwang 
40684418919fSjohnjiang 	if (!mem_size)
40694b05018fSfengbojiang 		return 0;
40702bfe3f2eSlogwang 
40714418919fSjohnjiang 	rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
40724418919fSjohnjiang 			 BNXT_PAGE_SIZE;
40734418919fSjohnjiang 	rmem->page_size = BNXT_PAGE_SIZE;
40744418919fSjohnjiang 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
40754418919fSjohnjiang 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
40764418919fSjohnjiang 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4077a9643ea8Slogwang 
40784418919fSjohnjiang 	valid_bits = PTU_PTE_VALID;
40794418919fSjohnjiang 
40804418919fSjohnjiang 	if (rmem->nr_pages > 1) {
40814418919fSjohnjiang 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
40824418919fSjohnjiang 			 "bnxt_ctx_pg_tbl%s_%x_%d",
40834418919fSjohnjiang 			 suffix, idx, bp->eth_dev->data->port_id);
40844418919fSjohnjiang 		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
40854418919fSjohnjiang 		mz = rte_memzone_lookup(mz_name);
40864418919fSjohnjiang 		if (!mz) {
40874418919fSjohnjiang 			mz = rte_memzone_reserve_aligned(mz_name,
40884418919fSjohnjiang 						rmem->nr_pages * 8,
40894418919fSjohnjiang 						SOCKET_ID_ANY,
40904418919fSjohnjiang 						RTE_MEMZONE_2MB |
40914418919fSjohnjiang 						RTE_MEMZONE_SIZE_HINT_ONLY |
40924418919fSjohnjiang 						RTE_MEMZONE_IOVA_CONTIG,
40934418919fSjohnjiang 						BNXT_PAGE_SIZE);
40944418919fSjohnjiang 			if (mz == NULL)
40954418919fSjohnjiang 				return -ENOMEM;
4096a9643ea8Slogwang 		}
4097a9643ea8Slogwang 
40984418919fSjohnjiang 		memset(mz->addr, 0, mz->len);
40994418919fSjohnjiang 		mz_phys_addr = mz->iova;
41004418919fSjohnjiang 
41014418919fSjohnjiang 		rmem->pg_tbl = mz->addr;
41024418919fSjohnjiang 		rmem->pg_tbl_map = mz_phys_addr;
41034418919fSjohnjiang 		rmem->pg_tbl_mz = mz;
41044418919fSjohnjiang 	}
41054418919fSjohnjiang 
41064418919fSjohnjiang 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
41074418919fSjohnjiang 		 suffix, idx, bp->eth_dev->data->port_id);
41084418919fSjohnjiang 	mz = rte_memzone_lookup(mz_name);
41094418919fSjohnjiang 	if (!mz) {
41104418919fSjohnjiang 		mz = rte_memzone_reserve_aligned(mz_name,
41114418919fSjohnjiang 						 mem_size,
41124418919fSjohnjiang 						 SOCKET_ID_ANY,
41134418919fSjohnjiang 						 RTE_MEMZONE_1GB |
41144418919fSjohnjiang 						 RTE_MEMZONE_SIZE_HINT_ONLY |
41154418919fSjohnjiang 						 RTE_MEMZONE_IOVA_CONTIG,
41164418919fSjohnjiang 						 BNXT_PAGE_SIZE);
41174418919fSjohnjiang 		if (mz == NULL)
41184418919fSjohnjiang 			return -ENOMEM;
41194418919fSjohnjiang 	}
41204418919fSjohnjiang 
41214418919fSjohnjiang 	memset(mz->addr, 0, mz->len);
41224418919fSjohnjiang 	mz_phys_addr = mz->iova;
41234418919fSjohnjiang 
41244418919fSjohnjiang 	for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
41254418919fSjohnjiang 		rmem->pg_arr[i] = ((char *)mz->addr) + sz;
41264418919fSjohnjiang 		rmem->dma_arr[i] = mz_phys_addr + sz;
41274418919fSjohnjiang 
41284418919fSjohnjiang 		if (rmem->nr_pages > 1) {
41294418919fSjohnjiang 			if (i == rmem->nr_pages - 2 &&
41304418919fSjohnjiang 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
41314418919fSjohnjiang 				valid_bits |= PTU_PTE_NEXT_TO_LAST;
41324418919fSjohnjiang 			else if (i == rmem->nr_pages - 1 &&
41334418919fSjohnjiang 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
41344418919fSjohnjiang 				valid_bits |= PTU_PTE_LAST;
41354418919fSjohnjiang 
41364418919fSjohnjiang 			rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
41374418919fSjohnjiang 							   valid_bits);
41384418919fSjohnjiang 		}
41394418919fSjohnjiang 	}
41404418919fSjohnjiang 
41414418919fSjohnjiang 	rmem->mz = mz;
41424418919fSjohnjiang 	if (rmem->vmem_size)
41434418919fSjohnjiang 		rmem->vmem = (void **)mz->addr;
41444418919fSjohnjiang 	rmem->dma_arr[0] = mz_phys_addr;
41454418919fSjohnjiang 	return 0;
41464418919fSjohnjiang }
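/*
 * Editor's note: context backing-store memory is handed to the FW as a set
 * of BNXT_PAGE_SIZE pages. For a single-page block the page itself is the
 * ring memory; for multi-page blocks an additional page-table memzone of
 * nr_pages * 8 bytes is allocated and each 64-bit PTE is written as the
 * page's DMA address OR'ed with the valid bits, with NEXT_TO_LAST/LAST
 * markers applied only to ring-style blocks. For example, assuming a 4 KiB
 * BNXT_PAGE_SIZE, a 24 KiB mem_size needs nr_pages = 6 and a 48-byte page
 * table.
 */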
41474418919fSjohnjiang 
41484418919fSjohnjiang static void bnxt_free_ctx_mem(struct bnxt *bp)
41494418919fSjohnjiang {
41504418919fSjohnjiang 	int i;
41514418919fSjohnjiang 
41524418919fSjohnjiang 	if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
41534418919fSjohnjiang 		return;
41544418919fSjohnjiang 
41554418919fSjohnjiang 	bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
41564418919fSjohnjiang 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
41574418919fSjohnjiang 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
41584418919fSjohnjiang 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
41594418919fSjohnjiang 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
41604418919fSjohnjiang 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
41614418919fSjohnjiang 	rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
41624418919fSjohnjiang 	rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
41634418919fSjohnjiang 	rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
41644418919fSjohnjiang 	rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
41654418919fSjohnjiang 	rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
41664418919fSjohnjiang 
4167*2d9fd380Sjfb8856606 	for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
41684418919fSjohnjiang 		if (bp->ctx->tqm_mem[i])
41694418919fSjohnjiang 			rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
41704418919fSjohnjiang 	}
41714418919fSjohnjiang 
41724418919fSjohnjiang 	rte_free(bp->ctx);
41734418919fSjohnjiang 	bp->ctx = NULL;
41744418919fSjohnjiang }
41754418919fSjohnjiang 
41764418919fSjohnjiang #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
41774418919fSjohnjiang 
41784418919fSjohnjiang #define min_t(type, x, y) ({                    \
41794418919fSjohnjiang 	type __min1 = (x);                      \
41804418919fSjohnjiang 	type __min2 = (y);                      \
41814418919fSjohnjiang 	__min1 < __min2 ? __min1 : __min2; })
41824418919fSjohnjiang 
41834418919fSjohnjiang #define max_t(type, x, y) ({                    \
41844418919fSjohnjiang 	type __max1 = (x);                      \
41854418919fSjohnjiang 	type __max2 = (y);                      \
41864418919fSjohnjiang 	__max1 > __max2 ? __max1 : __max2; })
41874418919fSjohnjiang 
41884418919fSjohnjiang #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
41894418919fSjohnjiang 
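/*
 * Editor's note: the helpers above mirror the Linux kernel's min_t/max_t/
 * clamp_t macros and evaluate each argument exactly once. For example,
 * clamp_t(uint32_t, entries, 1024, 4096) yields 1024 when entries < 1024,
 * 4096 when entries > 4096, and entries itself otherwise (the bounds here
 * are illustrative only).
 */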
41904418919fSjohnjiang int bnxt_alloc_ctx_mem(struct bnxt *bp)
41914418919fSjohnjiang {
41924418919fSjohnjiang 	struct bnxt_ctx_pg_info *ctx_pg;
41934418919fSjohnjiang 	struct bnxt_ctx_mem_info *ctx;
41944418919fSjohnjiang 	uint32_t mem_size, ena, entries;
4195*2d9fd380Sjfb8856606 	uint32_t entries_sp, min;
41964418919fSjohnjiang 	int i, rc;
41974418919fSjohnjiang 
41984418919fSjohnjiang 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
41994418919fSjohnjiang 	if (rc) {
42004418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
42014418919fSjohnjiang 		return rc;
42024418919fSjohnjiang 	}
42034418919fSjohnjiang 	ctx = bp->ctx;
42044418919fSjohnjiang 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
42054418919fSjohnjiang 		return 0;
42064418919fSjohnjiang 
42074418919fSjohnjiang 	ctx_pg = &ctx->qp_mem;
42084418919fSjohnjiang 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
42094418919fSjohnjiang 	mem_size = ctx->qp_entry_size * ctx_pg->entries;
42104418919fSjohnjiang 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
42114418919fSjohnjiang 	if (rc)
42124418919fSjohnjiang 		return rc;
42134418919fSjohnjiang 
42144418919fSjohnjiang 	ctx_pg = &ctx->srq_mem;
42154418919fSjohnjiang 	ctx_pg->entries = ctx->srq_max_l2_entries;
42164418919fSjohnjiang 	mem_size = ctx->srq_entry_size * ctx_pg->entries;
42174418919fSjohnjiang 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
42184418919fSjohnjiang 	if (rc)
42194418919fSjohnjiang 		return rc;
42204418919fSjohnjiang 
42214418919fSjohnjiang 	ctx_pg = &ctx->cq_mem;
42224418919fSjohnjiang 	ctx_pg->entries = ctx->cq_max_l2_entries;
42234418919fSjohnjiang 	mem_size = ctx->cq_entry_size * ctx_pg->entries;
42244418919fSjohnjiang 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
42254418919fSjohnjiang 	if (rc)
42264418919fSjohnjiang 		return rc;
42274418919fSjohnjiang 
42284418919fSjohnjiang 	ctx_pg = &ctx->vnic_mem;
42294418919fSjohnjiang 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
42304418919fSjohnjiang 		ctx->vnic_max_ring_table_entries;
42314418919fSjohnjiang 	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
42324418919fSjohnjiang 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
42334418919fSjohnjiang 	if (rc)
42344418919fSjohnjiang 		return rc;
42354418919fSjohnjiang 
42364418919fSjohnjiang 	ctx_pg = &ctx->stat_mem;
42374418919fSjohnjiang 	ctx_pg->entries = ctx->stat_max_entries;
42384418919fSjohnjiang 	mem_size = ctx->stat_entry_size * ctx_pg->entries;
42394418919fSjohnjiang 	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
42404418919fSjohnjiang 	if (rc)
42414418919fSjohnjiang 		return rc;
42424418919fSjohnjiang 
4243*2d9fd380Sjfb8856606 	min = ctx->tqm_min_entries_per_ring;
4244*2d9fd380Sjfb8856606 
4245*2d9fd380Sjfb8856606 	entries_sp = ctx->qp_max_l2_entries +
42464418919fSjohnjiang 		     ctx->vnic_max_vnic_entries +
4247*2d9fd380Sjfb8856606 		     2 * ctx->qp_min_qp1_entries + min;
4248*2d9fd380Sjfb8856606 	entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4249*2d9fd380Sjfb8856606 
4250*2d9fd380Sjfb8856606 	entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
42514418919fSjohnjiang 	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4252*2d9fd380Sjfb8856606 	entries = clamp_t(uint32_t, entries, min,
42534418919fSjohnjiang 			  ctx->tqm_max_entries_per_ring);
4254*2d9fd380Sjfb8856606 	for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
42554418919fSjohnjiang 		ctx_pg = ctx->tqm_mem[i];
4256*2d9fd380Sjfb8856606 		ctx_pg->entries = i ? entries : entries_sp;
42574418919fSjohnjiang 		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
42584418919fSjohnjiang 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
42594418919fSjohnjiang 		if (rc)
42604418919fSjohnjiang 			return rc;
42614418919fSjohnjiang 		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
42624418919fSjohnjiang 	}
42634418919fSjohnjiang 
42644418919fSjohnjiang 	ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
42654418919fSjohnjiang 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
42664418919fSjohnjiang 	if (rc)
42674418919fSjohnjiang 		PMD_DRV_LOG(ERR,
42684418919fSjohnjiang 			    "Failed to configure context mem: rc = %d\n", rc);
42694418919fSjohnjiang 	else
42704418919fSjohnjiang 		ctx->flags |= BNXT_CTX_FLAG_INITED;
42714418919fSjohnjiang 
42724418919fSjohnjiang 	return rc;
42734418919fSjohnjiang }
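/*
 * Editor's note: TQM ring sizing above distinguishes the slow-path ring
 * (tqm_mem[0], sized from entries_sp) from the fast-path rings, which all
 * use the same entry count rounded to tqm_entries_multiple and clamped to
 * the FW-advertised min/max. The ENABLES_TQM_SP bit is shifted left by the
 * ring index so that one enable bit is accumulated per configured ring
 * before the backing store is programmed into the FW.
 */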
42744418919fSjohnjiang 
42754418919fSjohnjiang static int bnxt_alloc_stats_mem(struct bnxt *bp)
42764418919fSjohnjiang {
42774418919fSjohnjiang 	struct rte_pci_device *pci_dev = bp->pdev;
42784418919fSjohnjiang 	char mz_name[RTE_MEMZONE_NAMESIZE];
42794418919fSjohnjiang 	const struct rte_memzone *mz = NULL;
42804418919fSjohnjiang 	uint32_t total_alloc_len;
42814418919fSjohnjiang 	rte_iova_t mz_phys_addr;
42824418919fSjohnjiang 
42834418919fSjohnjiang 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
42844418919fSjohnjiang 		return 0;
42854418919fSjohnjiang 
42862bfe3f2eSlogwang 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
42874418919fSjohnjiang 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
42882bfe3f2eSlogwang 		 pci_dev->addr.bus, pci_dev->addr.devid,
42892bfe3f2eSlogwang 		 pci_dev->addr.function, "rx_port_stats");
42902bfe3f2eSlogwang 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
42912bfe3f2eSlogwang 	mz = rte_memzone_lookup(mz_name);
42924418919fSjohnjiang 	total_alloc_len =
42934418919fSjohnjiang 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
42944418919fSjohnjiang 				       sizeof(struct rx_port_stats_ext) + 512);
42952bfe3f2eSlogwang 	if (!mz) {
42962bfe3f2eSlogwang 		mz = rte_memzone_reserve(mz_name, total_alloc_len,
42972bfe3f2eSlogwang 					 SOCKET_ID_ANY,
42982bfe3f2eSlogwang 					 RTE_MEMZONE_2MB |
4299d30ea906Sjfb8856606 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4300d30ea906Sjfb8856606 					 RTE_MEMZONE_IOVA_CONTIG);
43012bfe3f2eSlogwang 		if (mz == NULL)
43022bfe3f2eSlogwang 			return -ENOMEM;
43032bfe3f2eSlogwang 	}
43042bfe3f2eSlogwang 	memset(mz->addr, 0, mz->len);
43052bfe3f2eSlogwang 	mz_phys_addr = mz->iova;
43062bfe3f2eSlogwang 
43072bfe3f2eSlogwang 	bp->rx_mem_zone = (const void *)mz;
43082bfe3f2eSlogwang 	bp->hw_rx_port_stats = mz->addr;
43092bfe3f2eSlogwang 	bp->hw_rx_port_stats_map = mz_phys_addr;
43102bfe3f2eSlogwang 
43112bfe3f2eSlogwang 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
43124418919fSjohnjiang 		 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
43132bfe3f2eSlogwang 		 pci_dev->addr.bus, pci_dev->addr.devid,
43142bfe3f2eSlogwang 		 pci_dev->addr.function, "tx_port_stats");
43152bfe3f2eSlogwang 	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
43162bfe3f2eSlogwang 	mz = rte_memzone_lookup(mz_name);
43174418919fSjohnjiang 	total_alloc_len =
43184418919fSjohnjiang 		RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
43194418919fSjohnjiang 				       sizeof(struct tx_port_stats_ext) + 512);
43202bfe3f2eSlogwang 	if (!mz) {
4321d30ea906Sjfb8856606 		mz = rte_memzone_reserve(mz_name,
4322d30ea906Sjfb8856606 					 total_alloc_len,
43232bfe3f2eSlogwang 					 SOCKET_ID_ANY,
43242bfe3f2eSlogwang 					 RTE_MEMZONE_2MB |
4325d30ea906Sjfb8856606 					 RTE_MEMZONE_SIZE_HINT_ONLY |
4326d30ea906Sjfb8856606 					 RTE_MEMZONE_IOVA_CONTIG);
43272bfe3f2eSlogwang 		if (mz == NULL)
43282bfe3f2eSlogwang 			return -ENOMEM;
43292bfe3f2eSlogwang 	}
43302bfe3f2eSlogwang 	memset(mz->addr, 0, mz->len);
43312bfe3f2eSlogwang 	mz_phys_addr = mz->iova;
43322bfe3f2eSlogwang 
43332bfe3f2eSlogwang 	bp->tx_mem_zone = (const void *)mz;
43342bfe3f2eSlogwang 	bp->hw_tx_port_stats = mz->addr;
43352bfe3f2eSlogwang 	bp->hw_tx_port_stats_map = mz_phys_addr;
43362bfe3f2eSlogwang 	bp->flags |= BNXT_FLAG_PORT_STATS;
4337d30ea906Sjfb8856606 
4338d30ea906Sjfb8856606 	/* Display extended statistics if FW supports it */
4339d30ea906Sjfb8856606 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
43404418919fSjohnjiang 	    bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
43414418919fSjohnjiang 	    !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
43424418919fSjohnjiang 		return 0;
4343d30ea906Sjfb8856606 
4344d30ea906Sjfb8856606 	bp->hw_rx_port_stats_ext = (void *)
43454b05018fSfengbojiang 		((uint8_t *)bp->hw_rx_port_stats +
43464b05018fSfengbojiang 		 sizeof(struct rx_port_stats));
4347d30ea906Sjfb8856606 	bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4348d30ea906Sjfb8856606 		sizeof(struct rx_port_stats);
4349d30ea906Sjfb8856606 	bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4350d30ea906Sjfb8856606 
43514418919fSjohnjiang 	if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
43524418919fSjohnjiang 	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4353d30ea906Sjfb8856606 		bp->hw_tx_port_stats_ext = (void *)
43544b05018fSfengbojiang 			((uint8_t *)bp->hw_tx_port_stats +
43554b05018fSfengbojiang 			 sizeof(struct tx_port_stats));
4356d30ea906Sjfb8856606 		bp->hw_tx_port_stats_ext_map =
4357d30ea906Sjfb8856606 			bp->hw_tx_port_stats_map +
4358d30ea906Sjfb8856606 			sizeof(struct tx_port_stats);
4359d30ea906Sjfb8856606 		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4360d30ea906Sjfb8856606 	}
43614418919fSjohnjiang 
43624418919fSjohnjiang 	return 0;
43632bfe3f2eSlogwang }
43642bfe3f2eSlogwang 
43654418919fSjohnjiang static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
43664418919fSjohnjiang {
43674418919fSjohnjiang 	struct bnxt *bp = eth_dev->data->dev_private;
43684418919fSjohnjiang 	int rc = 0;
4369a9643ea8Slogwang 
4370a9643ea8Slogwang 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
43714418919fSjohnjiang 					       RTE_ETHER_ADDR_LEN *
43724418919fSjohnjiang 					       bp->max_l2_ctx,
43734418919fSjohnjiang 					       0);
4374a9643ea8Slogwang 	if (eth_dev->data->mac_addrs == NULL) {
43754418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
43764418919fSjohnjiang 		return -ENOMEM;
4377a9643ea8Slogwang 	}
4378d30ea906Sjfb8856606 
43790c6bd470Sfengbojiang 	if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
43804418919fSjohnjiang 		if (BNXT_PF(bp))
43814418919fSjohnjiang 			return -EINVAL;
43822bfe3f2eSlogwang 
43834418919fSjohnjiang 		/* Generate a random MAC address, if none was assigned by PF */
43844418919fSjohnjiang 		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
43854418919fSjohnjiang 		bnxt_eth_hw_addr_random(bp->mac_addr);
43864418919fSjohnjiang 		PMD_DRV_LOG(INFO,
43874418919fSjohnjiang 			    "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
43884418919fSjohnjiang 			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
43894418919fSjohnjiang 			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
43904418919fSjohnjiang 
43914418919fSjohnjiang 		rc = bnxt_hwrm_set_mac(bp);
43920c6bd470Sfengbojiang 		if (rc)
43934418919fSjohnjiang 			return rc;
43942bfe3f2eSlogwang 	}
43952bfe3f2eSlogwang 
43964418919fSjohnjiang 	/* Copy the permanent MAC from the FUNC_QCAPS response */
43974418919fSjohnjiang 	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
43984418919fSjohnjiang 
43994418919fSjohnjiang 	return rc;
44004418919fSjohnjiang }
44014418919fSjohnjiang 
44024418919fSjohnjiang static int bnxt_restore_dflt_mac(struct bnxt *bp)
44034418919fSjohnjiang {
44044418919fSjohnjiang 	int rc = 0;
44054418919fSjohnjiang 
44064418919fSjohnjiang 	/* MAC is already configured in FW */
44070c6bd470Sfengbojiang 	if (BNXT_HAS_DFLT_MAC_SET(bp))
44084418919fSjohnjiang 		return 0;
44094418919fSjohnjiang 
44104418919fSjohnjiang 	/* Restore the old MAC configured */
44114418919fSjohnjiang 	rc = bnxt_hwrm_set_mac(bp);
44124418919fSjohnjiang 	if (rc)
44134418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
44144418919fSjohnjiang 
44154418919fSjohnjiang 	return rc;
44164418919fSjohnjiang }
44174418919fSjohnjiang 
44184418919fSjohnjiang static void bnxt_config_vf_req_fwd(struct bnxt *bp)
44194418919fSjohnjiang {
44204418919fSjohnjiang 	if (!BNXT_PF(bp))
44214418919fSjohnjiang 		return;
44224418919fSjohnjiang 
4423*2d9fd380Sjfb8856606 	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
4424*2d9fd380Sjfb8856606 
4425*2d9fd380Sjfb8856606 	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
4426*2d9fd380Sjfb8856606 		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
4427*2d9fd380Sjfb8856606 	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
4428*2d9fd380Sjfb8856606 	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
4429*2d9fd380Sjfb8856606 	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
4430*2d9fd380Sjfb8856606 	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
4431a9643ea8Slogwang }
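/*
 * Editor's note: pf->vf_req_fwd appears to act as a bitmap of HWRM command
 * IDs that the FW should forward from VFs to this PF driver rather than
 * handle itself; BNXT_HWRM_CMD_TO_FORWARD() presumably sets the bit for the
 * named command. HWRM_PORT_PHY_QCFG is only forwarded when the FW lacks the
 * LINK_ADMIN capability.
 */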
4432a9643ea8Slogwang 
4433*2d9fd380Sjfb8856606 uint16_t
4434*2d9fd380Sjfb8856606 bnxt_get_svif(uint16_t port_id, bool func_svif,
4435*2d9fd380Sjfb8856606 	      enum bnxt_ulp_intf_type type)
4436*2d9fd380Sjfb8856606 {
4437*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4438*2d9fd380Sjfb8856606 	struct bnxt *bp;
4439*2d9fd380Sjfb8856606 
4440*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port_id];
4441*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4442*2d9fd380Sjfb8856606 		struct bnxt_representor *vfr = eth_dev->data->dev_private;
4443*2d9fd380Sjfb8856606 		if (!vfr)
4444*2d9fd380Sjfb8856606 			return 0;
4445*2d9fd380Sjfb8856606 
4446*2d9fd380Sjfb8856606 		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4447*2d9fd380Sjfb8856606 			return vfr->svif;
4448*2d9fd380Sjfb8856606 
4449*2d9fd380Sjfb8856606 		eth_dev = vfr->parent_dev;
4450*2d9fd380Sjfb8856606 	}
4451*2d9fd380Sjfb8856606 
4452*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4453*2d9fd380Sjfb8856606 
4454*2d9fd380Sjfb8856606 	return func_svif ? bp->func_svif : bp->port_svif;
4455*2d9fd380Sjfb8856606 }
4456*2d9fd380Sjfb8856606 
4457*2d9fd380Sjfb8856606 uint16_t
4458*2d9fd380Sjfb8856606 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
4459*2d9fd380Sjfb8856606 {
4460*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4461*2d9fd380Sjfb8856606 	struct bnxt_vnic_info *vnic;
4462*2d9fd380Sjfb8856606 	struct bnxt *bp;
4463*2d9fd380Sjfb8856606 
4464*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port];
4465*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4466*2d9fd380Sjfb8856606 		struct bnxt_representor *vfr = eth_dev->data->dev_private;
4467*2d9fd380Sjfb8856606 		if (!vfr)
4468*2d9fd380Sjfb8856606 			return 0;
4469*2d9fd380Sjfb8856606 
4470*2d9fd380Sjfb8856606 		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4471*2d9fd380Sjfb8856606 			return vfr->dflt_vnic_id;
4472*2d9fd380Sjfb8856606 
4473*2d9fd380Sjfb8856606 		eth_dev = vfr->parent_dev;
4474*2d9fd380Sjfb8856606 	}
4475*2d9fd380Sjfb8856606 
4476*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4477*2d9fd380Sjfb8856606 
4478*2d9fd380Sjfb8856606 	vnic = BNXT_GET_DEFAULT_VNIC(bp);
4479*2d9fd380Sjfb8856606 
4480*2d9fd380Sjfb8856606 	return vnic->fw_vnic_id;
4481*2d9fd380Sjfb8856606 }
4482*2d9fd380Sjfb8856606 
4483*2d9fd380Sjfb8856606 uint16_t
4484*2d9fd380Sjfb8856606 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
4485*2d9fd380Sjfb8856606 {
4486*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4487*2d9fd380Sjfb8856606 	struct bnxt *bp;
4488*2d9fd380Sjfb8856606 
4489*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port];
4490*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4491*2d9fd380Sjfb8856606 		struct bnxt_representor *vfr = eth_dev->data->dev_private;
4492*2d9fd380Sjfb8856606 		if (!vfr)
4493*2d9fd380Sjfb8856606 			return 0;
4494*2d9fd380Sjfb8856606 
4495*2d9fd380Sjfb8856606 		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4496*2d9fd380Sjfb8856606 			return vfr->fw_fid;
4497*2d9fd380Sjfb8856606 
4498*2d9fd380Sjfb8856606 		eth_dev = vfr->parent_dev;
4499*2d9fd380Sjfb8856606 	}
4500*2d9fd380Sjfb8856606 
4501*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4502*2d9fd380Sjfb8856606 
4503*2d9fd380Sjfb8856606 	return bp->fw_fid;
4504*2d9fd380Sjfb8856606 }
4505*2d9fd380Sjfb8856606 
4506*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type
4507*2d9fd380Sjfb8856606 bnxt_get_interface_type(uint16_t port)
4508*2d9fd380Sjfb8856606 {
4509*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4510*2d9fd380Sjfb8856606 	struct bnxt *bp;
4511*2d9fd380Sjfb8856606 
4512*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port];
4513*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
4514*2d9fd380Sjfb8856606 		return BNXT_ULP_INTF_TYPE_VF_REP;
4515*2d9fd380Sjfb8856606 
4516*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4517*2d9fd380Sjfb8856606 	if (BNXT_PF(bp))
4518*2d9fd380Sjfb8856606 		return BNXT_ULP_INTF_TYPE_PF;
4519*2d9fd380Sjfb8856606 	else if (BNXT_VF_IS_TRUSTED(bp))
4520*2d9fd380Sjfb8856606 		return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
4521*2d9fd380Sjfb8856606 	else if (BNXT_VF(bp))
4522*2d9fd380Sjfb8856606 		return BNXT_ULP_INTF_TYPE_VF;
4523*2d9fd380Sjfb8856606 
4524*2d9fd380Sjfb8856606 	return BNXT_ULP_INTF_TYPE_INVALID;
4525*2d9fd380Sjfb8856606 }
4526*2d9fd380Sjfb8856606 
4527*2d9fd380Sjfb8856606 uint16_t
4528*2d9fd380Sjfb8856606 bnxt_get_phy_port_id(uint16_t port_id)
4529*2d9fd380Sjfb8856606 {
4530*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr;
4531*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4532*2d9fd380Sjfb8856606 	struct bnxt *bp;
4533*2d9fd380Sjfb8856606 
4534*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port_id];
4535*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4536*2d9fd380Sjfb8856606 		vfr = eth_dev->data->dev_private;
4537*2d9fd380Sjfb8856606 		if (!vfr)
4538*2d9fd380Sjfb8856606 			return 0;
4539*2d9fd380Sjfb8856606 
4540*2d9fd380Sjfb8856606 		eth_dev = vfr->parent_dev;
4541*2d9fd380Sjfb8856606 	}
4542*2d9fd380Sjfb8856606 
4543*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4544*2d9fd380Sjfb8856606 
4545*2d9fd380Sjfb8856606 	return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
4546*2d9fd380Sjfb8856606 }
4547*2d9fd380Sjfb8856606 
4548*2d9fd380Sjfb8856606 uint16_t
4549*2d9fd380Sjfb8856606 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
4550*2d9fd380Sjfb8856606 {
4551*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
4552*2d9fd380Sjfb8856606 	struct bnxt *bp;
4553*2d9fd380Sjfb8856606 
4554*2d9fd380Sjfb8856606 	eth_dev = &rte_eth_devices[port_id];
4555*2d9fd380Sjfb8856606 	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
4556*2d9fd380Sjfb8856606 		struct bnxt_representor *vfr = eth_dev->data->dev_private;
4557*2d9fd380Sjfb8856606 		if (!vfr)
4558*2d9fd380Sjfb8856606 			return 0;
4559*2d9fd380Sjfb8856606 
4560*2d9fd380Sjfb8856606 		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
4561*2d9fd380Sjfb8856606 			return vfr->fw_fid - 1;
4562*2d9fd380Sjfb8856606 
4563*2d9fd380Sjfb8856606 		eth_dev = vfr->parent_dev;
4564*2d9fd380Sjfb8856606 	}
4565*2d9fd380Sjfb8856606 
4566*2d9fd380Sjfb8856606 	bp = eth_dev->data->dev_private;
4567*2d9fd380Sjfb8856606 
4568*2d9fd380Sjfb8856606 	return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
4569*2d9fd380Sjfb8856606 }
4570*2d9fd380Sjfb8856606 
4571*2d9fd380Sjfb8856606 uint16_t
4572*2d9fd380Sjfb8856606 bnxt_get_vport(uint16_t port_id)
4573*2d9fd380Sjfb8856606 {
4574*2d9fd380Sjfb8856606 	return (1 << bnxt_get_phy_port_id(port_id));
4575*2d9fd380Sjfb8856606 }
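/*
 * The vport returned above is a one-hot bit mask derived from the physical
 * port id (1 << port): physical port 0 yields vport 0x1, port 2 yields 0x4.
 * The bnxt_get_*() helpers in this block are consumed by the TruFlow/ULP
 * code, as the bnxt_ulp_intf_type parameters suggest.
 */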
4576*2d9fd380Sjfb8856606 
4577*2d9fd380Sjfb8856606 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
4578*2d9fd380Sjfb8856606 {
4579*2d9fd380Sjfb8856606 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4580*2d9fd380Sjfb8856606 
4581*2d9fd380Sjfb8856606 	if (info) {
4582*2d9fd380Sjfb8856606 		if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
4583*2d9fd380Sjfb8856606 			memset(info, 0, sizeof(*info));
4584*2d9fd380Sjfb8856606 		return;
4585*2d9fd380Sjfb8856606 	}
4586*2d9fd380Sjfb8856606 
4587*2d9fd380Sjfb8856606 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4588*2d9fd380Sjfb8856606 		return;
4589*2d9fd380Sjfb8856606 
4590*2d9fd380Sjfb8856606 	info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4591*2d9fd380Sjfb8856606 			   sizeof(*info), 0);
4592*2d9fd380Sjfb8856606 	if (!info)
4593*2d9fd380Sjfb8856606 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
4594*2d9fd380Sjfb8856606 
4595*2d9fd380Sjfb8856606 	bp->recovery_info = info;
4596*2d9fd380Sjfb8856606 }
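/*
 * Allocation failure above is not fatal: the driver simply clears
 * BNXT_FW_CAP_ERROR_RECOVERY and continues without FW error recovery
 * support.  If a buffer already exists and the HCOMM FW status capability
 * is set, the existing contents are kept rather than zeroed.
 */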
4597*2d9fd380Sjfb8856606 
4598*2d9fd380Sjfb8856606 static void bnxt_check_fw_status(struct bnxt *bp)
4599*2d9fd380Sjfb8856606 {
4600*2d9fd380Sjfb8856606 	uint32_t fw_status;
4601*2d9fd380Sjfb8856606 
4602*2d9fd380Sjfb8856606 	if (!(bp->recovery_info &&
4603*2d9fd380Sjfb8856606 	      (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
4604*2d9fd380Sjfb8856606 		return;
4605*2d9fd380Sjfb8856606 
4606*2d9fd380Sjfb8856606 	fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
4607*2d9fd380Sjfb8856606 	if (fw_status != BNXT_FW_STATUS_HEALTHY)
4608*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
4609*2d9fd380Sjfb8856606 			    fw_status);
4610*2d9fd380Sjfb8856606 }
4611*2d9fd380Sjfb8856606 
4612*2d9fd380Sjfb8856606 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
4613*2d9fd380Sjfb8856606 {
4614*2d9fd380Sjfb8856606 	struct bnxt_error_recovery_info *info = bp->recovery_info;
4615*2d9fd380Sjfb8856606 	uint32_t status_loc;
4616*2d9fd380Sjfb8856606 	uint32_t sig_ver;
4617*2d9fd380Sjfb8856606 
4618*2d9fd380Sjfb8856606 	rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
4619*2d9fd380Sjfb8856606 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4620*2d9fd380Sjfb8856606 	sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4621*2d9fd380Sjfb8856606 				   BNXT_GRCP_WINDOW_2_BASE +
4622*2d9fd380Sjfb8856606 				   offsetof(struct hcomm_status,
4623*2d9fd380Sjfb8856606 					    sig_ver)));
4624*2d9fd380Sjfb8856606 	/* If the signature is absent, then FW does not support this feature */
4625*2d9fd380Sjfb8856606 	if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
4626*2d9fd380Sjfb8856606 	    HCOMM_STATUS_SIGNATURE_VAL)
4627*2d9fd380Sjfb8856606 		return 0;
4628*2d9fd380Sjfb8856606 
4629*2d9fd380Sjfb8856606 	if (!info) {
4630*2d9fd380Sjfb8856606 		info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4631*2d9fd380Sjfb8856606 				   sizeof(*info), 0);
4632*2d9fd380Sjfb8856606 		if (!info)
4633*2d9fd380Sjfb8856606 			return -ENOMEM;
4634*2d9fd380Sjfb8856606 		bp->recovery_info = info;
46352bfe3f2eSlogwang 	} else {
4636*2d9fd380Sjfb8856606 		memset(info, 0, sizeof(*info));
46372bfe3f2eSlogwang 	}
46382bfe3f2eSlogwang 
4639*2d9fd380Sjfb8856606 	status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4640*2d9fd380Sjfb8856606 				      BNXT_GRCP_WINDOW_2_BASE +
4641*2d9fd380Sjfb8856606 				      offsetof(struct hcomm_status,
4642*2d9fd380Sjfb8856606 					       fw_status_loc)));
4643*2d9fd380Sjfb8856606 
4644*2d9fd380Sjfb8856606 	/* Only pre-map the FW health status GRC register */
4645*2d9fd380Sjfb8856606 	if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
4646*2d9fd380Sjfb8856606 		return 0;
4647*2d9fd380Sjfb8856606 
4648*2d9fd380Sjfb8856606 	info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
4649*2d9fd380Sjfb8856606 	info->mapped_status_regs[BNXT_FW_STATUS_REG] =
4650*2d9fd380Sjfb8856606 		BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
4651*2d9fd380Sjfb8856606 
4652*2d9fd380Sjfb8856606 	rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
4653*2d9fd380Sjfb8856606 		    BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4654*2d9fd380Sjfb8856606 
4655*2d9fd380Sjfb8856606 	bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
4656*2d9fd380Sjfb8856606 
4657*2d9fd380Sjfb8856606 	return 0;
4658a9643ea8Slogwang }
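/*
 * The lookup above goes through GRC window 2 in two steps: first
 * HCOMM_STATUS_STRUCT_LOC is programmed as the window base so the
 * hcomm_status structure (sig_ver and fw_status_loc) can be read; then,
 * if fw_status_loc refers to a GRC register, its upper bits
 * (BNXT_GRCP_BASE_MASK) are programmed as the new window base while the
 * low bits (BNXT_GRCP_OFFSET_MASK) are recorded in mapped_status_regs[]
 * as the pre-mapped offset used for later FW health reads.
 */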
4659a9643ea8Slogwang 
46604418919fSjohnjiang static int bnxt_init_fw(struct bnxt *bp)
46614418919fSjohnjiang {
46624418919fSjohnjiang 	uint16_t mtu;
46634418919fSjohnjiang 	int rc = 0;
46644418919fSjohnjiang 
46654418919fSjohnjiang 	bp->fw_cap = 0;
46664418919fSjohnjiang 
4667*2d9fd380Sjfb8856606 	rc = bnxt_map_hcomm_fw_status_reg(bp);
46684418919fSjohnjiang 	if (rc)
46694418919fSjohnjiang 		return rc;
4670a9643ea8Slogwang 
4671*2d9fd380Sjfb8856606 	rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
4672*2d9fd380Sjfb8856606 	if (rc) {
4673*2d9fd380Sjfb8856606 		bnxt_check_fw_status(bp);
4674*2d9fd380Sjfb8856606 		return rc;
4675*2d9fd380Sjfb8856606 	}
4676*2d9fd380Sjfb8856606 
46772bfe3f2eSlogwang 	rc = bnxt_hwrm_func_reset(bp);
46784418919fSjohnjiang 	if (rc)
46794418919fSjohnjiang 		return -EIO;
46804418919fSjohnjiang 
46814418919fSjohnjiang 	rc = bnxt_hwrm_vnic_qcaps(bp);
46824418919fSjohnjiang 	if (rc)
46834418919fSjohnjiang 		return rc;
46844418919fSjohnjiang 
46854418919fSjohnjiang 	rc = bnxt_hwrm_queue_qportcfg(bp);
46864418919fSjohnjiang 	if (rc)
46874418919fSjohnjiang 		return rc;
46884418919fSjohnjiang 
46894418919fSjohnjiang 	/* Get the MAX capabilities for this function.
46904418919fSjohnjiang 	 * This function also allocates context memory for TQM rings and
46914418919fSjohnjiang 	 * informs the firmware about this allocated backing store memory.
46924418919fSjohnjiang 	 */
46934418919fSjohnjiang 	rc = bnxt_hwrm_func_qcaps(bp);
46944418919fSjohnjiang 	if (rc)
46954418919fSjohnjiang 		return rc;
46964418919fSjohnjiang 
46974418919fSjohnjiang 	rc = bnxt_hwrm_func_qcfg(bp, &mtu);
46984418919fSjohnjiang 	if (rc)
46994418919fSjohnjiang 		return rc;
47004418919fSjohnjiang 
4701*2d9fd380Sjfb8856606 	bnxt_hwrm_port_mac_qcfg(bp);
4702*2d9fd380Sjfb8856606 
4703*2d9fd380Sjfb8856606 	bnxt_hwrm_parent_pf_qcfg(bp);
4704*2d9fd380Sjfb8856606 
4705*2d9fd380Sjfb8856606 	bnxt_hwrm_port_phy_qcaps(bp);
4706*2d9fd380Sjfb8856606 
4707*2d9fd380Sjfb8856606 	bnxt_alloc_error_recovery_info(bp);
47084418919fSjohnjiang 	/* Get the adapter error recovery support info */
47094418919fSjohnjiang 	rc = bnxt_hwrm_error_recovery_qcfg(bp);
47104418919fSjohnjiang 	if (rc)
47114418919fSjohnjiang 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
47124418919fSjohnjiang 
47134418919fSjohnjiang 	bnxt_hwrm_port_led_qcaps(bp);
47144418919fSjohnjiang 
47154418919fSjohnjiang 	return 0;
47164418919fSjohnjiang }
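/*
 * Only the HWRM calls whose return codes are checked above are fatal to
 * bnxt_init_fw(); the port MAC/PHY and parent PF queries are best effort,
 * and a failing error recovery qcfg merely drops the
 * BNXT_FW_CAP_ERROR_RECOVERY capability.
 */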
47174418919fSjohnjiang 
47184418919fSjohnjiang static int
47194418919fSjohnjiang bnxt_init_locks(struct bnxt *bp)
47204418919fSjohnjiang {
47214418919fSjohnjiang 	int err;
47224418919fSjohnjiang 
47234418919fSjohnjiang 	err = pthread_mutex_init(&bp->flow_lock, NULL);
47244418919fSjohnjiang 	if (err) {
47254418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
47264418919fSjohnjiang 		return err;
47274418919fSjohnjiang 	}
47284418919fSjohnjiang 
47294418919fSjohnjiang 	err = pthread_mutex_init(&bp->def_cp_lock, NULL);
47304418919fSjohnjiang 	if (err)
47314418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
47320c6bd470Sfengbojiang 
47330c6bd470Sfengbojiang 	err = pthread_mutex_init(&bp->health_check_lock, NULL);
47340c6bd470Sfengbojiang 	if (err)
47350c6bd470Sfengbojiang 		PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
47364418919fSjohnjiang 	return err;
47374418919fSjohnjiang }
47384418919fSjohnjiang 
47394418919fSjohnjiang static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
47404418919fSjohnjiang {
4741*2d9fd380Sjfb8856606 	int rc = 0;
47424418919fSjohnjiang 
47434418919fSjohnjiang 	rc = bnxt_init_fw(bp);
47444418919fSjohnjiang 	if (rc)
47454418919fSjohnjiang 		return rc;
47464418919fSjohnjiang 
47474418919fSjohnjiang 	if (!reconfig_dev) {
47484418919fSjohnjiang 		rc = bnxt_setup_mac_addr(bp->eth_dev);
47494418919fSjohnjiang 		if (rc)
47504418919fSjohnjiang 			return rc;
47514418919fSjohnjiang 	} else {
47524418919fSjohnjiang 		rc = bnxt_restore_dflt_mac(bp);
47534418919fSjohnjiang 		if (rc)
47544418919fSjohnjiang 			return rc;
47554418919fSjohnjiang 	}
47564418919fSjohnjiang 
47574418919fSjohnjiang 	bnxt_config_vf_req_fwd(bp);
47584418919fSjohnjiang 
47594418919fSjohnjiang 	rc = bnxt_hwrm_func_driver_register(bp);
47602bfe3f2eSlogwang 	if (rc) {
47614418919fSjohnjiang 		PMD_DRV_LOG(ERR, "Failed to register driver");
47624418919fSjohnjiang 		return -EBUSY;
47632bfe3f2eSlogwang 	}
47642bfe3f2eSlogwang 
47652bfe3f2eSlogwang 	if (BNXT_PF(bp)) {
47662bfe3f2eSlogwang 		if (bp->pdev->max_vfs) {
47672bfe3f2eSlogwang 			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
47682bfe3f2eSlogwang 			if (rc) {
4769d30ea906Sjfb8856606 				PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
47704418919fSjohnjiang 				return rc;
47712bfe3f2eSlogwang 			}
47722bfe3f2eSlogwang 		} else {
47732bfe3f2eSlogwang 			rc = bnxt_hwrm_allocate_pf_only(bp);
47742bfe3f2eSlogwang 			if (rc) {
4775d30ea906Sjfb8856606 				PMD_DRV_LOG(ERR,
47764418919fSjohnjiang 					    "Failed to allocate PF resources");
47774418919fSjohnjiang 				return rc;
47782bfe3f2eSlogwang 			}
47792bfe3f2eSlogwang 		}
47802bfe3f2eSlogwang 	}
47812bfe3f2eSlogwang 
47824418919fSjohnjiang 	rc = bnxt_alloc_mem(bp, reconfig_dev);
47834418919fSjohnjiang 	if (rc)
47844418919fSjohnjiang 		return rc;
47852bfe3f2eSlogwang 
47862bfe3f2eSlogwang 	rc = bnxt_setup_int(bp);
47872bfe3f2eSlogwang 	if (rc)
47884418919fSjohnjiang 		return rc;
47892bfe3f2eSlogwang 
47902bfe3f2eSlogwang 	rc = bnxt_request_int(bp);
47912bfe3f2eSlogwang 	if (rc)
47924418919fSjohnjiang 		return rc;
47934418919fSjohnjiang 
4794*2d9fd380Sjfb8856606 	rc = bnxt_init_ctx_mem(bp);
4795*2d9fd380Sjfb8856606 	if (rc) {
4796*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
4797*2d9fd380Sjfb8856606 		return rc;
4798*2d9fd380Sjfb8856606 	}
4799*2d9fd380Sjfb8856606 
48004418919fSjohnjiang 	rc = bnxt_init_locks(bp);
48014418919fSjohnjiang 	if (rc)
48024418919fSjohnjiang 		return rc;
48034418919fSjohnjiang 
48044418919fSjohnjiang 	return 0;
48054418919fSjohnjiang }
48064418919fSjohnjiang 
48074418919fSjohnjiang static int
4808*2d9fd380Sjfb8856606 bnxt_parse_devarg_truflow(__rte_unused const char *key,
4809*2d9fd380Sjfb8856606 			  const char *value, void *opaque_arg)
4810*2d9fd380Sjfb8856606 {
4811*2d9fd380Sjfb8856606 	struct bnxt *bp = opaque_arg;
4812*2d9fd380Sjfb8856606 	unsigned long truflow;
4813*2d9fd380Sjfb8856606 	char *end = NULL;
4814*2d9fd380Sjfb8856606 
4815*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
4816*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4817*2d9fd380Sjfb8856606 			    "Invalid parameter passed to truflow devargs.\n");
4818*2d9fd380Sjfb8856606 		return -EINVAL;
4819*2d9fd380Sjfb8856606 	}
4820*2d9fd380Sjfb8856606 
4821*2d9fd380Sjfb8856606 	truflow = strtoul(value, &end, 10);
4822*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
4823*2d9fd380Sjfb8856606 	    (truflow == ULONG_MAX && errno == ERANGE)) {
4824*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4825*2d9fd380Sjfb8856606 			    "Invalid parameter passed to truflow devargs.\n");
4826*2d9fd380Sjfb8856606 		return -EINVAL;
4827*2d9fd380Sjfb8856606 	}
4828*2d9fd380Sjfb8856606 
4829*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
4830*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4831*2d9fd380Sjfb8856606 			    "Invalid value passed to truflow devargs.\n");
4832*2d9fd380Sjfb8856606 		return -EINVAL;
4833*2d9fd380Sjfb8856606 	}
4834*2d9fd380Sjfb8856606 
4835*2d9fd380Sjfb8856606 	if (truflow) {
4836*2d9fd380Sjfb8856606 		bp->flags |= BNXT_FLAG_TRUFLOW_EN;
4837*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
4838*2d9fd380Sjfb8856606 	} else {
4839*2d9fd380Sjfb8856606 		bp->flags &= ~BNXT_FLAG_TRUFLOW_EN;
4840*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n");
4841*2d9fd380Sjfb8856606 	}
4842*2d9fd380Sjfb8856606 
4843*2d9fd380Sjfb8856606 	return 0;
4844*2d9fd380Sjfb8856606 }
4845*2d9fd380Sjfb8856606 
4846*2d9fd380Sjfb8856606 static int
4847*2d9fd380Sjfb8856606 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
4848*2d9fd380Sjfb8856606 			     const char *value, void *opaque_arg)
4849*2d9fd380Sjfb8856606 {
4850*2d9fd380Sjfb8856606 	struct bnxt *bp = opaque_arg;
4851*2d9fd380Sjfb8856606 	unsigned long flow_xstat;
4852*2d9fd380Sjfb8856606 	char *end = NULL;
4853*2d9fd380Sjfb8856606 
4854*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
4855*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4856*2d9fd380Sjfb8856606 			    "Invalid parameter passed to flow_xstat devarg.\n");
4857*2d9fd380Sjfb8856606 		return -EINVAL;
4858*2d9fd380Sjfb8856606 	}
4859*2d9fd380Sjfb8856606 
4860*2d9fd380Sjfb8856606 	flow_xstat = strtoul(value, &end, 10);
4861*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
4862*2d9fd380Sjfb8856606 	    (flow_xstat == ULONG_MAX && errno == ERANGE)) {
4863*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4864*2d9fd380Sjfb8856606 			    "Invalid parameter passed to flow_xstat devarg.\n");
4865*2d9fd380Sjfb8856606 		return -EINVAL;
4866*2d9fd380Sjfb8856606 	}
4867*2d9fd380Sjfb8856606 
4868*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
4869*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4870*2d9fd380Sjfb8856606 			    "Invalid value passed to flow_xstat devarg.\n");
4871*2d9fd380Sjfb8856606 		return -EINVAL;
4872*2d9fd380Sjfb8856606 	}
4873*2d9fd380Sjfb8856606 
4874*2d9fd380Sjfb8856606 	bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
4875*2d9fd380Sjfb8856606 	if (BNXT_FLOW_XSTATS_EN(bp))
4876*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
4877*2d9fd380Sjfb8856606 
4878*2d9fd380Sjfb8856606 	return 0;
4879*2d9fd380Sjfb8856606 }
4880*2d9fd380Sjfb8856606 
4881*2d9fd380Sjfb8856606 static int
4882*2d9fd380Sjfb8856606 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
4883*2d9fd380Sjfb8856606 					const char *value, void *opaque_arg)
4884*2d9fd380Sjfb8856606 {
4885*2d9fd380Sjfb8856606 	struct bnxt *bp = opaque_arg;
4886*2d9fd380Sjfb8856606 	unsigned long max_num_kflows;
4887*2d9fd380Sjfb8856606 	char *end = NULL;
4888*2d9fd380Sjfb8856606 
4889*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
4890*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4891*2d9fd380Sjfb8856606 			"Invalid parameter passed to max_num_kflows devarg.\n");
4892*2d9fd380Sjfb8856606 		return -EINVAL;
4893*2d9fd380Sjfb8856606 	}
4894*2d9fd380Sjfb8856606 
4895*2d9fd380Sjfb8856606 	max_num_kflows = strtoul(value, &end, 10);
4896*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
4897*2d9fd380Sjfb8856606 		(max_num_kflows == ULONG_MAX && errno == ERANGE)) {
4898*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4899*2d9fd380Sjfb8856606 			"Invalid parameter passed to max_num_kflows devarg.\n");
4900*2d9fd380Sjfb8856606 		return -EINVAL;
4901*2d9fd380Sjfb8856606 	}
4902*2d9fd380Sjfb8856606 
4903*2d9fd380Sjfb8856606 	if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
4904*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4905*2d9fd380Sjfb8856606 			"Invalid value passed to max_num_kflows devarg.\n");
4906*2d9fd380Sjfb8856606 		return -EINVAL;
4907*2d9fd380Sjfb8856606 	}
4908*2d9fd380Sjfb8856606 
4909*2d9fd380Sjfb8856606 	bp->max_num_kflows = max_num_kflows;
4910*2d9fd380Sjfb8856606 	if (bp->max_num_kflows)
4911*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
4912*2d9fd380Sjfb8856606 				max_num_kflows);
4913*2d9fd380Sjfb8856606 
4914*2d9fd380Sjfb8856606 	return 0;
4915*2d9fd380Sjfb8856606 }
4916*2d9fd380Sjfb8856606 
4917*2d9fd380Sjfb8856606 static int
4918*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key,
4919*2d9fd380Sjfb8856606 			    const char *value, void *opaque_arg)
4920*2d9fd380Sjfb8856606 {
4921*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
4922*2d9fd380Sjfb8856606 	unsigned long rep_is_pf;
4923*2d9fd380Sjfb8856606 	char *end = NULL;
4924*2d9fd380Sjfb8856606 
4925*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
4926*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4927*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_is_pf devargs.\n");
4928*2d9fd380Sjfb8856606 		return -EINVAL;
4929*2d9fd380Sjfb8856606 	}
4930*2d9fd380Sjfb8856606 
4931*2d9fd380Sjfb8856606 	rep_is_pf = strtoul(value, &end, 10);
4932*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
4933*2d9fd380Sjfb8856606 	    (rep_is_pf == ULONG_MAX && errno == ERANGE)) {
4934*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4935*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_is_pf devargs.\n");
4936*2d9fd380Sjfb8856606 		return -EINVAL;
4937*2d9fd380Sjfb8856606 	}
4938*2d9fd380Sjfb8856606 
4939*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) {
4940*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4941*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_is_pf devargs.\n");
4942*2d9fd380Sjfb8856606 		return -EINVAL;
4943*2d9fd380Sjfb8856606 	}
4944*2d9fd380Sjfb8856606 
4945*2d9fd380Sjfb8856606 	vfr_bp->flags |= rep_is_pf;
4946*2d9fd380Sjfb8856606 	if (BNXT_REP_PF(vfr_bp))
4947*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "PF representor\n");
4948*2d9fd380Sjfb8856606 	else
4949*2d9fd380Sjfb8856606 		PMD_DRV_LOG(INFO, "VF representor\n");
4950*2d9fd380Sjfb8856606 
4951*2d9fd380Sjfb8856606 	return 0;
4952*2d9fd380Sjfb8856606 }
4953*2d9fd380Sjfb8856606 
4954*2d9fd380Sjfb8856606 static int
4955*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key,
4956*2d9fd380Sjfb8856606 			       const char *value, void *opaque_arg)
4957*2d9fd380Sjfb8856606 {
4958*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
4959*2d9fd380Sjfb8856606 	unsigned long rep_based_pf;
4960*2d9fd380Sjfb8856606 	char *end = NULL;
4961*2d9fd380Sjfb8856606 
4962*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
4963*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4964*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_based_pf "
4965*2d9fd380Sjfb8856606 			    "devargs.\n");
4966*2d9fd380Sjfb8856606 		return -EINVAL;
4967*2d9fd380Sjfb8856606 	}
4968*2d9fd380Sjfb8856606 
4969*2d9fd380Sjfb8856606 	rep_based_pf = strtoul(value, &end, 10);
4970*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
4971*2d9fd380Sjfb8856606 	    (rep_based_pf == ULONG_MAX && errno == ERANGE)) {
4972*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4973*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_based_pf "
4974*2d9fd380Sjfb8856606 			    "devargs.\n");
4975*2d9fd380Sjfb8856606 		return -EINVAL;
4976*2d9fd380Sjfb8856606 	}
4977*2d9fd380Sjfb8856606 
4978*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) {
4979*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
4980*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_based_pf devargs.\n");
4981*2d9fd380Sjfb8856606 		return -EINVAL;
4982*2d9fd380Sjfb8856606 	}
4983*2d9fd380Sjfb8856606 
4984*2d9fd380Sjfb8856606 	vfr_bp->rep_based_pf = rep_based_pf;
4985*2d9fd380Sjfb8856606 	vfr_bp->flags |= BNXT_REP_BASED_PF_VALID;
4986*2d9fd380Sjfb8856606 
4987*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf);
4988*2d9fd380Sjfb8856606 
4989*2d9fd380Sjfb8856606 	return 0;
4990*2d9fd380Sjfb8856606 }
4991*2d9fd380Sjfb8856606 
4992*2d9fd380Sjfb8856606 static int
4993*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key,
4994*2d9fd380Sjfb8856606 			    const char *value, void *opaque_arg)
4995*2d9fd380Sjfb8856606 {
4996*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
4997*2d9fd380Sjfb8856606 	unsigned long rep_q_r2f;
4998*2d9fd380Sjfb8856606 	char *end = NULL;
4999*2d9fd380Sjfb8856606 
5000*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
5001*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5002*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_q_r2f "
5003*2d9fd380Sjfb8856606 			    "devargs.\n");
5004*2d9fd380Sjfb8856606 		return -EINVAL;
5005*2d9fd380Sjfb8856606 	}
5006*2d9fd380Sjfb8856606 
5007*2d9fd380Sjfb8856606 	rep_q_r2f = strtoul(value, &end, 10);
5008*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
5009*2d9fd380Sjfb8856606 	    (rep_q_r2f == ULONG_MAX && errno == ERANGE)) {
5010*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5011*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_q_r2f "
5012*2d9fd380Sjfb8856606 			    "devargs.\n");
5013*2d9fd380Sjfb8856606 		return -EINVAL;
5014*2d9fd380Sjfb8856606 	}
5015*2d9fd380Sjfb8856606 
5016*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) {
5017*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5018*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_q_r2f devargs.\n");
5019*2d9fd380Sjfb8856606 		return -EINVAL;
5020*2d9fd380Sjfb8856606 	}
5021*2d9fd380Sjfb8856606 
5022*2d9fd380Sjfb8856606 	vfr_bp->rep_q_r2f = rep_q_r2f;
5023*2d9fd380Sjfb8856606 	vfr_bp->flags |= BNXT_REP_Q_R2F_VALID;
5024*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f);
5025*2d9fd380Sjfb8856606 
5026*2d9fd380Sjfb8856606 	return 0;
5027*2d9fd380Sjfb8856606 }
5028*2d9fd380Sjfb8856606 
5029*2d9fd380Sjfb8856606 static int
5030*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key,
5031*2d9fd380Sjfb8856606 			    const char *value, void *opaque_arg)
5032*2d9fd380Sjfb8856606 {
5033*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
5034*2d9fd380Sjfb8856606 	unsigned long rep_q_f2r;
5035*2d9fd380Sjfb8856606 	char *end = NULL;
5036*2d9fd380Sjfb8856606 
5037*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
5038*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5039*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_q_f2r "
5040*2d9fd380Sjfb8856606 			    "devargs.\n");
5041*2d9fd380Sjfb8856606 		return -EINVAL;
5042*2d9fd380Sjfb8856606 	}
5043*2d9fd380Sjfb8856606 
5044*2d9fd380Sjfb8856606 	rep_q_f2r = strtoul(value, &end, 10);
5045*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
5046*2d9fd380Sjfb8856606 	    (rep_q_f2r == ULONG_MAX && errno == ERANGE)) {
5047*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5048*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_q_f2r "
5049*2d9fd380Sjfb8856606 			    "devargs.\n");
5050*2d9fd380Sjfb8856606 		return -EINVAL;
5051*2d9fd380Sjfb8856606 	}
5052*2d9fd380Sjfb8856606 
5053*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) {
5054*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5055*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_q_f2r devargs.\n");
5056*2d9fd380Sjfb8856606 		return -EINVAL;
5057*2d9fd380Sjfb8856606 	}
5058*2d9fd380Sjfb8856606 
5059*2d9fd380Sjfb8856606 	vfr_bp->rep_q_f2r = rep_q_f2r;
5060*2d9fd380Sjfb8856606 	vfr_bp->flags |= BNXT_REP_Q_F2R_VALID;
5061*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r);
5062*2d9fd380Sjfb8856606 
5063*2d9fd380Sjfb8856606 	return 0;
5064*2d9fd380Sjfb8856606 }
5065*2d9fd380Sjfb8856606 
5066*2d9fd380Sjfb8856606 static int
5067*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key,
5068*2d9fd380Sjfb8856606 			     const char *value, void *opaque_arg)
5069*2d9fd380Sjfb8856606 {
5070*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
5071*2d9fd380Sjfb8856606 	unsigned long rep_fc_r2f;
5072*2d9fd380Sjfb8856606 	char *end = NULL;
5073*2d9fd380Sjfb8856606 
5074*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
5075*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5076*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_fc_r2f "
5077*2d9fd380Sjfb8856606 			    "devargs.\n");
5078*2d9fd380Sjfb8856606 		return -EINVAL;
5079*2d9fd380Sjfb8856606 	}
5080*2d9fd380Sjfb8856606 
5081*2d9fd380Sjfb8856606 	rep_fc_r2f = strtoul(value, &end, 10);
5082*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
5083*2d9fd380Sjfb8856606 	    (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) {
5084*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5085*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_fc_r2f "
5086*2d9fd380Sjfb8856606 			    "devargs.\n");
5087*2d9fd380Sjfb8856606 		return -EINVAL;
5088*2d9fd380Sjfb8856606 	}
5089*2d9fd380Sjfb8856606 
5090*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) {
5091*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5092*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_fc_r2f devargs.\n");
5093*2d9fd380Sjfb8856606 		return -EINVAL;
5094*2d9fd380Sjfb8856606 	}
5095*2d9fd380Sjfb8856606 
5096*2d9fd380Sjfb8856606 	vfr_bp->flags |= BNXT_REP_FC_R2F_VALID;
5097*2d9fd380Sjfb8856606 	vfr_bp->rep_fc_r2f = rep_fc_r2f;
5098*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f);
5099*2d9fd380Sjfb8856606 
5100*2d9fd380Sjfb8856606 	return 0;
5101*2d9fd380Sjfb8856606 }
5102*2d9fd380Sjfb8856606 
5103*2d9fd380Sjfb8856606 static int
5104*2d9fd380Sjfb8856606 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
5105*2d9fd380Sjfb8856606 			     const char *value, void *opaque_arg)
5106*2d9fd380Sjfb8856606 {
5107*2d9fd380Sjfb8856606 	struct bnxt_representor *vfr_bp = opaque_arg;
5108*2d9fd380Sjfb8856606 	unsigned long rep_fc_f2r;
5109*2d9fd380Sjfb8856606 	char *end = NULL;
5110*2d9fd380Sjfb8856606 
5111*2d9fd380Sjfb8856606 	if (!value || !opaque_arg) {
5112*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5113*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_fc_f2r "
5114*2d9fd380Sjfb8856606 			    "devargs.\n");
5115*2d9fd380Sjfb8856606 		return -EINVAL;
5116*2d9fd380Sjfb8856606 	}
5117*2d9fd380Sjfb8856606 
5118*2d9fd380Sjfb8856606 	rep_fc_f2r = strtoul(value, &end, 10);
5119*2d9fd380Sjfb8856606 	if (end == NULL || *end != '\0' ||
5120*2d9fd380Sjfb8856606 	    (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
5121*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5122*2d9fd380Sjfb8856606 			    "Invalid parameter passed to rep_fc_f2r "
5123*2d9fd380Sjfb8856606 			    "devargs.\n");
5124*2d9fd380Sjfb8856606 		return -EINVAL;
5125*2d9fd380Sjfb8856606 	}
5126*2d9fd380Sjfb8856606 
5127*2d9fd380Sjfb8856606 	if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
5128*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5129*2d9fd380Sjfb8856606 			    "Invalid value passed to rep_fc_f2r devargs.\n");
5130*2d9fd380Sjfb8856606 		return -EINVAL;
5131*2d9fd380Sjfb8856606 	}
5132*2d9fd380Sjfb8856606 
5133*2d9fd380Sjfb8856606 	vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
5134*2d9fd380Sjfb8856606 	vfr_bp->rep_fc_f2r = rep_fc_f2r;
5135*2d9fd380Sjfb8856606 	PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);
5136*2d9fd380Sjfb8856606 
5137*2d9fd380Sjfb8856606 	return 0;
5138*2d9fd380Sjfb8856606 }
5139*2d9fd380Sjfb8856606 
5140*2d9fd380Sjfb8856606 static void
5141*2d9fd380Sjfb8856606 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5142*2d9fd380Sjfb8856606 {
5143*2d9fd380Sjfb8856606 	struct rte_kvargs *kvlist;
5144*2d9fd380Sjfb8856606 
5145*2d9fd380Sjfb8856606 	if (devargs == NULL)
5146*2d9fd380Sjfb8856606 		return;
5147*2d9fd380Sjfb8856606 
5148*2d9fd380Sjfb8856606 	kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5149*2d9fd380Sjfb8856606 	if (kvlist == NULL)
5150*2d9fd380Sjfb8856606 		return;
5151*2d9fd380Sjfb8856606 
5152*2d9fd380Sjfb8856606 	/*
5153*2d9fd380Sjfb8856606 	 * Handler for "truflow" devarg.
5154*2d9fd380Sjfb8856606 	 * Invoked as for ex: "-a 0000:00:0d.0,host-based-truflow=1"
5155*2d9fd380Sjfb8856606 	 */
5156*2d9fd380Sjfb8856606 	rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5157*2d9fd380Sjfb8856606 			   bnxt_parse_devarg_truflow, bp);
5158*2d9fd380Sjfb8856606 
5159*2d9fd380Sjfb8856606 	/*
5160*2d9fd380Sjfb8856606 	 * Handler for "flow_xstat" devarg.
5161*2d9fd380Sjfb8856606 	 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1"
5162*2d9fd380Sjfb8856606 	 */
5163*2d9fd380Sjfb8856606 	rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5164*2d9fd380Sjfb8856606 			   bnxt_parse_devarg_flow_xstat, bp);
5165*2d9fd380Sjfb8856606 
5166*2d9fd380Sjfb8856606 	/*
5167*2d9fd380Sjfb8856606 	 * Handler for "max_num_kflows" devarg.
5168*2d9fd380Sjfb8856606 	 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32"
5169*2d9fd380Sjfb8856606 	 */
5170*2d9fd380Sjfb8856606 	rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
5171*2d9fd380Sjfb8856606 			   bnxt_parse_devarg_max_num_kflows, bp);
5172*2d9fd380Sjfb8856606 
5173*2d9fd380Sjfb8856606 	rte_kvargs_free(kvlist);
5174*2d9fd380Sjfb8856606 }
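/*
 * Taken together, the handlers above accept an EAL allow-list entry such as
 * the following (values are illustrative only):
 *   -a 0000:00:0d.0,host-based-truflow=1,flow_xstat=1,max_num_kflows=32
 */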
5175*2d9fd380Sjfb8856606 
5176*2d9fd380Sjfb8856606 static int bnxt_alloc_switch_domain(struct bnxt *bp)
5177*2d9fd380Sjfb8856606 {
5178*2d9fd380Sjfb8856606 	int rc = 0;
5179*2d9fd380Sjfb8856606 
5180*2d9fd380Sjfb8856606 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
5181*2d9fd380Sjfb8856606 		rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
5182*2d9fd380Sjfb8856606 		if (rc)
5183*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR,
5184*2d9fd380Sjfb8856606 				    "Failed to alloc switch domain: %d\n", rc);
5185*2d9fd380Sjfb8856606 		else
5186*2d9fd380Sjfb8856606 			PMD_DRV_LOG(INFO,
5187*2d9fd380Sjfb8856606 				    "Switch domain allocated %d\n",
5188*2d9fd380Sjfb8856606 				    bp->switch_domain_id);
5189*2d9fd380Sjfb8856606 	}
5190*2d9fd380Sjfb8856606 
5191*2d9fd380Sjfb8856606 	return rc;
5192*2d9fd380Sjfb8856606 }
5193*2d9fd380Sjfb8856606 
5194*2d9fd380Sjfb8856606 static int
5195*2d9fd380Sjfb8856606 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
51964418919fSjohnjiang {
51974418919fSjohnjiang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
51984418919fSjohnjiang 	static int version_printed;
51994418919fSjohnjiang 	struct bnxt *bp;
52004418919fSjohnjiang 	int rc;
52014418919fSjohnjiang 
52024418919fSjohnjiang 	if (version_printed++ == 0)
52034418919fSjohnjiang 		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
52044418919fSjohnjiang 
52054418919fSjohnjiang 	eth_dev->dev_ops = &bnxt_dev_ops;
5206*2d9fd380Sjfb8856606 	eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
5207*2d9fd380Sjfb8856606 	eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op;
5208*2d9fd380Sjfb8856606 	eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op;
52094418919fSjohnjiang 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
52104418919fSjohnjiang 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
52114418919fSjohnjiang 
52124418919fSjohnjiang 	/*
52134418919fSjohnjiang 	 * For secondary processes, we don't initialise any further
52144418919fSjohnjiang 	 * as primary has already done this work.
52154418919fSjohnjiang 	 */
52164418919fSjohnjiang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
52174418919fSjohnjiang 		return 0;
52184418919fSjohnjiang 
52194418919fSjohnjiang 	rte_eth_copy_pci_info(eth_dev, pci_dev);
5220*2d9fd380Sjfb8856606 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
52214418919fSjohnjiang 
52224418919fSjohnjiang 	bp = eth_dev->data->dev_private;
52234418919fSjohnjiang 
5224*2d9fd380Sjfb8856606 	/* Parse dev arguments passed on when starting the DPDK application. */
5225*2d9fd380Sjfb8856606 	bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5226*2d9fd380Sjfb8856606 
5227*2d9fd380Sjfb8856606 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5228*2d9fd380Sjfb8856606 
52294418919fSjohnjiang 	if (bnxt_vf_pciid(pci_dev->id.device_id))
52304418919fSjohnjiang 		bp->flags |= BNXT_FLAG_VF;
52314418919fSjohnjiang 
52324418919fSjohnjiang 	if (bnxt_thor_device(pci_dev->id.device_id))
52334418919fSjohnjiang 		bp->flags |= BNXT_FLAG_THOR_CHIP;
52344418919fSjohnjiang 
52354418919fSjohnjiang 	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
52364418919fSjohnjiang 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
52374418919fSjohnjiang 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
52384418919fSjohnjiang 	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
52394418919fSjohnjiang 		bp->flags |= BNXT_FLAG_STINGRAY;
52404418919fSjohnjiang 
5241*2d9fd380Sjfb8856606 	if (BNXT_TRUFLOW_EN(bp)) {
5242*2d9fd380Sjfb8856606 		/* extra mbuf field is required to store CFA code from mark */
5243*2d9fd380Sjfb8856606 		static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
5244*2d9fd380Sjfb8856606 			.name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
5245*2d9fd380Sjfb8856606 			.size = sizeof(bnxt_cfa_code_dynfield_t),
5246*2d9fd380Sjfb8856606 			.align = __alignof__(bnxt_cfa_code_dynfield_t),
5247*2d9fd380Sjfb8856606 		};
5248*2d9fd380Sjfb8856606 		bnxt_cfa_code_dynfield_offset =
5249*2d9fd380Sjfb8856606 			rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
5250*2d9fd380Sjfb8856606 		if (bnxt_cfa_code_dynfield_offset < 0) {
5251*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR,
5252*2d9fd380Sjfb8856606 			    "Failed to register mbuf field for TruFlow mark\n");
5253*2d9fd380Sjfb8856606 			return -rte_errno;
5254*2d9fd380Sjfb8856606 		}
5255*2d9fd380Sjfb8856606 	}
5256*2d9fd380Sjfb8856606 
52574418919fSjohnjiang 	rc = bnxt_init_board(eth_dev);
52584418919fSjohnjiang 	if (rc) {
52594418919fSjohnjiang 		PMD_DRV_LOG(ERR,
52604418919fSjohnjiang 			    "Failed to initialize board rc: %x\n", rc);
52614418919fSjohnjiang 		return rc;
52624418919fSjohnjiang 	}
52634418919fSjohnjiang 
5264*2d9fd380Sjfb8856606 	rc = bnxt_alloc_pf_info(bp);
5265*2d9fd380Sjfb8856606 	if (rc)
5266*2d9fd380Sjfb8856606 		goto error_free;
5267*2d9fd380Sjfb8856606 
5268*2d9fd380Sjfb8856606 	rc = bnxt_alloc_link_info(bp);
5269*2d9fd380Sjfb8856606 	if (rc)
5270*2d9fd380Sjfb8856606 		goto error_free;
5271*2d9fd380Sjfb8856606 
5272*2d9fd380Sjfb8856606 	rc = bnxt_alloc_parent_info(bp);
5273*2d9fd380Sjfb8856606 	if (rc)
5274*2d9fd380Sjfb8856606 		goto error_free;
5275*2d9fd380Sjfb8856606 
52764418919fSjohnjiang 	rc = bnxt_alloc_hwrm_resources(bp);
52774418919fSjohnjiang 	if (rc) {
52784418919fSjohnjiang 		PMD_DRV_LOG(ERR,
52794418919fSjohnjiang 			    "Failed to allocate hwrm resource rc: %x\n", rc);
52804b05018fSfengbojiang 		goto error_free;
52814418919fSjohnjiang 	}
5282*2d9fd380Sjfb8856606 	rc = bnxt_alloc_leds_info(bp);
5283*2d9fd380Sjfb8856606 	if (rc)
5284*2d9fd380Sjfb8856606 		goto error_free;
5285*2d9fd380Sjfb8856606 
5286*2d9fd380Sjfb8856606 	rc = bnxt_alloc_cos_queues(bp);
5287*2d9fd380Sjfb8856606 	if (rc)
5288*2d9fd380Sjfb8856606 		goto error_free;
5289*2d9fd380Sjfb8856606 
52904418919fSjohnjiang 	rc = bnxt_init_resources(bp, false);
52914418919fSjohnjiang 	if (rc)
52924418919fSjohnjiang 		goto error_free;
52934418919fSjohnjiang 
52944418919fSjohnjiang 	rc = bnxt_alloc_stats_mem(bp);
52954418919fSjohnjiang 	if (rc)
52964418919fSjohnjiang 		goto error_free;
52974418919fSjohnjiang 
5298*2d9fd380Sjfb8856606 	bnxt_alloc_switch_domain(bp);
5299*2d9fd380Sjfb8856606 
53004418919fSjohnjiang 	PMD_DRV_LOG(INFO,
53014418919fSjohnjiang 		    DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
53024418919fSjohnjiang 		    pci_dev->mem_resource[0].phys_addr,
53034418919fSjohnjiang 		    pci_dev->mem_resource[0].addr);
5304a9643ea8Slogwang 
5305a9643ea8Slogwang 	return 0;
5306a9643ea8Slogwang 
5307a9643ea8Slogwang error_free:
53082bfe3f2eSlogwang 	bnxt_dev_uninit(eth_dev);
53094418919fSjohnjiang 	return rc;
53104418919fSjohnjiang }
53114418919fSjohnjiang 
5312*2d9fd380Sjfb8856606 
5313*2d9fd380Sjfb8856606 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5314*2d9fd380Sjfb8856606 {
5315*2d9fd380Sjfb8856606 	if (!ctx)
5316*2d9fd380Sjfb8856606 		return;
5317*2d9fd380Sjfb8856606 
5318*2d9fd380Sjfb8856606 	if (ctx->va)
5319*2d9fd380Sjfb8856606 		rte_free(ctx->va);
5320*2d9fd380Sjfb8856606 
5321*2d9fd380Sjfb8856606 	ctx->va = NULL;
5322*2d9fd380Sjfb8856606 	ctx->dma = RTE_BAD_IOVA;
5323*2d9fd380Sjfb8856606 	ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5324*2d9fd380Sjfb8856606 }
5325*2d9fd380Sjfb8856606 
5326*2d9fd380Sjfb8856606 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5327*2d9fd380Sjfb8856606 {
5328*2d9fd380Sjfb8856606 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5329*2d9fd380Sjfb8856606 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5330*2d9fd380Sjfb8856606 				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
5331*2d9fd380Sjfb8856606 				  bp->flow_stat->max_fc,
5332*2d9fd380Sjfb8856606 				  false);
5333*2d9fd380Sjfb8856606 
5334*2d9fd380Sjfb8856606 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5335*2d9fd380Sjfb8856606 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5336*2d9fd380Sjfb8856606 				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
5337*2d9fd380Sjfb8856606 				  bp->flow_stat->max_fc,
5338*2d9fd380Sjfb8856606 				  false);
5339*2d9fd380Sjfb8856606 
5340*2d9fd380Sjfb8856606 	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5341*2d9fd380Sjfb8856606 		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
5342*2d9fd380Sjfb8856606 	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5343*2d9fd380Sjfb8856606 
5344*2d9fd380Sjfb8856606 	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5345*2d9fd380Sjfb8856606 		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
5346*2d9fd380Sjfb8856606 	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5347*2d9fd380Sjfb8856606 
5348*2d9fd380Sjfb8856606 	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5349*2d9fd380Sjfb8856606 		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
5350*2d9fd380Sjfb8856606 	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5351*2d9fd380Sjfb8856606 
5352*2d9fd380Sjfb8856606 	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5353*2d9fd380Sjfb8856606 		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
5354*2d9fd380Sjfb8856606 	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5355*2d9fd380Sjfb8856606 }
5356*2d9fd380Sjfb8856606 
5357*2d9fd380Sjfb8856606 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5358*2d9fd380Sjfb8856606 {
5359*2d9fd380Sjfb8856606 	bnxt_unregister_fc_ctx_mem(bp);
5360*2d9fd380Sjfb8856606 
5361*2d9fd380Sjfb8856606 	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
5362*2d9fd380Sjfb8856606 	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
5363*2d9fd380Sjfb8856606 	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
5364*2d9fd380Sjfb8856606 	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
5365*2d9fd380Sjfb8856606 }
5366*2d9fd380Sjfb8856606 
5367*2d9fd380Sjfb8856606 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5368*2d9fd380Sjfb8856606 {
5369*2d9fd380Sjfb8856606 	if (BNXT_FLOW_XSTATS_EN(bp))
5370*2d9fd380Sjfb8856606 		bnxt_uninit_fc_ctx_mem(bp);
5371*2d9fd380Sjfb8856606 }
5372*2d9fd380Sjfb8856606 
5373*2d9fd380Sjfb8856606 static void
5374*2d9fd380Sjfb8856606 bnxt_free_error_recovery_info(struct bnxt *bp)
5375*2d9fd380Sjfb8856606 {
5376*2d9fd380Sjfb8856606 	rte_free(bp->recovery_info);
5377*2d9fd380Sjfb8856606 	bp->recovery_info = NULL;
5378*2d9fd380Sjfb8856606 	bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5379*2d9fd380Sjfb8856606 }
5380*2d9fd380Sjfb8856606 
53814418919fSjohnjiang static void
53824418919fSjohnjiang bnxt_uninit_locks(struct bnxt *bp)
53834418919fSjohnjiang {
53844418919fSjohnjiang 	pthread_mutex_destroy(&bp->flow_lock);
53854418919fSjohnjiang 	pthread_mutex_destroy(&bp->def_cp_lock);
53860c6bd470Sfengbojiang 	pthread_mutex_destroy(&bp->health_check_lock);
5387*2d9fd380Sjfb8856606 	if (bp->rep_info) {
5388*2d9fd380Sjfb8856606 		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
5389*2d9fd380Sjfb8856606 		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
5390*2d9fd380Sjfb8856606 	}
53914418919fSjohnjiang }
53924418919fSjohnjiang 
53934418919fSjohnjiang static int
53944418919fSjohnjiang bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
53954418919fSjohnjiang {
53964418919fSjohnjiang 	int rc;
53974418919fSjohnjiang 
53984418919fSjohnjiang 	bnxt_free_int(bp);
53994418919fSjohnjiang 	bnxt_free_mem(bp, reconfig_dev);
5400*2d9fd380Sjfb8856606 
54014418919fSjohnjiang 	bnxt_hwrm_func_buf_unrgtr(bp);
5402*2d9fd380Sjfb8856606 	rte_free(bp->pf->vf_req_buf);
5403*2d9fd380Sjfb8856606 
54044418919fSjohnjiang 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
54054418919fSjohnjiang 	bp->flags &= ~BNXT_FLAG_REGISTERED;
54064418919fSjohnjiang 	bnxt_free_ctx_mem(bp);
54074418919fSjohnjiang 	if (!reconfig_dev) {
54084418919fSjohnjiang 		bnxt_free_hwrm_resources(bp);
5409*2d9fd380Sjfb8856606 		bnxt_free_error_recovery_info(bp);
5410*2d9fd380Sjfb8856606 	}
54114418919fSjohnjiang 
5412*2d9fd380Sjfb8856606 	bnxt_uninit_ctx_mem(bp);
54134418919fSjohnjiang 
54144418919fSjohnjiang 	bnxt_uninit_locks(bp);
5415*2d9fd380Sjfb8856606 	bnxt_free_flow_stats_info(bp);
5416*2d9fd380Sjfb8856606 	bnxt_free_rep_info(bp);
54174418919fSjohnjiang 	rte_free(bp->ptp_cfg);
54184418919fSjohnjiang 	bp->ptp_cfg = NULL;
5419a9643ea8Slogwang 	return rc;
5420a9643ea8Slogwang }
5421a9643ea8Slogwang 
5422a9643ea8Slogwang static int
5423d30ea906Sjfb8856606 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
5424d30ea906Sjfb8856606 {
54252bfe3f2eSlogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
54262bfe3f2eSlogwang 		return -EPERM;
54272bfe3f2eSlogwang 
5428d30ea906Sjfb8856606 	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
54294b05018fSfengbojiang 
5430*2d9fd380Sjfb8856606 	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
5431a9643ea8Slogwang 		bnxt_dev_close_op(eth_dev);
5432*2d9fd380Sjfb8856606 
5433*2d9fd380Sjfb8856606 	return 0;
5434*2d9fd380Sjfb8856606 }
5435*2d9fd380Sjfb8856606 
5436*2d9fd380Sjfb8856606 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
5437*2d9fd380Sjfb8856606 {
5438*2d9fd380Sjfb8856606 	struct bnxt *bp = eth_dev->data->dev_private;
5439*2d9fd380Sjfb8856606 	struct rte_eth_dev *vf_rep_eth_dev;
5440*2d9fd380Sjfb8856606 	int ret = 0, i;
5441*2d9fd380Sjfb8856606 
5442*2d9fd380Sjfb8856606 	if (!bp)
5443*2d9fd380Sjfb8856606 		return -EINVAL;
5444*2d9fd380Sjfb8856606 
5445*2d9fd380Sjfb8856606 	for (i = 0; i < bp->num_reps; i++) {
5446*2d9fd380Sjfb8856606 		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
5447*2d9fd380Sjfb8856606 		if (!vf_rep_eth_dev)
5448*2d9fd380Sjfb8856606 			continue;
5449*2d9fd380Sjfb8856606 		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
5450*2d9fd380Sjfb8856606 			    vf_rep_eth_dev->data->port_id);
5451*2d9fd380Sjfb8856606 		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
5452*2d9fd380Sjfb8856606 	}
5453*2d9fd380Sjfb8856606 	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
5454*2d9fd380Sjfb8856606 		    eth_dev->data->port_id);
5455*2d9fd380Sjfb8856606 	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
5456*2d9fd380Sjfb8856606 
5457*2d9fd380Sjfb8856606 	return ret;
5458*2d9fd380Sjfb8856606 }
5459*2d9fd380Sjfb8856606 
5460*2d9fd380Sjfb8856606 static void bnxt_free_rep_info(struct bnxt *bp)
5461*2d9fd380Sjfb8856606 {
5462*2d9fd380Sjfb8856606 	rte_free(bp->rep_info);
5463*2d9fd380Sjfb8856606 	bp->rep_info = NULL;
5464*2d9fd380Sjfb8856606 	rte_free(bp->cfa_code_map);
5465*2d9fd380Sjfb8856606 	bp->cfa_code_map = NULL;
5466*2d9fd380Sjfb8856606 }
5467*2d9fd380Sjfb8856606 
5468*2d9fd380Sjfb8856606 static int bnxt_init_rep_info(struct bnxt *bp)
5469*2d9fd380Sjfb8856606 {
5470*2d9fd380Sjfb8856606 	int i = 0, rc;
5471*2d9fd380Sjfb8856606 
5472*2d9fd380Sjfb8856606 	if (bp->rep_info)
5473*2d9fd380Sjfb8856606 		return 0;
5474*2d9fd380Sjfb8856606 
5475*2d9fd380Sjfb8856606 	bp->rep_info = rte_zmalloc("bnxt_rep_info",
5476*2d9fd380Sjfb8856606 				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
5477*2d9fd380Sjfb8856606 				   0);
5478*2d9fd380Sjfb8856606 	if (!bp->rep_info) {
5479*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
5480*2d9fd380Sjfb8856606 		return -ENOMEM;
5481*2d9fd380Sjfb8856606 	}
5482*2d9fd380Sjfb8856606 	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
5483*2d9fd380Sjfb8856606 				       sizeof(*bp->cfa_code_map) *
5484*2d9fd380Sjfb8856606 				       BNXT_MAX_CFA_CODE, 0);
5485*2d9fd380Sjfb8856606 	if (!bp->cfa_code_map) {
5486*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
5487*2d9fd380Sjfb8856606 		bnxt_free_rep_info(bp);
5488*2d9fd380Sjfb8856606 		return -ENOMEM;
5489*2d9fd380Sjfb8856606 	}
5490*2d9fd380Sjfb8856606 
5491*2d9fd380Sjfb8856606 	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
5492*2d9fd380Sjfb8856606 		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;
5493*2d9fd380Sjfb8856606 
5494*2d9fd380Sjfb8856606 	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
5495*2d9fd380Sjfb8856606 	if (rc) {
5496*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
5497*2d9fd380Sjfb8856606 		bnxt_free_rep_info(bp);
5498*2d9fd380Sjfb8856606 		return rc;
5499*2d9fd380Sjfb8856606 	}
5500*2d9fd380Sjfb8856606 
5501*2d9fd380Sjfb8856606 	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
5502*2d9fd380Sjfb8856606 	if (rc) {
5503*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
5504*2d9fd380Sjfb8856606 		bnxt_free_rep_info(bp);
5505*2d9fd380Sjfb8856606 		return rc;
5506*2d9fd380Sjfb8856606 	}
55072bfe3f2eSlogwang 
5508a9643ea8Slogwang 	return rc;
5509a9643ea8Slogwang }
5510a9643ea8Slogwang 
5511*2d9fd380Sjfb8856606 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
5512*2d9fd380Sjfb8856606 			       struct rte_eth_devargs *eth_da,
5513*2d9fd380Sjfb8856606 			       struct rte_eth_dev *backing_eth_dev,
5514*2d9fd380Sjfb8856606 			       const char *dev_args)
5515*2d9fd380Sjfb8856606 {
5516*2d9fd380Sjfb8856606 	struct rte_eth_dev *vf_rep_eth_dev;
5517*2d9fd380Sjfb8856606 	char name[RTE_ETH_NAME_MAX_LEN];
5518*2d9fd380Sjfb8856606 	struct bnxt *backing_bp;
5519*2d9fd380Sjfb8856606 	uint16_t num_rep;
5520*2d9fd380Sjfb8856606 	int i, ret = 0;
5521*2d9fd380Sjfb8856606 	struct rte_kvargs *kvlist = NULL;
5522*2d9fd380Sjfb8856606 
5523*2d9fd380Sjfb8856606 	num_rep = eth_da->nb_representor_ports;
5524*2d9fd380Sjfb8856606 	if (num_rep > BNXT_MAX_VF_REPS) {
5525*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
5526*2d9fd380Sjfb8856606 			    num_rep, BNXT_MAX_VF_REPS);
5527*2d9fd380Sjfb8856606 		return -EINVAL;
5528*2d9fd380Sjfb8856606 	}
5529*2d9fd380Sjfb8856606 
5530*2d9fd380Sjfb8856606 	if (num_rep >= RTE_MAX_ETHPORTS) {
5531*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5532*2d9fd380Sjfb8856606 			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
5533*2d9fd380Sjfb8856606 			    num_rep, RTE_MAX_ETHPORTS);
5534*2d9fd380Sjfb8856606 		return -EINVAL;
5535*2d9fd380Sjfb8856606 	}
5536*2d9fd380Sjfb8856606 
5537*2d9fd380Sjfb8856606 	backing_bp = backing_eth_dev->data->dev_private;
5538*2d9fd380Sjfb8856606 
5539*2d9fd380Sjfb8856606 	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
5540*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR,
5541*2d9fd380Sjfb8856606 			    "Not a PF or trusted VF. No Representor support\n");
5542*2d9fd380Sjfb8856606 		/* Returning an error is not an option.
5543*2d9fd380Sjfb8856606 		 * Applications are not handling this correctly
5544*2d9fd380Sjfb8856606 		 */
5545*2d9fd380Sjfb8856606 		return 0;
5546*2d9fd380Sjfb8856606 	}
5547*2d9fd380Sjfb8856606 
5548*2d9fd380Sjfb8856606 	if (bnxt_init_rep_info(backing_bp))
5549*2d9fd380Sjfb8856606 		return 0;
5550*2d9fd380Sjfb8856606 
5551*2d9fd380Sjfb8856606 	for (i = 0; i < num_rep; i++) {
5552*2d9fd380Sjfb8856606 		struct bnxt_representor representor = {
5553*2d9fd380Sjfb8856606 			.vf_id = eth_da->representor_ports[i],
5554*2d9fd380Sjfb8856606 			.switch_domain_id = backing_bp->switch_domain_id,
5555*2d9fd380Sjfb8856606 			.parent_dev = backing_eth_dev
5556*2d9fd380Sjfb8856606 		};
5557*2d9fd380Sjfb8856606 
5558*2d9fd380Sjfb8856606 		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
5559*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
5560*2d9fd380Sjfb8856606 				    representor.vf_id, BNXT_MAX_VF_REPS);
5561*2d9fd380Sjfb8856606 			continue;
5562*2d9fd380Sjfb8856606 		}
5563*2d9fd380Sjfb8856606 
5564*2d9fd380Sjfb8856606 		/* representor port net_bdf_port */
5565*2d9fd380Sjfb8856606 		snprintf(name, sizeof(name), "net_%s_representor_%d",
5566*2d9fd380Sjfb8856606 			 pci_dev->device.name, eth_da->representor_ports[i]);
5567*2d9fd380Sjfb8856606 
5568*2d9fd380Sjfb8856606 		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
5569*2d9fd380Sjfb8856606 		if (kvlist) {
5570*2d9fd380Sjfb8856606 			/*
5571*2d9fd380Sjfb8856606 			 * Handler for "rep_is_pf" devarg.
5572*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5573*2d9fd380Sjfb8856606 			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
5574*2d9fd380Sjfb8856606 			 */
5575*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
5576*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_is_pf,
5577*2d9fd380Sjfb8856606 						 (void *)&representor);
5578*2d9fd380Sjfb8856606 			if (ret) {
5579*2d9fd380Sjfb8856606 				ret = -EINVAL;
5580*2d9fd380Sjfb8856606 				goto err;
5581*2d9fd380Sjfb8856606 			}
5582*2d9fd380Sjfb8856606 			/*
5583*2d9fd380Sjfb8856606 			 * Handler for "rep_based_pf" devarg.
5584*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5585*2d9fd380Sjfb8856606 			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
5586*2d9fd380Sjfb8856606 			 */
5587*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist,
5588*2d9fd380Sjfb8856606 						 BNXT_DEVARG_REP_BASED_PF,
5589*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_based_pf,
5590*2d9fd380Sjfb8856606 						 (void *)&representor);
5591*2d9fd380Sjfb8856606 			if (ret) {
5592*2d9fd380Sjfb8856606 				ret = -EINVAL;
5593*2d9fd380Sjfb8856606 				goto err;
5594*2d9fd380Sjfb8856606 			}
5595*2d9fd380Sjfb8856606 			/*
5596*2d9fd380Sjfb8856606 			 * Handler for "rep_q_r2f" devarg.
5597*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5598*2d9fd380Sjfb8856606 			 * rep-q-r2f=<value>"
5599*2d9fd380Sjfb8856606 			 */
5600*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
5601*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_q_r2f,
5602*2d9fd380Sjfb8856606 						 (void *)&representor);
5603*2d9fd380Sjfb8856606 			if (ret) {
5604*2d9fd380Sjfb8856606 				ret = -EINVAL;
5605*2d9fd380Sjfb8856606 				goto err;
5606*2d9fd380Sjfb8856606 			}
5607*2d9fd380Sjfb8856606 			/*
5608*2d9fd380Sjfb8856606 			 * Handler for "rep_q_f2r" devarg.
5609*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5610*2d9fd380Sjfb8856606 			 * rep-q-f2r=<value>"
5611*2d9fd380Sjfb8856606 			 */
5612*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
5613*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_q_f2r,
5614*2d9fd380Sjfb8856606 						 (void *)&representor);
5615*2d9fd380Sjfb8856606 			if (ret) {
5616*2d9fd380Sjfb8856606 				ret = -EINVAL;
5617*2d9fd380Sjfb8856606 				goto err;
5618*2d9fd380Sjfb8856606 			}
5619*2d9fd380Sjfb8856606 			/*
5620*2d9fd380Sjfb8856606 			 * Handler for "rep_fc_r2f" devarg.
5621*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5622*2d9fd380Sjfb8856606 			 * rep-fc-r2f=<value>"
5623*2d9fd380Sjfb8856606 			 */
5624*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
5625*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_fc_r2f,
5626*2d9fd380Sjfb8856606 						 (void *)&representor);
5627*2d9fd380Sjfb8856606 			if (ret) {
5628*2d9fd380Sjfb8856606 				ret = -EINVAL;
5629*2d9fd380Sjfb8856606 				goto err;
5630*2d9fd380Sjfb8856606 			}
5631*2d9fd380Sjfb8856606 			/*
5632*2d9fd380Sjfb8856606 			 * Handler for "rep_fc_f2r" devarg.
5633*2d9fd380Sjfb8856606 			 * Invoked as for ex: "-a 000:00:0d.0,
5634*2d9fd380Sjfb8856606 			 * rep-fc-f2r=<value>"
5635*2d9fd380Sjfb8856606 			 */
5636*2d9fd380Sjfb8856606 			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
5637*2d9fd380Sjfb8856606 						 bnxt_parse_devarg_rep_fc_f2r,
5638*2d9fd380Sjfb8856606 						 (void *)&representor);
5639*2d9fd380Sjfb8856606 			if (ret) {
5640*2d9fd380Sjfb8856606 				ret = -EINVAL;
5641*2d9fd380Sjfb8856606 				goto err;
5642*2d9fd380Sjfb8856606 			}
5643*2d9fd380Sjfb8856606 		}
5644*2d9fd380Sjfb8856606 
5645*2d9fd380Sjfb8856606 		ret = rte_eth_dev_create(&pci_dev->device, name,
5646*2d9fd380Sjfb8856606 					 sizeof(struct bnxt_representor),
5647*2d9fd380Sjfb8856606 					 NULL, NULL,
5648*2d9fd380Sjfb8856606 					 bnxt_representor_init,
5649*2d9fd380Sjfb8856606 					 &representor);
5650*2d9fd380Sjfb8856606 		if (ret) {
5651*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
5652*2d9fd380Sjfb8856606 				    "representor %s.", name);
5653*2d9fd380Sjfb8856606 			goto err;
5654*2d9fd380Sjfb8856606 		}
5655*2d9fd380Sjfb8856606 
5656*2d9fd380Sjfb8856606 		vf_rep_eth_dev = rte_eth_dev_allocated(name);
5657*2d9fd380Sjfb8856606 		if (!vf_rep_eth_dev) {
5658*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
5659*2d9fd380Sjfb8856606 				    " for VF-Rep: %s.", name);
5660*2d9fd380Sjfb8856606 			ret = -ENODEV;
5661*2d9fd380Sjfb8856606 			goto err;
5662*2d9fd380Sjfb8856606 		}
5663*2d9fd380Sjfb8856606 
5664*2d9fd380Sjfb8856606 		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
5665*2d9fd380Sjfb8856606 			    backing_eth_dev->data->port_id);
5666*2d9fd380Sjfb8856606 		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
5667*2d9fd380Sjfb8856606 							 vf_rep_eth_dev;
5668*2d9fd380Sjfb8856606 		backing_bp->num_reps++;
5669*2d9fd380Sjfb8856606 
5670*2d9fd380Sjfb8856606 	}
5671*2d9fd380Sjfb8856606 
5672*2d9fd380Sjfb8856606 	rte_kvargs_free(kvlist);
5673*2d9fd380Sjfb8856606 	return 0;
5674*2d9fd380Sjfb8856606 
5675*2d9fd380Sjfb8856606 err:
5676*2d9fd380Sjfb8856606 	/* If num_rep > 1, then rollback already created
5677*2d9fd380Sjfb8856606 	 * ports, since we'll be failing the probe anyway
5678*2d9fd380Sjfb8856606 	 */
5679*2d9fd380Sjfb8856606 	if (num_rep > 1)
5680*2d9fd380Sjfb8856606 		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
5681*2d9fd380Sjfb8856606 	rte_errno = -ret;
5682*2d9fd380Sjfb8856606 	rte_kvargs_free(kvlist);
5683*2d9fd380Sjfb8856606 
5684*2d9fd380Sjfb8856606 	return ret;
5685*2d9fd380Sjfb8856606 }
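/*
 * Editor's note (illustrative, not part of the upstream file): the per-VF-rep
 * devargs handled above are normally supplied together with the generic
 * "representor" list when the backing device is allow-listed. Assuming the
 * BNXT_DEVARG_REP_* macros map to the hyphenated key names shown in the
 * comments above, a full invocation would look roughly like:
 *
 *   -a 0000:00:0d.0,representor=[0-3],rep-based-pf=0,rep-is-pf=0,\
 *      rep-q-r2f=0,rep-q-f2r=0,rep-fc-r2f=0,rep-fc-f2r=0
 *
 * The PCI address and values here are placeholders only.
 */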
5686*2d9fd380Sjfb8856606 
56872bfe3f2eSlogwang static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
56882bfe3f2eSlogwang 			  struct rte_pci_device *pci_dev)
5689a9643ea8Slogwang {
5690*2d9fd380Sjfb8856606 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
5691*2d9fd380Sjfb8856606 	struct rte_eth_dev *backing_eth_dev;
5692*2d9fd380Sjfb8856606 	uint16_t num_rep;
5693*2d9fd380Sjfb8856606 	int ret = 0;
5694*2d9fd380Sjfb8856606 
5695*2d9fd380Sjfb8856606 	if (pci_dev->device.devargs) {
5696*2d9fd380Sjfb8856606 		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
5697*2d9fd380Sjfb8856606 					    &eth_da);
5698*2d9fd380Sjfb8856606 		if (ret)
5699*2d9fd380Sjfb8856606 			return ret;
5700*2d9fd380Sjfb8856606 	}
5701*2d9fd380Sjfb8856606 
5702*2d9fd380Sjfb8856606 	num_rep = eth_da.nb_representor_ports;
5703*2d9fd380Sjfb8856606 	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
5704*2d9fd380Sjfb8856606 		    num_rep);
5705*2d9fd380Sjfb8856606 
5706*2d9fd380Sjfb8856606 	/* We could get here after the first-level probe was already invoked
5707*2d9fd380Sjfb8856606 	 * during application bringup (OVS-DPDK vswitchd), so first check for
5708*2d9fd380Sjfb8856606 	 * an already-allocated eth_dev for the backing device (PF/trusted VF).
5709*2d9fd380Sjfb8856606 	 */
5710*2d9fd380Sjfb8856606 	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5711*2d9fd380Sjfb8856606 	if (backing_eth_dev == NULL) {
5712*2d9fd380Sjfb8856606 		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
5713*2d9fd380Sjfb8856606 					 sizeof(struct bnxt),
5714*2d9fd380Sjfb8856606 					 eth_dev_pci_specific_init, pci_dev,
5715*2d9fd380Sjfb8856606 					 bnxt_dev_init, NULL);
5716*2d9fd380Sjfb8856606 
5717*2d9fd380Sjfb8856606 		if (ret || !num_rep)
5718*2d9fd380Sjfb8856606 			return ret;
5719*2d9fd380Sjfb8856606 
5720*2d9fd380Sjfb8856606 		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5721*2d9fd380Sjfb8856606 	}
5722*2d9fd380Sjfb8856606 	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
5723*2d9fd380Sjfb8856606 		    backing_eth_dev->data->port_id);
5724*2d9fd380Sjfb8856606 
5725*2d9fd380Sjfb8856606 	if (!num_rep)
5726*2d9fd380Sjfb8856606 		return ret;
5727*2d9fd380Sjfb8856606 
5728*2d9fd380Sjfb8856606 	/* probe representor ports now */
5729*2d9fd380Sjfb8856606 	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
5730*2d9fd380Sjfb8856606 				  pci_dev->device.devargs->args);
5731*2d9fd380Sjfb8856606 
5732*2d9fd380Sjfb8856606 	return ret;
5733a9643ea8Slogwang }
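/*
 * Illustrative sketch (not from the upstream source): an application such as
 * OVS-DPDK reaches the "backing_eth_dev already allocated" branch above by
 * probing the same PCI address a second time with representor devargs, which
 * RTE_PCI_DRV_PROBE_AGAIN (set on bnxt_rte_pmd below) permits. The function
 * name, PCI address and devargs values here are hypothetical.
 */
static __rte_unused int
bnxt_probe_again_example(void)
{
	/* First probe created the backing PF port; this call adds VF reps. */
	return rte_dev_probe("0000:00:0d.0,representor=[0-1]");
}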
5734a9643ea8Slogwang 
57352bfe3f2eSlogwang static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
57362bfe3f2eSlogwang {
5737*2d9fd380Sjfb8856606 	struct rte_eth_dev *eth_dev;
5738*2d9fd380Sjfb8856606 
5739*2d9fd380Sjfb8856606 	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
5740*2d9fd380Sjfb8856606 	if (!eth_dev)
5741*2d9fd380Sjfb8856606 		return 0; /* Typically invoked only by OVS-DPDK; by the
5742*2d9fd380Sjfb8856606 			   * time we get here the eth_dev has already been
5743*2d9fd380Sjfb8856606 			   * released by rte_eth_dev_close(), so returning
5744*2d9fd380Sjfb8856606 			   * success lets the PCI layer finish its cleanup.
5745*2d9fd380Sjfb8856606 			   */
5746*2d9fd380Sjfb8856606 
5747*2d9fd380Sjfb8856606 	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
5748*2d9fd380Sjfb8856606 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
5749*2d9fd380Sjfb8856606 		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
5750*2d9fd380Sjfb8856606 			return rte_eth_dev_destroy(eth_dev,
5751*2d9fd380Sjfb8856606 						   bnxt_representor_uninit);
5752d30ea906Sjfb8856606 		else
5753*2d9fd380Sjfb8856606 			return rte_eth_dev_destroy(eth_dev,
5754*2d9fd380Sjfb8856606 						   bnxt_dev_uninit);
5755*2d9fd380Sjfb8856606 	} else {
5756d30ea906Sjfb8856606 		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
57572bfe3f2eSlogwang 	}
5758*2d9fd380Sjfb8856606 }
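/*
 * Illustrative counterpart (not upstream code): an application detaches the
 * device, which ends up in bnxt_pci_remove() above, via the generic hotplug
 * API from rte_dev.h. The bus name and PCI address are placeholders.
 */
static __rte_unused int
bnxt_detach_example(void)
{
	return rte_eal_hotplug_remove("pci", "0000:00:0d.0");
}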
57592bfe3f2eSlogwang 
57602bfe3f2eSlogwang static struct rte_pci_driver bnxt_rte_pmd = {
57612bfe3f2eSlogwang 	.id_table = bnxt_pci_id_map,
5762*2d9fd380Sjfb8856606 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
5763*2d9fd380Sjfb8856606 			RTE_PCI_DRV_PROBE_AGAIN, /* needed to add VF reps on
5764*2d9fd380Sjfb8856606 						  * a later probe (OVS-DPDK)
5765*2d9fd380Sjfb8856606 						  */
57662bfe3f2eSlogwang 	.probe = bnxt_pci_probe,
57672bfe3f2eSlogwang 	.remove = bnxt_pci_remove,
5768a9643ea8Slogwang };
5769a9643ea8Slogwang 
57702bfe3f2eSlogwang static bool
57712bfe3f2eSlogwang is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
57722bfe3f2eSlogwang {
57732bfe3f2eSlogwang 	if (strcmp(dev->device->driver->name, drv->driver.name))
57742bfe3f2eSlogwang 		return false;
57752bfe3f2eSlogwang 
57762bfe3f2eSlogwang 	return true;
57772bfe3f2eSlogwang }
57782bfe3f2eSlogwang 
57792bfe3f2eSlogwang bool is_bnxt_supported(struct rte_eth_dev *dev)
57802bfe3f2eSlogwang {
57812bfe3f2eSlogwang 	return is_device_supported(dev, &bnxt_rte_pmd);
57822bfe3f2eSlogwang }
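/*
 * Usage sketch (assumption, mirroring how PF-specific helpers in this PMD
 * guard against ports that are not driven by net_bnxt; the function name is
 * hypothetical):
 */
static __rte_unused int
bnxt_example_port_check(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	/* Reject ports owned by other drivers before touching bnxt state. */
	if (!is_bnxt_supported(dev))
		return -ENOTSUP;

	return 0;
}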
57832bfe3f2eSlogwang 
5784*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
57852bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
57862bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
57872bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
5788