xref: /f-stack/dpdk/drivers/net/bnxt/bnxt_hwrm.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <unistd.h>
7 
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14 #include <rte_io.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26 
27 #define HWRM_SPEC_CODE_1_8_3		0x10803
28 #define HWRM_VERSION_1_9_1		0x10901
29 #define HWRM_VERSION_1_9_2		0x10903
30 
31 struct bnxt_plcmodes_cfg {
32 	uint32_t	flags;
33 	uint16_t	jumbo_thresh;
34 	uint16_t	hds_offset;
35 	uint16_t	hds_threshold;
36 };
37 
38 static int page_getenum(size_t size)
39 {
40 	if (size <= 1 << 4)
41 		return 4;
42 	if (size <= 1 << 12)
43 		return 12;
44 	if (size <= 1 << 13)
45 		return 13;
46 	if (size <= 1 << 16)
47 		return 16;
48 	if (size <= 1 << 21)
49 		return 21;
50 	if (size <= 1 << 22)
51 		return 22;
52 	if (size <= 1 << 30)
53 		return 30;
54 	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55 	return sizeof(int) * 8 - 1;
56 }
57 
58 static int page_roundup(size_t size)
59 {
60 	return 1 << page_getenum(size);
61 }
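/*
 * Worked example (illustrative only, not part of the driver): given the
 * power-of-two buckets enumerated above, page_getenum(4096) == 12 and
 * page_roundup(4096) == 4096, while page_getenum(6000) == 13 so
 * page_roundup(6000) == 8192.
 */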
62 
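/*
 * Fill the HWRM page attribute/directory fields for a ring memory area:
 * a single-page ring is referenced directly by its DMA address, while a
 * multi-page ring sets pg_attr to 1 and points pg_dir at the indirect
 * page table.
 */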
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
64 				  uint8_t *pg_attr,
65 				  uint64_t *pg_dir)
66 {
67 	if (rmem->nr_pages > 1) {
68 		*pg_attr = 1;
69 		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
70 	} else {
71 		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
72 	}
73 }
74 
75 /*
76  * HWRM Functions (sent to HWRM)
77  * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT
78  * (-110) if the HWRM command times out, or a negative error code if
79  * the FW fails the command.
80  */
81 
82 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
83 				  uint32_t msg_len, bool use_kong_mb)
84 {
85 	unsigned int i;
86 	struct input *req = msg;
87 	struct output *resp = bp->hwrm_cmd_resp_addr;
88 	uint32_t *data = msg;
89 	uint8_t *bar;
90 	uint8_t *valid;
91 	uint16_t max_req_len = bp->max_req_len;
92 	struct hwrm_short_input short_input = { 0 };
93 	uint16_t bar_offset = use_kong_mb ?
94 		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
95 	uint16_t mb_trigger_offset = use_kong_mb ?
96 		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
97 	uint32_t timeout;
98 
99 	/* Do not send HWRM commands to firmware in error state */
100 	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
101 		return 0;
102 
103 	timeout = bp->hwrm_cmd_timeout;
104 
105 	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
106 	    msg_len > bp->max_req_len) {
107 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
108 
109 		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
110 		memcpy(short_cmd_req, req, msg_len);
111 
112 		short_input.req_type = rte_cpu_to_le_16(req->req_type);
113 		short_input.signature = rte_cpu_to_le_16(
114 					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
115 		short_input.size = rte_cpu_to_le_16(msg_len);
116 		short_input.req_addr =
117 			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
118 
119 		data = (uint32_t *)&short_input;
120 		msg_len = sizeof(short_input);
121 
122 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
123 	}
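	/*
	 * In the short-command case the full request now sits in the
	 * DMA-able short_cmd_req buffer, and only the small
	 * hwrm_short_input header (req_type, signature, size, req_addr)
	 * is written to the mailbox below, so the FW can fetch the full
	 * request through req_addr.
	 */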
124 
125 	/* Write request msg to hwrm channel */
126 	for (i = 0; i < msg_len; i += 4) {
127 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
128 		rte_write32(*data, bar);
129 		data++;
130 	}
131 
132 	/* Zero the rest of the request space */
133 	for (; i < max_req_len; i += 4) {
134 		bar = (uint8_t *)bp->bar0 + bar_offset + i;
135 		rte_write32(0, bar);
136 	}
137 
138 	/* Ring channel doorbell */
139 	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
140 	rte_write32(1, bar);
141 	/*
142 	 * Make sure the channel doorbell ring command completes before
143 	 * reading the response to avoid getting stale or invalid
144 	 * responses.
145 	 */
146 	rte_io_mb();
147 
148 	/* Poll for the valid bit */
149 	for (i = 0; i < timeout; i++) {
150 		/* Sanity check on the resp->resp_len */
151 		rte_io_rmb();
152 		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
153 			/* Last byte of resp contains the valid key */
154 			valid = (uint8_t *)resp + resp->resp_len - 1;
155 			if (*valid == HWRM_RESP_VALID_KEY)
156 				break;
157 		}
158 		rte_delay_us(1);
159 	}
160 
161 	if (i >= timeout) {
162 		/* Suppress VER_GET timeout messages during reset recovery */
163 		if (bp->flags & BNXT_FLAG_FW_RESET &&
164 		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
165 			return -ETIMEDOUT;
166 
167 		PMD_DRV_LOG(ERR,
168 			    "Error(timeout) sending msg 0x%04x, seq_id %d\n",
169 			    req->req_type, req->seq_id);
170 		return -ETIMEDOUT;
171 	}
172 	return 0;
173 }
174 
175 /*
176  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
177  * spinlock, and does initial processing.
178  *
179  * HWRM_CHECK_RESULT() checks both the send status and the FW response;
180  * on failure it releases the spinlock and returns from the calling
181  * function. If the function does not use the regular int return codes,
182  * copy and modify HWRM_CHECK_RESULT() to suit rather than use it directly.
183  *
184  * HWRM_UNLOCK() must be called after all response processing is completed.
185  */
186 #define HWRM_PREP(req, type, kong) do {	\
187 	rte_spinlock_lock(&bp->hwrm_lock); \
188 	if (bp->hwrm_cmd_resp_addr == NULL) { \
189 		rte_spinlock_unlock(&bp->hwrm_lock); \
190 		return -EACCES; \
191 	} \
192 	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
193 	(req)->req_type = rte_cpu_to_le_16(type); \
194 	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
195 	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
196 		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
197 	(req)->target_id = rte_cpu_to_le_16(0xffff); \
198 	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
199 } while (0)
200 
201 #define HWRM_CHECK_RESULT_SILENT() do {\
202 	if (rc) { \
203 		rte_spinlock_unlock(&bp->hwrm_lock); \
204 		return rc; \
205 	} \
206 	if (resp->error_code) { \
207 		rc = rte_le_to_cpu_16(resp->error_code); \
208 		rte_spinlock_unlock(&bp->hwrm_lock); \
209 		return rc; \
210 	} \
211 } while (0)
212 
213 #define HWRM_CHECK_RESULT() do {\
214 	if (rc) { \
215 		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
216 		rte_spinlock_unlock(&bp->hwrm_lock); \
217 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
218 			rc = -EACCES; \
219 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
220 			rc = -ENOSPC; \
221 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
222 			rc = -EINVAL; \
223 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
224 			rc = -ENOTSUP; \
225 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
226 			rc = -EAGAIN; \
227 		else if (rc > 0) \
228 			rc = -EIO; \
229 		return rc; \
230 	} \
231 	if (resp->error_code) { \
232 		rc = rte_le_to_cpu_16(resp->error_code); \
233 		if (resp->resp_len >= 16) { \
234 			struct hwrm_err_output *tmp_hwrm_err_op = \
235 						(void *)resp; \
236 			PMD_DRV_LOG(ERR, \
237 				"error %d:%d:%08x:%04x\n", \
238 				rc, tmp_hwrm_err_op->cmd_err, \
239 				rte_le_to_cpu_32(\
240 					tmp_hwrm_err_op->opaque_0), \
241 				rte_le_to_cpu_16(\
242 					tmp_hwrm_err_op->opaque_1)); \
243 		} else { \
244 			PMD_DRV_LOG(ERR, "error %d\n", rc); \
245 		} \
246 		rte_spinlock_unlock(&bp->hwrm_lock); \
247 		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
248 			rc = -EACCES; \
249 		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
250 			rc = -ENOSPC; \
251 		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
252 			rc = -EINVAL; \
253 		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
254 			rc = -ENOTSUP; \
255 		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
256 			rc = -EAGAIN; \
257 		else if (rc > 0) \
258 			rc = -EIO; \
259 		return rc; \
260 	} \
261 } while (0)
262 
263 #define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
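/*
 * A minimal usage sketch (illustrative; HWRM_FOO is a stand-in, not a
 * real command) of how the macros above combine in the bnxt_hwrm_*()
 * functions below, with req/resp being the command-specific
 * hwrm_*_input/_output structures:
 *
 *	HWRM_PREP(&req, HWRM_FOO, BNXT_USE_CHIMP_MB);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	// on failure: unlocks and returns from caller
 *	... read fields out of resp ...
 *	HWRM_UNLOCK();		// unlock after all response processing
 *	return rc;
 */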
264 
265 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
266 				bool use_kong_mb,
267 				uint16_t msg_type,
268 				void *msg,
269 				uint32_t msg_len,
270 				void *resp_msg,
271 				uint32_t resp_len)
272 {
273 	int rc = 0;
274 	bool mailbox = BNXT_USE_CHIMP_MB;
275 	struct input *req = msg;
276 	struct output *resp = bp->hwrm_cmd_resp_addr;
277 
278 	if (use_kong_mb)
279 		mailbox = BNXT_USE_KONG(bp);
280 
281 	HWRM_PREP(req, msg_type, mailbox);
282 
283 	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
284 
285 	HWRM_CHECK_RESULT();
286 
287 	if (resp_msg)
288 		memcpy(resp_msg, resp, resp_len);
289 
290 	HWRM_UNLOCK();
291 
292 	return rc;
293 }
294 
295 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
296 				  bool use_kong_mb,
297 				  uint16_t tf_type,
298 				  uint16_t tf_subtype,
299 				  uint32_t *tf_response_code,
300 				  void *msg,
301 				  uint32_t msg_len,
302 				  void *response,
303 				  uint32_t response_len)
304 {
305 	int rc = 0;
306 	struct hwrm_cfa_tflib_input req = { .req_type = 0 };
307 	struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
308 	bool mailbox = BNXT_USE_CHIMP_MB;
309 
310 	if (msg_len > sizeof(req.tf_req))
311 		return -ENOMEM;
312 
313 	if (use_kong_mb)
314 		mailbox = BNXT_USE_KONG(bp);
315 
316 	HWRM_PREP(&req, HWRM_TF, mailbox);
317 	/* Build the request using the user-supplied request payload.
318 	 * The TLV request size is checked at build time against the HWRM
319 	 * maximum request size, so no further checking is required.
320 	 */
321 	req.tf_type = tf_type;
322 	req.tf_subtype = tf_subtype;
323 	memcpy(req.tf_req, msg, msg_len);
324 
325 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
326 	HWRM_CHECK_RESULT();
327 
328 	/* Copy the response to the user-provided response buffer */
329 	if (response != NULL)
330 		/* Post-process the response data. We need to copy only
331 		 * the 'payload', as the HWRM data structure really is
332 		 * HWRM header + msg header + payload, and the TFLIB
333 		 * only provides a payload placeholder.
334 		 */
335 		if (response_len != 0) {
336 			memcpy(response,
337 			       resp->tf_resp,
338 			       response_len);
339 		}
340 
341 	/* Extract the internal tflib response code */
342 	*tf_response_code = resp->tf_resp_code;
343 	HWRM_UNLOCK();
344 
345 	return rc;
346 }
347 
348 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
349 {
350 	int rc = 0;
351 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
352 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
353 
354 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
355 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
356 	req.mask = 0;
357 
358 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
359 
360 	HWRM_CHECK_RESULT();
361 	HWRM_UNLOCK();
362 
363 	return rc;
364 }
365 
366 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
367 				 struct bnxt_vnic_info *vnic,
368 				 uint16_t vlan_count,
369 				 struct bnxt_vlan_table_entry *vlan_table)
370 {
371 	int rc = 0;
372 	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
373 	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
374 	uint32_t mask = 0;
375 
376 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
377 		return rc;
378 
379 	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
380 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
381 
382 	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
383 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
384 	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
385 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
386 
387 	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
388 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
389 
390 	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
391 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
392 	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
393 		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
394 		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
395 		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
396 	}
397 	if (vlan_table) {
398 		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
399 			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
400 		req.vlan_tag_tbl_addr =
401 			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
402 		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
403 	}
404 	req.mask = rte_cpu_to_le_32(mask);
405 
406 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
407 
408 	HWRM_CHECK_RESULT();
409 	HWRM_UNLOCK();
410 
411 	return rc;
412 }
413 
414 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
415 			uint16_t vlan_count,
416 			struct bnxt_vlan_antispoof_table_entry *vlan_table)
417 {
418 	int rc = 0;
419 	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
420 	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
421 						bp->hwrm_cmd_resp_addr;
422 
423 	/*
424 	 * Older HWRM versions did not support this command, and the set_rx_mask
425 	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
426  * removed from the set_rx_mask call, and this command was added.
427 	 *
428 	 * This command is also present from 1.7.8.11 and higher,
429 	 * as well as 1.7.8.0
430 	 */
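	/*
	 * Worked example: fw_ver is packed in bnxt_hwrm_ver_get() as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so FW 1.8.0.0 is
	 * (1 << 24) | (8 << 16) == 0x01080000 and FW 1.7.8.11 is
	 * 0x0107080b, which is what the checks below compare against.
	 */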
431 	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
432 		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
433 			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
434 					(11)))
435 				return 0;
436 		}
437 	}
438 	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
439 	req.fid = rte_cpu_to_le_16(fid);
440 
441 	req.vlan_tag_mask_tbl_addr =
442 		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
443 	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
444 
445 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
446 
447 	HWRM_CHECK_RESULT();
448 	HWRM_UNLOCK();
449 
450 	return rc;
451 }
452 
453 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
454 			     struct bnxt_filter_info *filter)
455 {
456 	int rc = 0;
457 	struct bnxt_filter_info *l2_filter = filter;
458 	struct bnxt_vnic_info *vnic = NULL;
459 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
460 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
461 
462 	if (filter->fw_l2_filter_id == UINT64_MAX)
463 		return 0;
464 
465 	if (filter->matching_l2_fltr_ptr)
466 		l2_filter = filter->matching_l2_fltr_ptr;
467 
468 	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
469 		    filter, l2_filter, l2_filter->l2_ref_cnt);
470 
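	/*
	 * L2 filters can be shared; drop one reference here and only issue
	 * the FW free below once the last reference is gone.
	 */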
471 	if (l2_filter->l2_ref_cnt == 0)
472 		return 0;
473 
474 	if (l2_filter->l2_ref_cnt > 0)
475 		l2_filter->l2_ref_cnt--;
476 
477 	if (l2_filter->l2_ref_cnt > 0)
478 		return 0;
479 
480 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
481 
482 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
483 
484 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
485 
486 	HWRM_CHECK_RESULT();
487 	HWRM_UNLOCK();
488 
489 	filter->fw_l2_filter_id = UINT64_MAX;
490 	if (l2_filter->l2_ref_cnt == 0) {
491 		vnic = l2_filter->vnic;
492 		if (vnic) {
493 			STAILQ_REMOVE(&vnic->filter, l2_filter,
494 				      bnxt_filter_info, next);
495 			bnxt_free_filter(bp, l2_filter);
496 		}
497 	}
498 
499 	return 0;
500 }
501 
502 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
503 			 uint16_t dst_id,
504 			 struct bnxt_filter_info *filter)
505 {
506 	int rc = 0;
507 	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
508 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
509 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
510 	const struct rte_eth_vmdq_rx_conf *conf =
511 		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
512 	uint32_t enables = 0;
513 	uint16_t j = dst_id - 1;
514 
515 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
516 	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
517 	    conf->pool_map[j].pools & (1UL << j)) {
518 		PMD_DRV_LOG(DEBUG,
519 			"Add vlan %u to vmdq pool %u\n",
520 			conf->pool_map[j].vlan_id, j);
521 
522 		filter->l2_ivlan = conf->pool_map[j].vlan_id;
523 		filter->enables |=
524 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
525 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
526 	}
527 
528 	if (filter->fw_l2_filter_id != UINT64_MAX)
529 		bnxt_hwrm_clear_l2_filter(bp, filter);
530 
531 	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
532 
533 	/* The PMD supports neither XDP nor RoCE */
534 	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
535 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
536 	req.flags = rte_cpu_to_le_32(filter->flags);
537 
538 	enables = filter->enables |
539 	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
540 	req.dst_id = rte_cpu_to_le_16(dst_id);
541 
542 	if (enables &
543 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
544 		memcpy(req.l2_addr, filter->l2_addr,
545 		       RTE_ETHER_ADDR_LEN);
546 	if (enables &
547 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
548 		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
549 		       RTE_ETHER_ADDR_LEN);
550 	if (enables &
551 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
552 		req.l2_ovlan = filter->l2_ovlan;
553 	if (enables &
554 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
555 		req.l2_ivlan = filter->l2_ivlan;
556 	if (enables &
557 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
558 		req.l2_ovlan_mask = filter->l2_ovlan_mask;
559 	if (enables &
560 	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
561 		req.l2_ivlan_mask = filter->l2_ivlan_mask;
562 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
563 		req.src_id = rte_cpu_to_le_32(filter->src_id);
564 	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
565 		req.src_type = filter->src_type;
566 	if (filter->pri_hint) {
567 		req.pri_hint = filter->pri_hint;
568 		req.l2_filter_id_hint =
569 			rte_cpu_to_le_64(filter->l2_filter_id_hint);
570 	}
571 
572 	req.enables = rte_cpu_to_le_32(enables);
573 
574 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
575 
576 	HWRM_CHECK_RESULT();
577 
578 	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
579 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
580 	HWRM_UNLOCK();
581 
582 	filter->l2_ref_cnt++;
583 
584 	return rc;
585 }
586 
587 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
588 {
589 	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
590 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
591 	uint32_t flags = 0;
592 	int rc;
593 
594 	if (!ptp)
595 		return 0;
596 
597 	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
598 
599 	if (ptp->rx_filter)
600 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
601 	else
602 		flags |=
603 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
604 	if (ptp->tx_tstamp_en)
605 		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
606 	else
607 		flags |=
608 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
609 	req.flags = rte_cpu_to_le_32(flags);
610 	req.enables = rte_cpu_to_le_32
611 		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
612 	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
613 
614 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
615 	HWRM_UNLOCK();
616 
617 	return rc;
618 }
619 
620 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
621 {
622 	int rc = 0;
623 	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
624 	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
625 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
626 
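	/* bp->ptp_cfg already populated by a previous call; nothing to do. */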
627 	if (ptp)
628 		return 0;
629 
630 	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
631 
632 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
633 
634 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
635 
636 	HWRM_CHECK_RESULT();
637 
638 	if (!BNXT_CHIP_THOR(bp) &&
639 	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
640 		return 0;
641 
642 	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
643 		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
644 
645 	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
646 	if (!ptp)
647 		return -ENOMEM;
648 
649 	if (!BNXT_CHIP_THOR(bp)) {
650 		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
651 			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
652 		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
653 			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
654 		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
655 			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
656 		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
657 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
658 		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
659 			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
660 		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
661 			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
662 		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
663 			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
664 		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
665 			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
666 		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
667 			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
668 	}
669 
670 	ptp->bp = bp;
671 	bp->ptp_cfg = ptp;
672 
673 	return 0;
674 }
675 
676 void bnxt_hwrm_free_vf_info(struct bnxt *bp)
677 {
678 	int i;
679 
680 	for (i = 0; i < bp->pf->max_vfs; i++) {
681 		rte_free(bp->pf->vf_info[i].vlan_table);
682 		bp->pf->vf_info[i].vlan_table = NULL;
683 		rte_free(bp->pf->vf_info[i].vlan_as_table);
684 		bp->pf->vf_info[i].vlan_as_table = NULL;
685 	}
686 	rte_free(bp->pf->vf_info);
687 	bp->pf->vf_info = NULL;
688 }
689 
690 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
691 {
692 	int rc = 0;
693 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
694 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
695 	uint16_t new_max_vfs;
696 	uint32_t flags;
697 	int i;
698 
699 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
700 
701 	req.fid = rte_cpu_to_le_16(0xffff);
702 
703 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
704 
705 	HWRM_CHECK_RESULT();
706 
707 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
708 	flags = rte_le_to_cpu_32(resp->flags);
709 	if (BNXT_PF(bp)) {
710 		bp->pf->port_id = resp->port_id;
711 		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
712 		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
713 		new_max_vfs = bp->pdev->max_vfs;
714 		if (new_max_vfs != bp->pf->max_vfs) {
715 			if (bp->pf->vf_info)
716 				bnxt_hwrm_free_vf_info(bp);
717 			bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
718 			    sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
719 			if (bp->pf->vf_info == NULL) {
720 				PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
721 				return -ENOMEM;
722 			}
723 			bp->pf->max_vfs = new_max_vfs;
724 			for (i = 0; i < new_max_vfs; i++) {
725 				bp->pf->vf_info[i].fid =
726 					bp->pf->first_vf_id + i;
727 				bp->pf->vf_info[i].vlan_table =
728 					rte_zmalloc("VF VLAN table",
729 						    getpagesize(),
730 						    getpagesize());
731 				if (bp->pf->vf_info[i].vlan_table == NULL)
732 					PMD_DRV_LOG(ERR,
733 					"Fail to alloc VLAN table for VF %d\n",
734 					i);
735 				else
736 					rte_mem_lock_page(
737 						bp->pf->vf_info[i].vlan_table);
738 				bp->pf->vf_info[i].vlan_as_table =
739 					rte_zmalloc("VF VLAN AS table",
740 						    getpagesize(),
741 						    getpagesize());
742 				if (bp->pf->vf_info[i].vlan_as_table == NULL)
743 					PMD_DRV_LOG(ERR,
744 					"Alloc VLAN AS table for VF %d fail\n",
745 					i);
746 				else
747 					rte_mem_lock_page(
748 					      bp->pf->vf_info[i].vlan_as_table);
749 				STAILQ_INIT(&bp->pf->vf_info[i].filter);
750 			}
751 		}
752 	}
753 
754 	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
755 	if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
756 		bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
757 		memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
758 	} else {
759 		bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
760 	}
761 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
762 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
763 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
764 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
765 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
766 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
767 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
768 	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
769 		bp->max_l2_ctx += bp->max_rx_em_flows;
770 	/* TODO: For now, do not support VMDq/RFS on VFs. */
771 	if (BNXT_PF(bp)) {
772 		if (bp->pf->max_vfs)
773 			bp->max_vnics = 1;
774 		else
775 			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
776 	} else {
777 		bp->max_vnics = 1;
778 	}
779 	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
780 		    bp->max_l2_ctx, bp->max_vnics);
781 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
782 	if (BNXT_PF(bp)) {
783 		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
784 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
785 			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
786 			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
787 			HWRM_UNLOCK();
788 			bnxt_hwrm_ptp_qcfg(bp);
789 		}
790 	}
791 
792 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
793 		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
794 
795 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
796 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
797 		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
798 	}
799 
800 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
801 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
802 
803 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
804 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
805 
806 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
807 		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
808 
809 	HWRM_UNLOCK();
810 
811 	return rc;
812 }
813 
814 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
815 {
816 	int rc;
817 
818 	rc = __bnxt_hwrm_func_qcaps(bp);
819 	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
820 		rc = bnxt_alloc_ctx_mem(bp);
821 		if (rc)
822 			return rc;
823 
824 		/* On older FW,
825 		 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
826 		 * But the error can be ignored. Return success.
827 		 */
828 		rc = bnxt_hwrm_func_resc_qcaps(bp);
829 		if (!rc)
830 			bp->flags |= BNXT_FLAG_NEW_RM;
831 	}
832 
833 	return 0;
834 }
835 
836 /* VNIC caps cover all VNICs, so there is no need to pass a vnic_id */
837 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
838 {
839 	int rc = 0;
840 	uint32_t flags;
841 	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
842 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
843 
844 	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
845 
846 	req.target_id = rte_cpu_to_le_16(0xffff);
847 
848 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
849 
850 	HWRM_CHECK_RESULT();
851 
852 	flags = rte_le_to_cpu_32(resp->flags);
853 
854 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
855 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
856 		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
857 	}
858 
859 	if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
860 		bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
861 
862 	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
863 
864 	HWRM_UNLOCK();
865 
866 	return rc;
867 }
868 
869 int bnxt_hwrm_func_reset(struct bnxt *bp)
870 {
871 	int rc = 0;
872 	struct hwrm_func_reset_input req = {.req_type = 0 };
873 	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
874 
875 	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
876 
877 	req.enables = rte_cpu_to_le_32(0);
878 
879 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
880 
881 	HWRM_CHECK_RESULT();
882 	HWRM_UNLOCK();
883 
884 	return rc;
885 }
886 
887 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
888 {
889 	int rc;
890 	uint32_t flags = 0;
891 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
892 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
893 
894 	if (bp->flags & BNXT_FLAG_REGISTERED)
895 		return 0;
896 
897 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
898 		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
899 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
900 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
901 
902 	/* PFs and trusted VFs should indicate support for the
903 	 * Master capability on non-Stingray platforms.
904 	 */
905 	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
906 		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
907 
908 	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
909 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
910 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
911 	req.ver_maj = RTE_VER_YEAR;
912 	req.ver_min = RTE_VER_MONTH;
913 	req.ver_upd = RTE_VER_MINOR;
914 
915 	if (BNXT_PF(bp)) {
916 		req.enables |= rte_cpu_to_le_32(
917 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
918 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
919 		       RTE_MIN(sizeof(req.vf_req_fwd),
920 			       sizeof(bp->pf->vf_req_fwd)));
921 	}
922 
923 	req.flags = rte_cpu_to_le_32(flags);
924 
925 	req.async_event_fwd[0] |=
926 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
927 				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
928 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
929 				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
930 				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
931 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
932 		req.async_event_fwd[0] |=
933 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
934 	req.async_event_fwd[1] |=
935 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
936 				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
937 	if (BNXT_PF(bp))
938 		req.async_event_fwd[1] |=
939 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
940 
941 	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
942 		req.async_event_fwd[1] |=
943 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
944 
945 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
946 
947 	HWRM_CHECK_RESULT();
948 
949 	flags = rte_le_to_cpu_32(resp->flags);
950 	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
951 		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
952 
953 	HWRM_UNLOCK();
954 
955 	bp->flags |= BNXT_FLAG_REGISTERED;
956 
957 	return rc;
958 }
959 
960 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
961 {
962 	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
963 		return 0;
964 
965 	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
966 }
967 
968 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
969 {
970 	int rc;
971 	uint32_t flags = 0;
972 	uint32_t enables;
973 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
974 	struct hwrm_func_vf_cfg_input req = {0};
975 
976 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
977 
978 	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
979 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
980 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
981 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
982 		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
983 
984 	if (BNXT_HAS_RING_GRPS(bp)) {
985 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
986 		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
987 	}
988 
989 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
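	/*
	 * Each Rx queue presumably needs an Rx ring plus an aggregation
	 * ring (hence AGG_RING_MULTIPLIER), and every Rx/Tx ring needs a
	 * completion ring, plus one more for the async queue when
	 * BNXT_NUM_ASYNC_CPR() is non-zero.
	 */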
990 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
991 					    AGG_RING_MULTIPLIER);
992 	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
993 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
994 					      bp->tx_nr_rings +
995 					      BNXT_NUM_ASYNC_CPR(bp));
996 	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
997 	if (bp->vf_resv_strategy ==
998 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
999 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1000 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1001 			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1002 		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1003 		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1004 		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1005 	} else if (bp->vf_resv_strategy ==
1006 		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1007 		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1008 		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1009 	}
1010 
1011 	if (test)
1012 		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1013 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1014 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1015 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1016 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1017 			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1018 
1019 	if (test && BNXT_HAS_RING_GRPS(bp))
1020 		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1021 
1022 	req.flags = rte_cpu_to_le_32(flags);
1023 	req.enables |= rte_cpu_to_le_32(enables);
1024 
1025 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1026 
1027 	if (test)
1028 		HWRM_CHECK_RESULT_SILENT();
1029 	else
1030 		HWRM_CHECK_RESULT();
1031 
1032 	HWRM_UNLOCK();
1033 	return rc;
1034 }
1035 
1036 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1037 {
1038 	int rc;
1039 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1040 	struct hwrm_func_resource_qcaps_input req = {0};
1041 
1042 	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1043 	req.fid = rte_cpu_to_le_16(0xffff);
1044 
1045 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1046 
1047 	HWRM_CHECK_RESULT_SILENT();
1048 
1049 	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1050 	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1051 	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1052 	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1053 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1054 	/* func_resource_qcaps does not return max_rx_em_flows.
1055 	 * So use the value provided by func_qcaps.
1056 	 */
1057 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1058 	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
1059 		bp->max_l2_ctx += bp->max_rx_em_flows;
1060 	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1061 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1062 	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1063 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1064 	if (bp->vf_resv_strategy >
1065 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1066 		bp->vf_resv_strategy =
1067 		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1068 
1069 	HWRM_UNLOCK();
1070 	return rc;
1071 }
1072 
1073 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1074 {
1075 	int rc = 0;
1076 	struct hwrm_ver_get_input req = {.req_type = 0 };
1077 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1078 	uint32_t fw_version;
1079 	uint16_t max_resp_len;
1080 	char type[RTE_MEMZONE_NAMESIZE];
1081 	uint32_t dev_caps_cfg;
1082 
1083 	bp->max_req_len = HWRM_MAX_REQ_LEN;
1084 	bp->hwrm_cmd_timeout = timeout;
1085 	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1086 
1087 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1088 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
1089 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1090 
1091 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1092 
1093 	if (bp->flags & BNXT_FLAG_FW_RESET)
1094 		HWRM_CHECK_RESULT_SILENT();
1095 	else
1096 		HWRM_CHECK_RESULT();
1097 
1098 	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
1099 		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1100 		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1101 		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
1102 	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
1103 		     (resp->hwrm_fw_min_8b << 16) |
1104 		     (resp->hwrm_fw_bld_8b << 8) |
1105 		     resp->hwrm_fw_rsvd_8b;
1106 	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
1107 		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1108 
1109 	fw_version = resp->hwrm_intf_maj_8b << 16;
1110 	fw_version |= resp->hwrm_intf_min_8b << 8;
1111 	fw_version |= resp->hwrm_intf_upd_8b;
1112 	bp->hwrm_spec_code = fw_version;
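	/*
	 * Worked example: interface version 1.8.3 packs as
	 * (1 << 16) | (8 << 8) | 3 == 0x10803, i.e. HWRM_SPEC_CODE_1_8_3
	 * as defined at the top of this file.
	 */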
1113 
1114 	/* def_req_timeout value is in milliseconds */
1115 	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1116 	/* convert timeout to usec */
1117 	bp->hwrm_cmd_timeout *= 1000;
1118 	if (!bp->hwrm_cmd_timeout)
1119 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
1120 
1121 	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1122 		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
1123 		rc = -EINVAL;
1124 		goto error;
1125 	}
1126 
1127 	if (bp->max_req_len > resp->max_req_win_len) {
1128 		PMD_DRV_LOG(ERR, "Unsupported request length\n");
1129 		rc = -EINVAL;
1130 	}
1131 	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1132 	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1133 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1134 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1135 
1136 	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1137 	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1138 
1139 	if (bp->max_resp_len != max_resp_len) {
1140 		sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
1141 			bp->pdev->addr.domain, bp->pdev->addr.bus,
1142 			bp->pdev->addr.devid, bp->pdev->addr.function);
1143 
1144 		rte_free(bp->hwrm_cmd_resp_addr);
1145 
1146 		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
1147 		if (bp->hwrm_cmd_resp_addr == NULL) {
1148 			rc = -ENOMEM;
1149 			goto error;
1150 		}
1151 		bp->hwrm_cmd_resp_dma_addr =
1152 			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
1153 		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
1154 			PMD_DRV_LOG(ERR,
1155 			"Unable to map response buffer to physical memory.\n");
1156 			rc = -ENOMEM;
1157 			goto error;
1158 		}
1159 		bp->max_resp_len = max_resp_len;
1160 	}
1161 
1162 	if ((dev_caps_cfg &
1163 		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1164 	    (dev_caps_cfg &
1165 	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1166 		PMD_DRV_LOG(DEBUG, "Short command supported\n");
1167 		bp->flags |= BNXT_FLAG_SHORT_CMD;
1168 	}
1169 
1170 	if (((dev_caps_cfg &
1171 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1172 	     (dev_caps_cfg &
1173 	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1174 	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1175 		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1176 			bp->pdev->addr.domain, bp->pdev->addr.bus,
1177 			bp->pdev->addr.devid, bp->pdev->addr.function);
1178 
1179 		rte_free(bp->hwrm_short_cmd_req_addr);
1180 
1181 		bp->hwrm_short_cmd_req_addr =
1182 				rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1183 		if (bp->hwrm_short_cmd_req_addr == NULL) {
1184 			rc = -ENOMEM;
1185 			goto error;
1186 		}
1187 		bp->hwrm_short_cmd_req_dma_addr =
1188 			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1189 		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1190 			rte_free(bp->hwrm_short_cmd_req_addr);
1191 			PMD_DRV_LOG(ERR,
1192 				"Unable to map buffer to physical memory.\n");
1193 			rc = -ENOMEM;
1194 			goto error;
1195 		}
1196 	}
1197 	if (dev_caps_cfg &
1198 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1199 		bp->flags |= BNXT_FLAG_KONG_MB_EN;
1200 		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
1201 	}
1202 	if (dev_caps_cfg &
1203 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1204 		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
1205 	if (dev_caps_cfg &
1206 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1207 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1208 		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
1209 	}
1210 
1211 	if (dev_caps_cfg &
1212 	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1213 		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
1214 		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1215 	}
1216 
1217 
1218 error:
1219 	HWRM_UNLOCK();
1220 	return rc;
1221 }
1222 
1223 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1224 {
1225 	int rc;
1226 	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1227 	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1228 
1229 	if (!(bp->flags & BNXT_FLAG_REGISTERED))
1230 		return 0;
1231 
1232 	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1233 	req.flags = flags;
1234 
1235 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1236 
1237 	HWRM_CHECK_RESULT();
1238 	HWRM_UNLOCK();
1239 
1240 	return rc;
1241 }
1242 
1243 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1244 {
1245 	int rc = 0;
1246 	struct hwrm_port_phy_cfg_input req = {0};
1247 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1248 	uint32_t enables = 0;
1249 
1250 	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1251 
1252 	if (conf->link_up) {
1253 		/* Setting a fixed speed while autoneg is on, so disable autoneg */
1254 		if (bp->link_info->auto_mode && conf->link_speed) {
1255 			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1256 			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1257 		}
1258 
1259 		req.flags = rte_cpu_to_le_32(conf->phy_flags);
1260 		/*
1261 		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1262 		 * any auto mode, even "none".
1263 		 */
1264 		if (!conf->link_speed) {
1265 			/* No speeds specified. Enable AutoNeg - all speeds */
1266 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1267 			req.auto_mode =
1268 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1269 		} else {
1270 			if (bp->link_info->link_signal_mode) {
1271 				enables |=
1272 				HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1273 				req.force_pam4_link_speed =
1274 					rte_cpu_to_le_16(conf->link_speed);
1275 			} else {
1276 				req.force_link_speed =
1277 					rte_cpu_to_le_16(conf->link_speed);
1278 			}
1279 		}
1280 		/* AutoNeg - Advertise speeds specified. */
1281 		if (conf->auto_link_speed_mask &&
1282 		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1283 			req.auto_mode =
1284 				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1285 			req.auto_link_speed_mask =
1286 				conf->auto_link_speed_mask;
1287 			if (conf->auto_pam4_link_speeds) {
1288 				enables |=
1289 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1290 				req.auto_link_pam4_speed_mask =
1291 					conf->auto_pam4_link_speeds;
1292 			} else {
1293 				enables |=
1294 				HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1295 			}
1296 		}
1297 		if (conf->auto_link_speed &&
1298 		!(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1299 			enables |=
1300 				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1301 
1302 		req.auto_duplex = conf->duplex;
1303 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1304 		req.auto_pause = conf->auto_pause;
1305 		req.force_pause = conf->force_pause;
1306 		/* Set force_pause if there is no auto or if there is a force */
1307 		if (req.auto_pause && !req.force_pause)
1308 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1309 		else
1310 			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1311 
1312 		req.enables = rte_cpu_to_le_32(enables);
1313 	} else {
1314 		req.flags =
1315 		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1316 		PMD_DRV_LOG(INFO, "Force Link Down\n");
1317 	}
1318 
1319 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1320 
1321 	HWRM_CHECK_RESULT();
1322 	HWRM_UNLOCK();
1323 
1324 	return rc;
1325 }
1326 
1327 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1328 				   struct bnxt_link_info *link_info)
1329 {
1330 	int rc = 0;
1331 	struct hwrm_port_phy_qcfg_input req = {0};
1332 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1333 
1334 	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1335 
1336 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1337 
1338 	HWRM_CHECK_RESULT();
1339 
1340 	link_info->phy_link_status = resp->link;
1341 	link_info->link_up =
1342 		(link_info->phy_link_status ==
1343 		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1344 	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1345 	link_info->duplex = resp->duplex_cfg;
1346 	link_info->pause = resp->pause;
1347 	link_info->auto_pause = resp->auto_pause;
1348 	link_info->force_pause = resp->force_pause;
1349 	link_info->auto_mode = resp->auto_mode;
1350 	link_info->phy_type = resp->phy_type;
1351 	link_info->media_type = resp->media_type;
1352 
1353 	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1354 	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1355 	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1356 	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1357 	link_info->phy_ver[0] = resp->phy_maj;
1358 	link_info->phy_ver[1] = resp->phy_min;
1359 	link_info->phy_ver[2] = resp->phy_bld;
1360 	link_info->link_signal_mode =
1361 		rte_le_to_cpu_16(resp->active_fec_signal_mode);
1362 	link_info->force_pam4_link_speed =
1363 			rte_le_to_cpu_16(resp->force_pam4_link_speed);
1364 	link_info->support_pam4_speeds =
1365 			rte_le_to_cpu_16(resp->support_pam4_speeds);
1366 	link_info->auto_pam4_link_speeds =
1367 			rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1368 	HWRM_UNLOCK();
1369 
1370 	PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1371 		    link_info->link_speed, link_info->auto_mode,
1372 		    link_info->auto_link_speed, link_info->auto_link_speed_mask,
1373 		    link_info->support_speeds, link_info->force_link_speed);
1374 	PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1375 		    link_info->link_signal_mode,
1376 		    link_info->auto_pam4_link_speeds,
1377 		    link_info->support_pam4_speeds,
1378 		    link_info->force_pam4_link_speed);
1379 	return rc;
1380 }
1381 
1382 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1383 {
1384 	int rc = 0;
1385 	struct hwrm_port_phy_qcaps_input req = {0};
1386 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1387 	struct bnxt_link_info *link_info = bp->link_info;
1388 
1389 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1390 		return 0;
1391 
1392 	HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1393 
1394 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1395 
1396 	HWRM_CHECK_RESULT();
1397 
1398 	bp->port_cnt = resp->port_cnt;
1399 	if (resp->supported_speeds_auto_mode)
1400 		link_info->support_auto_speeds =
1401 			rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1402 	if (resp->supported_pam4_speeds_auto_mode)
1403 		link_info->support_pam4_auto_speeds =
1404 			rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1405 
1406 	HWRM_UNLOCK();
1407 
1408 	return 0;
1409 }
1410 
1411 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1412 {
1413 	int i = 0;
1414 
1415 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1416 		if (bp->tx_cos_queue[i].profile ==
1417 		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1418 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1419 			return true;
1420 		}
1421 	}
1422 	return false;
1423 }
1424 
1425 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1426 {
1427 	int i = 0;
1428 
1429 	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1430 		if (bp->tx_cos_queue[i].profile !=
1431 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1432 		    bp->tx_cos_queue[i].id !=
1433 		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1434 			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1435 			break;
1436 		}
1437 	}
1438 }
1439 
1440 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1441 {
1442 	int rc = 0;
1443 	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1444 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1445 	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1446 	int i;
1447 
1448 get_rx_info:
1449 	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1450 
1451 	req.flags = rte_cpu_to_le_32(dir);
1452 	/* Set drv_qmap_cap only on HWRM version >= 1.9.1 and when CoS classification is not required. */
1453 	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1454 	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1455 		req.drv_qmap_cap =
1456 			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1457 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1458 
1459 	HWRM_CHECK_RESULT();
1460 
1461 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1462 		GET_TX_QUEUE_INFO(0);
1463 		GET_TX_QUEUE_INFO(1);
1464 		GET_TX_QUEUE_INFO(2);
1465 		GET_TX_QUEUE_INFO(3);
1466 		GET_TX_QUEUE_INFO(4);
1467 		GET_TX_QUEUE_INFO(5);
1468 		GET_TX_QUEUE_INFO(6);
1469 		GET_TX_QUEUE_INFO(7);
1470 	} else {
1471 		GET_RX_QUEUE_INFO(0);
1472 		GET_RX_QUEUE_INFO(1);
1473 		GET_RX_QUEUE_INFO(2);
1474 		GET_RX_QUEUE_INFO(3);
1475 		GET_RX_QUEUE_INFO(4);
1476 		GET_RX_QUEUE_INFO(5);
1477 		GET_RX_QUEUE_INFO(6);
1478 		GET_RX_QUEUE_INFO(7);
1479 	}
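	/*
	 * GET_TX_QUEUE_INFO(x)/GET_RX_QUEUE_INFO(x) are defined in the HWRM
	 * header; they presumably copy queue_id<x> and its service profile
	 * from the response into bp->tx_cos_queue[x]/bp->rx_cos_queue[x].
	 */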
1480 
1481 	HWRM_UNLOCK();
1482 
1483 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1484 		goto done;
1485 
1486 	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1487 		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1488 	} else {
1489 		int j;
1490 
1491 		/* iterate and find the COSq profile to use for Tx */
1492 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1493 			for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1494 				if (bp->tx_cos_queue[i].id != 0xff)
1495 					bp->tx_cosq_id[j++] =
1496 						bp->tx_cos_queue[i].id;
1497 			}
1498 		} else {
1499 			/* When CoS classification is disabled, for normal NIC
1500 			 * operation, we should ideally use a LOSSY profile.
1501 			 * If none is found, fall back to the first valid profile.
1502 			 */
1503 			if (!bnxt_find_lossy_profile(bp))
1504 				bnxt_find_first_valid_profile(bp);
1505 
1506 		}
1507 	}
1508 
1509 	bp->max_tc = resp->max_configurable_queues;
1510 	bp->max_lltc = resp->max_configurable_lossless_queues;
1511 	if (bp->max_tc > BNXT_MAX_QUEUE)
1512 		bp->max_tc = BNXT_MAX_QUEUE;
1513 	bp->max_q = bp->max_tc;
1514 
1515 	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1516 		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1517 		goto get_rx_info;
1518 	}
1519 
1520 done:
1521 	return rc;
1522 }
1523 
1524 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1525 			 struct bnxt_ring *ring,
1526 			 uint32_t ring_type, uint32_t map_index,
1527 			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1528 			 uint16_t tx_cosq_id)
1529 {
1530 	int rc = 0;
1531 	uint32_t enables = 0;
1532 	struct hwrm_ring_alloc_input req = {.req_type = 0 };
1533 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1534 	struct rte_mempool *mb_pool;
1535 	uint16_t rx_buf_size;
1536 
1537 	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1538 
1539 	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1540 	req.fbo = rte_cpu_to_le_32(0);
1541 	/* Association of ring index with doorbell index */
1542 	req.logical_id = rte_cpu_to_le_16(map_index);
1543 	req.length = rte_cpu_to_le_32(ring->ring_size);
1544 
1545 	switch (ring_type) {
1546 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1547 		req.ring_type = ring_type;
1548 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1549 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1550 		req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1551 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
1552 			enables |=
1553 			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1554 		break;
1555 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1556 		req.ring_type = ring_type;
1557 		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1558 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1559 		if (BNXT_CHIP_THOR(bp)) {
1560 			mb_pool = bp->rx_queues[0]->mb_pool;
1561 			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1562 				      RTE_PKTMBUF_HEADROOM;
1563 			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1564 			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1565 			enables |=
1566 				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1567 		}
1568 		if (stats_ctx_id != INVALID_STATS_CTX_ID)
1569 			enables |=
1570 				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1571 		break;
1572 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1573 		req.ring_type = ring_type;
1574 		if (BNXT_HAS_NQ(bp)) {
1575 			/* Association of cp ring with nq */
1576 			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1577 			enables |=
1578 				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1579 		}
1580 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1581 		break;
1582 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1583 		req.ring_type = ring_type;
1584 		req.page_size = BNXT_PAGE_SHFT;
1585 		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1586 		break;
1587 	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1588 		req.ring_type = ring_type;
1589 		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1590 
1591 		mb_pool = bp->rx_queues[0]->mb_pool;
1592 		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1593 			      RTE_PKTMBUF_HEADROOM;
1594 		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1595 		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1596 
1597 		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1598 		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1599 			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1600 			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1601 		break;
1602 	default:
1603 		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1604 			ring_type);
1605 		HWRM_UNLOCK();
1606 		return -EINVAL;
1607 	}
1608 	req.enables = rte_cpu_to_le_32(enables);
1609 
1610 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1611 
1612 	if (rc || resp->error_code) {
1613 		if (rc == 0 && resp->error_code)
1614 			rc = rte_le_to_cpu_16(resp->error_code);
1615 		switch (ring_type) {
1616 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1617 			PMD_DRV_LOG(ERR,
1618 				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
1619 			HWRM_UNLOCK();
1620 			return rc;
1621 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1622 			PMD_DRV_LOG(ERR,
1623 				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1624 			HWRM_UNLOCK();
1625 			return rc;
1626 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1627 			PMD_DRV_LOG(ERR,
1628 				    "hwrm_ring_alloc rx agg failed. rc:%d\n",
1629 				    rc);
1630 			HWRM_UNLOCK();
1631 			return rc;
1632 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1633 			PMD_DRV_LOG(ERR,
1634 				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1635 			HWRM_UNLOCK();
1636 			return rc;
1637 		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1638 			PMD_DRV_LOG(ERR,
1639 				    "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1640 			HWRM_UNLOCK();
1641 			return rc;
1642 		default:
1643 			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1644 			HWRM_UNLOCK();
1645 			return rc;
1646 		}
1647 	}
1648 
1649 	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1650 	HWRM_UNLOCK();
1651 	return rc;
1652 }
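/*
 * Illustrative use (a sketch, not part of the driver; the real callers
 * live in the ring setup code, e.g. bnxt_ring.c).  Allocating a TX ring
 * for queue "idx", assuming "txq" and its completion ring already exist
 * and "tx_cosq_id" is a CoS queue ID previously obtained from firmware:
 *
 *	struct bnxt_ring *ring = txq->tx_ring->tx_ring_struct;
 *
 *	rc = bnxt_hwrm_ring_alloc(bp, ring,
 *				  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
 *				  idx,
 *				  txq->cp_ring->hw_stats_ctx_id,
 *				  txq->cp_ring->cp_ring_struct->fw_ring_id,
 *				  tx_cosq_id);
 *
 * On success, ring->fw_ring_id holds the firmware-assigned ring ID.
 */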
1653 
1654 int bnxt_hwrm_ring_free(struct bnxt *bp,
1655 			struct bnxt_ring *ring, uint32_t ring_type)
1656 {
1657 	int rc;
1658 	struct hwrm_ring_free_input req = {.req_type = 0 };
1659 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1660 
1661 	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1662 
1663 	req.ring_type = ring_type;
1664 	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1665 
1666 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1667 
1668 	if (rc || resp->error_code) {
1669 		if (rc == 0 && resp->error_code)
1670 			rc = rte_le_to_cpu_16(resp->error_code);
1671 		HWRM_UNLOCK();
1672 
1673 		switch (ring_type) {
1674 		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1675 			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1676 				rc);
1677 			return rc;
1678 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1679 			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1680 				rc);
1681 			return rc;
1682 		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1683 			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1684 				rc);
1685 			return rc;
1686 		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1687 			PMD_DRV_LOG(ERR,
1688 				    "hwrm_ring_free nq failed. rc:%d\n", rc);
1689 			return rc;
1690 		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1691 			PMD_DRV_LOG(ERR,
1692 				    "hwrm_ring_free agg failed. rc:%d\n", rc);
1693 			return rc;
1694 		default:
1695 			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1696 			return rc;
1697 		}
1698 	}
1699 	HWRM_UNLOCK();
1700 	return 0;
1701 }
1702 
1703 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1704 {
1705 	int rc = 0;
1706 	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1707 	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1708 
1709 	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1710 
1711 	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1712 	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1713 	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1714 	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1715 
1716 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1717 
1718 	HWRM_CHECK_RESULT();
1719 
1720 	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1721 
1722 	HWRM_UNLOCK();
1723 
1724 	return rc;
1725 }
1726 
1727 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1728 {
1729 	int rc;
1730 	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1731 	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1732 
1733 	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1734 
1735 	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1736 
1737 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1738 
1739 	HWRM_CHECK_RESULT();
1740 	HWRM_UNLOCK();
1741 
1742 	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1743 	return rc;
1744 }
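/*
 * On legacy (non-Thor) chips with BNXT_HAS_RING_GRPS, a ring group
 * bundles the completion, RX and aggregation rings plus the statistics
 * context of one RX queue.  Expected pairing (sketch), assuming
 * bp->grp_info[idx] already holds valid firmware ring IDs:
 *
 *	rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
 *	...
 *	rc = bnxt_hwrm_ring_grp_free(bp, idx);
 */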
1745 
1746 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1747 {
1748 	int rc = 0;
1749 	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1750 	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1751 
1752 	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1753 		return rc;
1754 
1755 	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1756 
1757 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1758 
1759 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1760 
1761 	HWRM_CHECK_RESULT();
1762 	HWRM_UNLOCK();
1763 
1764 	return rc;
1765 }
1766 
1767 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1768 				unsigned int idx __rte_unused)
1769 {
1770 	int rc;
1771 	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1772 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1773 
1774 	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1775 
1776 	req.update_period_ms = rte_cpu_to_le_32(0);
1777 
1778 	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1779 
1780 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1781 
1782 	HWRM_CHECK_RESULT();
1783 
1784 	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1785 
1786 	HWRM_UNLOCK();
1787 
1788 	return rc;
1789 }
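/*
 * Statistics-context lifecycle (sketch): cpr->hw_stats_map must point
 * at a DMA-able counter block before allocation; firmware then updates
 * that block until the context is freed.
 *
 *	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
 *	... read counters from the stats block (cpr->hw_stats) ...
 *	rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
 */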
1790 
1791 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1792 				unsigned int idx __rte_unused)
1793 {
1794 	int rc;
1795 	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1796 	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1797 
1798 	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1799 
1800 	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1801 
1802 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1803 
1804 	HWRM_CHECK_RESULT();
1805 	HWRM_UNLOCK();
1806 
1807 	return rc;
1808 }
1809 
1810 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1811 {
1812 	int rc = 0, i, j;
1813 	struct hwrm_vnic_alloc_input req = { 0 };
1814 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1815 
1816 	if (!BNXT_HAS_RING_GRPS(bp))
1817 		goto skip_ring_grps;
1818 
1819 	/* map ring groups to this vnic */
1820 	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1821 		vnic->start_grp_id, vnic->end_grp_id);
1822 	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1823 		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1824 
1825 	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1826 	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1827 	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1828 	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1829 
1830 skip_ring_grps:
1831 	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1832 	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1833 
1834 	if (vnic->func_default)
1835 		req.flags =
1836 			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1837 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1838 
1839 	HWRM_CHECK_RESULT();
1840 
1841 	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1842 	HWRM_UNLOCK();
1843 	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1844 	return rc;
1845 }
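/*
 * Typical VNIC bring-up order (sketch; the authoritative sequence is
 * the VNIC setup path in bnxt_ethdev.c):
 *
 *	bnxt_hwrm_vnic_alloc(bp, vnic);
 *	bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
 *	bnxt_hwrm_vnic_cfg(bp, vnic);
 *	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 */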
1846 
1847 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1848 					struct bnxt_vnic_info *vnic,
1849 					struct bnxt_plcmodes_cfg *pmode)
1850 {
1851 	int rc = 0;
1852 	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1853 	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1854 
1855 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1856 
1857 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1858 
1859 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1860 
1861 	HWRM_CHECK_RESULT();
1862 
1863 	pmode->flags = rte_le_to_cpu_32(resp->flags);
1864 	/* dflt_vnic bit doesn't exist in the _cfg command */
1865 	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1866 	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1867 	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1868 	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1869 
1870 	HWRM_UNLOCK();
1871 
1872 	return rc;
1873 }
1874 
1875 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1876 				       struct bnxt_vnic_info *vnic,
1877 				       struct bnxt_plcmodes_cfg *pmode)
1878 {
1879 	int rc = 0;
1880 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1881 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1882 
1883 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1884 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1885 		return rc;
1886 	}
1887 
1888 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1889 
1890 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1891 	req.flags = rte_cpu_to_le_32(pmode->flags);
1892 	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1893 	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1894 	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1895 	req.enables = rte_cpu_to_le_32(
1896 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1897 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1898 	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1899 	);
1900 
1901 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1902 
1903 	HWRM_CHECK_RESULT();
1904 	HWRM_UNLOCK();
1905 
1906 	return rc;
1907 }
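/*
 * The two helpers above are used as a save/restore pair around
 * HWRM_VNIC_CFG in bnxt_hwrm_vnic_cfg() below: the placement modes are
 * snapshotted with the _qcfg helper first and written back with the
 * _cfg helper afterwards, since VNIC_CFG may otherwise disturb the
 * jumbo/HDS placement settings.
 */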
1908 
1909 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1910 {
1911 	int rc = 0;
1912 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1913 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1914 	struct bnxt_plcmodes_cfg pmodes = { 0 };
1915 	uint32_t ctx_enable_flag = 0;
1916 	uint32_t enables = 0;
1917 
1918 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1919 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1920 		return rc;
1921 	}
1922 
1923 	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1924 	if (rc)
1925 		return rc;
1926 
1927 	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1928 
1929 	if (BNXT_CHIP_THOR(bp)) {
1930 		int dflt_rxq = vnic->start_grp_id;
1931 		struct bnxt_rx_ring_info *rxr;
1932 		struct bnxt_cp_ring_info *cpr;
1933 		struct bnxt_rx_queue *rxq;
1934 		int i;
1935 
1936 		/*
1937 		 * The first active receive ring is used as the VNIC
1938 		 * default receive ring. If there are no active receive
1939 		 * rings (all corresponding receive queues are stopped),
1940 		 * the first receive ring is used.
1941 		 */
1942 		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1943 			rxq = bp->eth_dev->data->rx_queues[i];
1944 			if (rxq->rx_started) {
1945 				dflt_rxq = i;
1946 				break;
1947 			}
1948 		}
1949 
1950 		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1951 		rxr = rxq->rx_ring;
1952 		cpr = rxq->cp_ring;
1953 
1954 		req.default_rx_ring_id =
1955 			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1956 		req.default_cmpl_ring_id =
1957 			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1958 		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1959 			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1960 		goto config_mru;
1961 	}
1962 
1963 	/* Only RSS is supported for now; COS and LB are TBD. */
1964 	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1965 	if (vnic->lb_rule != 0xffff)
1966 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1967 	if (vnic->cos_rule != 0xffff)
1968 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1969 	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1970 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1971 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1972 	}
1973 	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1974 		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1975 		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1976 	}
1977 
1978 	enables |= ctx_enable_flag;
1979 	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1980 	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1981 	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1982 	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1983 
1984 config_mru:
1985 	req.enables = rte_cpu_to_le_32(enables);
1986 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1987 	req.mru = rte_cpu_to_le_16(vnic->mru);
1988 	/* Configure default VNIC only once. */
1989 	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1990 		req.flags |=
1991 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1992 		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1993 	}
1994 	if (vnic->vlan_strip)
1995 		req.flags |=
1996 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1997 	if (vnic->bd_stall)
1998 		req.flags |=
1999 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2000 	if (vnic->roce_dual)
2001 		req.flags |= rte_cpu_to_le_32(
2002 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
2003 	if (vnic->roce_only)
2004 		req.flags |= rte_cpu_to_le_32(
2005 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
2006 	if (vnic->rss_dflt_cr)
2007 		req.flags |= rte_cpu_to_le_32(
2008 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2009 
2010 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2011 
2012 	HWRM_CHECK_RESULT();
2013 	HWRM_UNLOCK();
2014 
2015 	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2016 
2017 	return rc;
2018 }
2019 
2020 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2021 		int16_t fw_vf_id)
2022 {
2023 	int rc = 0;
2024 	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2025 	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2026 
2027 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2028 		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2029 		return rc;
2030 	}
2031 	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2032 
2033 	req.enables =
2034 		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2035 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2036 	req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2037 
2038 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2039 
2040 	HWRM_CHECK_RESULT();
2041 
2042 	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2043 	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2044 	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2045 	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2046 	vnic->mru = rte_le_to_cpu_16(resp->mru);
2047 	vnic->func_default = rte_le_to_cpu_32(
2048 			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2049 	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2050 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2051 	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2052 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2053 	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2054 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2055 	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2056 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2057 	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2058 			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2059 
2060 	HWRM_UNLOCK();
2061 
2062 	return rc;
2063 }
2064 
2065 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2066 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2067 {
2068 	int rc = 0;
2069 	uint16_t ctx_id;
2070 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2071 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2072 						bp->hwrm_cmd_resp_addr;
2073 
2074 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2075 
2076 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2077 	HWRM_CHECK_RESULT();
2078 
2079 	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2080 	if (!BNXT_HAS_RING_GRPS(bp))
2081 		vnic->fw_grp_ids[ctx_idx] = ctx_id;
2082 	else if (ctx_idx == 0)
2083 		vnic->rss_rule = ctx_id;
2084 
2085 	HWRM_UNLOCK();
2086 
2087 	return rc;
2088 }
2089 
2090 static
2091 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2092 			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2093 {
2094 	int rc = 0;
2095 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2096 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2097 						bp->hwrm_cmd_resp_addr;
2098 
2099 	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2100 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2101 		return rc;
2102 	}
2103 	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2104 
2105 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2106 
2107 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2108 
2109 	HWRM_CHECK_RESULT();
2110 	HWRM_UNLOCK();
2111 
2112 	return rc;
2113 }
2114 
2115 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2116 {
2117 	int rc = 0;
2118 
2119 	if (BNXT_CHIP_THOR(bp)) {
2120 		int j;
2121 
2122 		for (j = 0; j < vnic->num_lb_ctxts; j++) {
2123 			rc = _bnxt_hwrm_vnic_ctx_free(bp,
2124 						      vnic,
2125 						      vnic->fw_grp_ids[j]);
2126 			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2127 		}
2128 		vnic->num_lb_ctxts = 0;
2129 	} else {
2130 		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2131 		vnic->rss_rule = INVALID_HW_RING_ID;
2132 	}
2133 
2134 	return rc;
2135 }
2136 
2137 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2138 {
2139 	int rc = 0;
2140 	struct hwrm_vnic_free_input req = {.req_type = 0 };
2141 	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2142 
2143 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2144 		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2145 		return rc;
2146 	}
2147 
2148 	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2149 
2150 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2151 
2152 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2153 
2154 	HWRM_CHECK_RESULT();
2155 	HWRM_UNLOCK();
2156 
2157 	vnic->fw_vnic_id = INVALID_HW_RING_ID;
2158 	/* Configure default VNIC again if necessary. */
2159 	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2160 		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2161 
2162 	return rc;
2163 }
2164 
2165 static int
2166 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2167 {
2168 	int i;
2169 	int rc = 0;
2170 	int nr_ctxs = vnic->num_lb_ctxts;
2171 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2172 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2173 
2174 	for (i = 0; i < nr_ctxs; i++) {
2175 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2176 
2177 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2178 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2179 		req.hash_mode_flags = vnic->hash_mode;
2180 
2181 		req.hash_key_tbl_addr =
2182 			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2183 
2184 		req.ring_grp_tbl_addr =
2185 			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2186 					 i * HW_HASH_INDEX_SIZE);
2187 		req.ring_table_pair_index = i;
2188 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2189 
2190 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2191 					    BNXT_USE_CHIMP_MB);
2192 
2193 		HWRM_CHECK_RESULT();
2194 		HWRM_UNLOCK();
2195 	}
2196 
2197 	return rc;
2198 }
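/*
 * On Thor, RSS contexts are tracked per VNIC in fw_grp_ids[] (see
 * bnxt_hwrm_vnic_ctx_alloc() above), and the loop above programs each
 * context with its own slice of the RSS indirection table:
 *
 *	ring_grp_tbl_addr = rss_table_dma_addr + i * HW_HASH_INDEX_SIZE
 *
 * with ring_table_pair_index selecting the block of RX/completion ring
 * pairs that the slice applies to.
 */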
2199 
2200 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2201 			   struct bnxt_vnic_info *vnic)
2202 {
2203 	int rc = 0;
2204 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2205 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2206 
2207 	if (!vnic->rss_table)
2208 		return 0;
2209 
2210 	if (BNXT_CHIP_THOR(bp))
2211 		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2212 
2213 	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2214 
2215 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2216 	req.hash_mode_flags = vnic->hash_mode;
2217 
2218 	req.ring_grp_tbl_addr =
2219 	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2220 	req.hash_key_tbl_addr =
2221 	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2222 	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2223 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2224 
2225 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2226 
2227 	HWRM_CHECK_RESULT();
2228 	HWRM_UNLOCK();
2229 
2230 	return rc;
2231 }
2232 
2233 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2234 			struct bnxt_vnic_info *vnic)
2235 {
2236 	int rc = 0;
2237 	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2238 	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2239 	uint16_t size;
2240 
2241 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2242 		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2243 		return rc;
2244 	}
2245 
2246 	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2247 
2248 	req.flags = rte_cpu_to_le_32(
2249 			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2250 
2251 	req.enables = rte_cpu_to_le_32(
2252 		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2253 
2254 	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2255 	size -= RTE_PKTMBUF_HEADROOM;
2256 	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2257 
2258 	req.jumbo_thresh = rte_cpu_to_le_16(size);
2259 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2260 
2261 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2262 
2263 	HWRM_CHECK_RESULT();
2264 	HWRM_UNLOCK();
2265 
2266 	return rc;
2267 }
2268 
2269 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2270 			struct bnxt_vnic_info *vnic, bool enable)
2271 {
2272 	int rc = 0;
2273 	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2274 	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2275 
2276 	if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2277 		if (enable)
2278 			PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2279 		return -ENOTSUP;
2280 	}
2281 
2282 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2283 		PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2284 		return 0;
2285 	}
2286 
2287 	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2288 
2289 	if (enable) {
2290 		req.enables = rte_cpu_to_le_32(
2291 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2292 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2293 				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2294 		req.flags = rte_cpu_to_le_32(
2295 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2296 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2297 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2298 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2299 				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2300 			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2301 		req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2302 		req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2303 		req.min_agg_len = rte_cpu_to_le_32(512);
2304 	}
2305 	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2306 
2307 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2308 
2309 	HWRM_CHECK_RESULT();
2310 	HWRM_UNLOCK();
2311 
2312 	return rc;
2313 }
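/*
 * Sketch of how this is typically driven from the ethdev layer
 * (illustrative; the real call sites derive "enable" from the
 * configured RX offload flags):
 *
 *	bool lro = !!(dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO);
 *	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic, lro);
 */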
2314 
2315 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2316 {
2317 	struct hwrm_func_cfg_input req = {0};
2318 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2319 	int rc;
2320 
2321 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2322 	req.enables = rte_cpu_to_le_32(
2323 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2324 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2325 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2326 
2327 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2328 
2329 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2330 	HWRM_CHECK_RESULT();
2331 	HWRM_UNLOCK();
2332 
2333 	bp->pf->vf_info[vf].random_mac = false;
2334 
2335 	return rc;
2336 }
2337 
2338 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2339 				  uint64_t *dropped)
2340 {
2341 	int rc = 0;
2342 	struct hwrm_func_qstats_input req = {.req_type = 0};
2343 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2344 
2345 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2346 
2347 	req.fid = rte_cpu_to_le_16(fid);
2348 
2349 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2350 
2351 	HWRM_CHECK_RESULT();
2352 
2353 	if (dropped)
2354 		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2355 
2356 	HWRM_UNLOCK();
2357 
2358 	return rc;
2359 }
2360 
2361 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2362 			  struct rte_eth_stats *stats,
2363 			  struct hwrm_func_qstats_output *func_qstats)
2364 {
2365 	int rc = 0;
2366 	struct hwrm_func_qstats_input req = {.req_type = 0};
2367 	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2368 
2369 	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2370 
2371 	req.fid = rte_cpu_to_le_16(fid);
2372 
2373 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2374 
2375 	HWRM_CHECK_RESULT();
2376 	if (func_qstats)
2377 		memcpy(func_qstats, resp,
2378 		       sizeof(struct hwrm_func_qstats_output));
2379 
2380 	if (!stats)
2381 		goto exit;
2382 
2383 	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2384 	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2385 	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2386 	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2387 	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2388 	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2389 
2390 	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2391 	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2392 	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2393 	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2394 	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2395 	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2396 
2397 	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2398 	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2399 	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2400 
2401 exit:
2402 	HWRM_UNLOCK();
2403 
2404 	return rc;
2405 }
2406 
2407 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2408 {
2409 	int rc = 0;
2410 	struct hwrm_func_clr_stats_input req = {.req_type = 0};
2411 	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2412 
2413 	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2414 
2415 	req.fid = rte_cpu_to_le_16(fid);
2416 
2417 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2418 
2419 	HWRM_CHECK_RESULT();
2420 	HWRM_UNLOCK();
2421 
2422 	return rc;
2423 }
2424 
2425 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2426 {
2427 	unsigned int i;
2428 	int rc = 0;
2429 
2430 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2431 		struct bnxt_tx_queue *txq;
2432 		struct bnxt_rx_queue *rxq;
2433 		struct bnxt_cp_ring_info *cpr;
2434 
2435 		if (i >= bp->rx_cp_nr_rings) {
2436 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2437 			cpr = txq->cp_ring;
2438 		} else {
2439 			rxq = bp->rx_queues[i];
2440 			cpr = rxq->cp_ring;
2441 		}
2442 
2443 		rc = bnxt_hwrm_stat_clear(bp, cpr);
2444 		if (rc)
2445 			return rc;
2446 	}
2447 	return 0;
2448 }
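/*
 * Completion-ring indexing convention used by the stat-ctx helpers in
 * this file: indices [0, rx_cp_nr_rings) refer to
 * bp->rx_queues[i]->cp_ring, while indices
 * [rx_cp_nr_rings, rx_cp_nr_rings + tx_cp_nr_rings) refer to
 * bp->tx_queues[i - rx_cp_nr_rings]->cp_ring.
 */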
2449 
2450 static int
2451 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2452 {
2453 	int rc;
2454 	unsigned int i;
2455 	struct bnxt_cp_ring_info *cpr;
2456 
2457 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2458 
2459 		if (i >= bp->rx_cp_nr_rings) {
2460 			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2461 		} else {
2462 			cpr = bp->rx_queues[i]->cp_ring;
2463 			if (BNXT_HAS_RING_GRPS(bp))
2464 				bp->grp_info[i].fw_stats_ctx = -1;
2465 		}
2466 		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2467 			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2468 			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2469 			if (rc)
2470 				return rc;
2471 		}
2472 	}
2473 	return 0;
2474 }
2475 
2476 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2477 {
2478 	unsigned int i;
2479 	int rc = 0;
2480 
2481 	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2482 		struct bnxt_tx_queue *txq;
2483 		struct bnxt_rx_queue *rxq;
2484 		struct bnxt_cp_ring_info *cpr;
2485 
2486 		if (i >= bp->rx_cp_nr_rings) {
2487 			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2488 			cpr = txq->cp_ring;
2489 		} else {
2490 			rxq = bp->rx_queues[i];
2491 			cpr = rxq->cp_ring;
2492 		}
2493 
2494 		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2495 
2496 		if (rc)
2497 			return rc;
2498 	}
2499 	return rc;
2500 }
2501 
2502 static int
2503 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2504 {
2505 	uint16_t idx;
2506 	int rc = 0;
2507 
2508 	if (!BNXT_HAS_RING_GRPS(bp))
2509 		return 0;
2510 
2511 	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2512 
2513 		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2514 			continue;
2515 
2516 		rc = bnxt_hwrm_ring_grp_free(bp, idx);
2517 
2518 		if (rc)
2519 			return rc;
2520 	}
2521 	return rc;
2522 }
2523 
2524 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2525 {
2526 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2527 
2528 	bnxt_hwrm_ring_free(bp, cp_ring,
2529 			    HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2530 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2531 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2532 				     sizeof(*cpr->cp_desc_ring));
2533 	cpr->cp_raw_cons = 0;
2534 	cpr->valid = 0;
2535 }
2536 
2537 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2538 {
2539 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2540 
2541 	bnxt_hwrm_ring_free(bp, cp_ring,
2542 			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2543 	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2544 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2545 			sizeof(*cpr->cp_desc_ring));
2546 	cpr->cp_raw_cons = 0;
2547 	cpr->valid = 0;
2548 }
2549 
2550 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2551 {
2552 	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2553 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2554 	struct bnxt_ring *ring = rxr->rx_ring_struct;
2555 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2556 
2557 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2558 		bnxt_hwrm_ring_free(bp, ring,
2559 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2560 		ring->fw_ring_id = INVALID_HW_RING_ID;
2561 		if (BNXT_HAS_RING_GRPS(bp))
2562 			bp->grp_info[queue_index].rx_fw_ring_id =
2563 							INVALID_HW_RING_ID;
2564 	}
2565 	ring = rxr->ag_ring_struct;
2566 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2567 		bnxt_hwrm_ring_free(bp, ring,
2568 				    BNXT_CHIP_THOR(bp) ?
2569 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2570 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2571 		if (BNXT_HAS_RING_GRPS(bp))
2572 			bp->grp_info[queue_index].ag_fw_ring_id =
2573 							INVALID_HW_RING_ID;
2574 	}
2575 	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2576 		bnxt_free_cp_ring(bp, cpr);
2577 
2578 	if (BNXT_HAS_RING_GRPS(bp))
2579 		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2580 }
2581 
2582 static int
2583 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2584 {
2585 	unsigned int i;
2586 
2587 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2588 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
2589 		struct bnxt_tx_ring_info *txr = txq->tx_ring;
2590 		struct bnxt_ring *ring = txr->tx_ring_struct;
2591 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2592 
2593 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2594 			bnxt_hwrm_ring_free(bp, ring,
2595 					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2596 			ring->fw_ring_id = INVALID_HW_RING_ID;
2597 			memset(txr->tx_desc_ring, 0,
2598 					txr->tx_ring_struct->ring_size *
2599 					sizeof(*txr->tx_desc_ring));
2600 			memset(txr->tx_buf_ring, 0,
2601 					txr->tx_ring_struct->ring_size *
2602 					sizeof(*txr->tx_buf_ring));
2603 			txr->tx_prod = 0;
2604 			txr->tx_cons = 0;
2605 		}
2606 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2607 			bnxt_free_cp_ring(bp, cpr);
2608 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2609 		}
2610 	}
2611 
2612 	for (i = 0; i < bp->rx_cp_nr_rings; i++)
2613 		bnxt_free_hwrm_rx_ring(bp, i);
2614 
2615 	return 0;
2616 }
2617 
2618 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2619 {
2620 	uint16_t i;
2621 	int rc = 0;
2622 
2623 	if (!BNXT_HAS_RING_GRPS(bp))
2624 		return 0;
2625 
2626 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2627 		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2628 		if (rc)
2629 			return rc;
2630 	}
2631 	return rc;
2632 }
2633 
2634 /*
2635  * HWRM utility functions
2636  */
2637 
2638 void bnxt_free_hwrm_resources(struct bnxt *bp)
2639 {
2640 	/* Free the HWRM command response and short-command buffers. */
2641 	rte_free(bp->hwrm_cmd_resp_addr);
2642 	rte_free(bp->hwrm_short_cmd_req_addr);
2643 	bp->hwrm_cmd_resp_addr = NULL;
2644 	bp->hwrm_short_cmd_req_addr = NULL;
2645 	bp->hwrm_cmd_resp_dma_addr = 0;
2646 	bp->hwrm_short_cmd_req_dma_addr = 0;
2647 }
2648 
2649 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2650 {
2651 	struct rte_pci_device *pdev = bp->pdev;
2652 	char type[RTE_MEMZONE_NAMESIZE];
2653 
2654 	sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2655 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2656 	bp->max_resp_len = HWRM_MAX_RESP_LEN;
2657 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2658 	if (bp->hwrm_cmd_resp_addr == NULL)
2659 		return -ENOMEM;
2660 	bp->hwrm_cmd_resp_dma_addr =
2661 		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2662 	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2663 		PMD_DRV_LOG(ERR,
2664 			"unable to map response address to physical memory\n");
2665 		return -ENOMEM;
2666 	}
2667 	rte_spinlock_init(&bp->hwrm_lock);
2668 
2669 	return 0;
2670 }
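/*
 * Expected pairing over the device lifetime (sketch):
 *
 *	rc = bnxt_alloc_hwrm_resources(bp);	probe/init path
 *	...
 *	bnxt_free_hwrm_resources(bp);		close/uninit path
 *
 * The single response buffer allocated here is shared by all HWRM
 * commands and serialized through bp->hwrm_lock.
 */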
2671 
2672 int
2673 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2674 {
2675 	int rc = 0;
2676 
2677 	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2678 		rc = bnxt_hwrm_clear_em_filter(bp, filter);
2679 		if (rc)
2680 			return rc;
2681 	} else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2682 		rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2683 		if (rc)
2684 			return rc;
2685 	}
2686 
2687 	rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2688 	return rc;
2689 }
2690 
2691 static int
2692 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2693 {
2694 	struct bnxt_filter_info *filter;
2695 	int rc = 0;
2696 
2697 	while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2698 		rc = bnxt_clear_one_vnic_filter(bp, filter);
2699 		STAILQ_REMOVE_HEAD(&vnic->filter, next);
2700 		bnxt_free_filter(bp, filter);
2701 	}
2702 	return rc;
2703 }
2704 
2705 static int
2706 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2707 {
2708 	struct bnxt_filter_info *filter;
2709 	struct rte_flow *flow;
2710 	int rc = 0;
2711 
2712 	while (!STAILQ_EMPTY(&vnic->flow_list)) {
2713 		flow = STAILQ_FIRST(&vnic->flow_list);
2714 		filter = flow->filter;
2715 		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2716 		rc = bnxt_clear_one_vnic_filter(bp, filter);
2717 
2718 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2719 		rte_free(flow);
2720 	}
2721 	return rc;
2722 }
2723 
2724 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2725 {
2726 	struct bnxt_filter_info *filter;
2727 	int rc = 0;
2728 
2729 	STAILQ_FOREACH(filter, &vnic->filter, next) {
2730 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
2731 			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2732 						     filter);
2733 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2734 			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2735 							 filter);
2736 		else
2737 			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2738 						     filter);
2739 		if (rc)
2740 			break;
2741 	}
2742 	return rc;
2743 }
2744 
2745 static void
2746 bnxt_free_tunnel_ports(struct bnxt *bp)
2747 {
2748 	if (bp->vxlan_port_cnt)
2749 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2750 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2751 
2752 	if (bp->geneve_port_cnt)
2753 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2754 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2755 }
2756 
2757 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2758 {
2759 	int i;
2760 
2761 	if (bp->vnic_info == NULL)
2762 		return;
2763 
2764 	/*
2765 	 * Cleanup VNICs in reverse order, to make sure the L2 filter
2766 	 * from vnic0 is last to be cleaned up.
2767 	 */
2768 	for (i = bp->max_vnics - 1; i >= 0; i--) {
2769 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2770 
2771 		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2772 			continue;
2773 
2774 		bnxt_clear_hwrm_vnic_flows(bp, vnic);
2775 
2776 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
2777 
2778 		bnxt_hwrm_vnic_ctx_free(bp, vnic);
2779 
2780 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2781 
2782 		bnxt_hwrm_vnic_free(bp, vnic);
2783 
2784 		rte_free(vnic->fw_grp_ids);
2785 	}
2786 	/* Ring resources */
2787 	bnxt_free_all_hwrm_rings(bp);
2788 	bnxt_free_all_hwrm_ring_grps(bp);
2789 	bnxt_free_all_hwrm_stat_ctxs(bp);
2790 	bnxt_free_tunnel_ports(bp);
2791 }
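/*
 * Per-VNIC teardown order used above: flows first, then filters, then
 * RSS/COS/LB contexts, then TPA is disabled and the VNIC itself freed;
 * rings, ring groups, statistics contexts and tunnel ports follow.
 * The intent is that no firmware object is freed while another object
 * still references it.
 */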
2792 
2793 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2794 {
2795 	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2796 
2797 	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2798 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2799 
2800 	switch (conf_link_speed) {
2801 	case ETH_LINK_SPEED_10M_HD:
2802 	case ETH_LINK_SPEED_100M_HD:
2803 		/* FALLTHROUGH */
2804 		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2805 	}
2806 	return hw_link_duplex;
2807 }
2808 
2809 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2810 {
2811 	return !conf_link;
2812 }
2813 
2814 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2815 					  uint16_t pam4_link)
2816 {
2817 	uint16_t eth_link_speed = 0;
2818 
2819 	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2820 		return ETH_LINK_SPEED_AUTONEG;
2821 
2822 	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2823 	case ETH_LINK_SPEED_100M:
2824 	case ETH_LINK_SPEED_100M_HD:
2825 		/* FALLTHROUGH */
2826 		eth_link_speed =
2827 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2828 		break;
2829 	case ETH_LINK_SPEED_1G:
2830 		eth_link_speed =
2831 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2832 		break;
2833 	case ETH_LINK_SPEED_2_5G:
2834 		eth_link_speed =
2835 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2836 		break;
2837 	case ETH_LINK_SPEED_10G:
2838 		eth_link_speed =
2839 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2840 		break;
2841 	case ETH_LINK_SPEED_20G:
2842 		eth_link_speed =
2843 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2844 		break;
2845 	case ETH_LINK_SPEED_25G:
2846 		eth_link_speed =
2847 			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2848 		break;
2849 	case ETH_LINK_SPEED_40G:
2850 		eth_link_speed =
2851 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2852 		break;
2853 	case ETH_LINK_SPEED_50G:
2854 		eth_link_speed = pam4_link ?
2855 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2856 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2857 		break;
2858 	case ETH_LINK_SPEED_100G:
2859 		eth_link_speed = pam4_link ?
2860 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2861 			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2862 		break;
2863 	case ETH_LINK_SPEED_200G:
2864 		eth_link_speed =
2865 			HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2866 		break;
2867 	default:
2868 		PMD_DRV_LOG(ERR,
2869 			"Unsupported link speed %d; default to AUTO\n",
2870 			conf_link_speed);
2871 		break;
2872 	}
2873 	return eth_link_speed;
2874 }
2875 
2876 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2877 		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2878 		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2879 		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2880 		ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2881 
2882 static int bnxt_validate_link_speed(struct bnxt *bp)
2883 {
2884 	uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2885 	uint16_t port_id = bp->eth_dev->data->port_id;
2886 	uint32_t link_speed_capa;
2887 	uint32_t one_speed;
2888 
2889 	if (link_speed == ETH_LINK_SPEED_AUTONEG)
2890 		return 0;
2891 
2892 	link_speed_capa = bnxt_get_speed_capabilities(bp);
2893 
2894 	if (link_speed & ETH_LINK_SPEED_FIXED) {
2895 		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2896 
2897 		if (one_speed & (one_speed - 1)) {
2898 			PMD_DRV_LOG(ERR,
2899 				"Invalid advertised speeds (%u) for port %u\n",
2900 				link_speed, port_id);
2901 			return -EINVAL;
2902 		}
2903 		if ((one_speed & link_speed_capa) != one_speed) {
2904 			PMD_DRV_LOG(ERR,
2905 				"Unsupported advertised speed (%u) for port %u\n",
2906 				link_speed, port_id);
2907 			return -EINVAL;
2908 		}
2909 	} else {
2910 		if (!(link_speed & link_speed_capa)) {
2911 			PMD_DRV_LOG(ERR,
2912 				"Unsupported advertised speeds (%u) for port %u\n",
2913 				link_speed, port_id);
2914 			return -EINVAL;
2915 		}
2916 	}
2917 	return 0;
2918 }
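/*
 * The (one_speed & (one_speed - 1)) test above rejects fixed-speed
 * requests with more than one speed bit set: a valid fixed request is
 * a single ETH_LINK_SPEED_* flag, i.e. a power of two.  For example,
 * 0x8 & 0x7 == 0 is accepted, while (0x8 | 0x4) & 0xb == 0x8 is
 * rejected.
 */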
2919 
2920 static uint16_t
2921 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2922 {
2923 	uint16_t ret = 0;
2924 
2925 	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2926 		if (bp->link_info->support_speeds)
2927 			return bp->link_info->support_speeds;
2928 		link_speed = BNXT_SUPPORTED_SPEEDS;
2929 	}
2930 
2931 	if (link_speed & ETH_LINK_SPEED_100M)
2932 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2933 	if (link_speed & ETH_LINK_SPEED_100M_HD)
2934 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2935 	if (link_speed & ETH_LINK_SPEED_1G)
2936 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2937 	if (link_speed & ETH_LINK_SPEED_2_5G)
2938 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2939 	if (link_speed & ETH_LINK_SPEED_10G)
2940 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2941 	if (link_speed & ETH_LINK_SPEED_20G)
2942 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2943 	if (link_speed & ETH_LINK_SPEED_25G)
2944 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2945 	if (link_speed & ETH_LINK_SPEED_40G)
2946 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2947 	if (link_speed & ETH_LINK_SPEED_50G)
2948 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2949 	if (link_speed & ETH_LINK_SPEED_100G)
2950 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2951 	if (link_speed & ETH_LINK_SPEED_200G)
2952 		ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2953 	return ret;
2954 }
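/*
 * Example (sketch): link_speeds == ETH_LINK_SPEED_25G | ETH_LINK_SPEED_100G
 * yields AUTO_LINK_SPEED_MASK_25GB | AUTO_LINK_SPEED_MASK_100GB, while
 * ETH_LINK_SPEED_AUTONEG advertises everything the PHY reports in
 * bp->link_info->support_speeds.
 */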
2955 
2956 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2957 {
2958 	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2959 
2960 	switch (hw_link_speed) {
2961 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2962 		eth_link_speed = ETH_SPEED_NUM_100M;
2963 		break;
2964 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2965 		eth_link_speed = ETH_SPEED_NUM_1G;
2966 		break;
2967 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2968 		eth_link_speed = ETH_SPEED_NUM_2_5G;
2969 		break;
2970 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2971 		eth_link_speed = ETH_SPEED_NUM_10G;
2972 		break;
2973 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2974 		eth_link_speed = ETH_SPEED_NUM_20G;
2975 		break;
2976 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2977 		eth_link_speed = ETH_SPEED_NUM_25G;
2978 		break;
2979 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2980 		eth_link_speed = ETH_SPEED_NUM_40G;
2981 		break;
2982 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2983 		eth_link_speed = ETH_SPEED_NUM_50G;
2984 		break;
2985 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2986 		eth_link_speed = ETH_SPEED_NUM_100G;
2987 		break;
2988 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2989 		eth_link_speed = ETH_SPEED_NUM_200G;
2990 		break;
2991 	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2992 	default:
2993 		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2994 			hw_link_speed);
2995 		break;
2996 	}
2997 	return eth_link_speed;
2998 }
2999 
3000 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3001 {
3002 	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3003 
3004 	switch (hw_link_duplex) {
3005 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3006 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3007 		/* FALLTHROUGH */
3008 		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3009 		break;
3010 	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3011 		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3012 		break;
3013 	default:
3014 		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3015 			hw_link_duplex);
3016 		break;
3017 	}
3018 	return eth_link_duplex;
3019 }
3020 
3021 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3022 {
3023 	int rc = 0;
3024 	struct bnxt_link_info *link_info = bp->link_info;
3025 
3026 	rc = bnxt_hwrm_port_phy_qcaps(bp);
3027 	if (rc)
3028 		PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3029 
3030 	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3031 	if (rc) {
3032 		PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3033 		goto exit;
3034 	}
3035 
3036 	if (link_info->link_speed)
3037 		link->link_speed =
3038 			bnxt_parse_hw_link_speed(link_info->link_speed);
3039 	else
3040 		link->link_speed = ETH_SPEED_NUM_NONE;
3041 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3042 	link->link_status = link_info->link_up;
3043 	link->link_autoneg = link_info->auto_mode ==
3044 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3045 		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3046 exit:
3047 	return rc;
3048 }
3049 
3050 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3051 {
3052 	int rc = 0;
3053 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3054 	struct bnxt_link_info link_req;
3055 	uint16_t speed, autoneg;
3056 
3057 	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3058 		return 0;
3059 
3060 	rc = bnxt_validate_link_speed(bp);
3061 	if (rc)
3062 		goto error;
3063 
3064 	memset(&link_req, 0, sizeof(link_req));
3065 	link_req.link_up = link_up;
3066 	if (!link_up)
3067 		goto port_phy_cfg;
3068 
3069 	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3070 	if (BNXT_CHIP_THOR(bp) &&
3071 	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3072 		/* 40G is not supported as part of media auto detect.
3073 		 * The speed should be forced and autoneg disabled
3074 		 * to configure 40G speed.
3075 		 */
3076 		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3077 		autoneg = 0;
3078 	}
3079 
3080 	/* No auto speeds and no auto_pam4_link. Disable autoneg */
3081 	if (bp->link_info->auto_link_speed == 0 &&
3082 	    bp->link_info->link_signal_mode &&
3083 	    bp->link_info->auto_pam4_link_speeds == 0)
3084 		autoneg = 0;
3085 
3086 	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3087 					  bp->link_info->link_signal_mode);
3088 	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3089 	/* Autoneg can be used only when the firmware allows it.
3090 	 * If the user configures a fixed speed of 40G and later changes to
3091 	 * any other speed, auto_link_speed/force_link_speed remains set
3092 	 * to 40G until the link comes up at the new speed.
3093 	 */
3094 	if (autoneg == 1 &&
3095 	    !(!BNXT_CHIP_THOR(bp) &&
3096 	      (bp->link_info->auto_link_speed ||
3097 	       bp->link_info->force_link_speed))) {
3098 		link_req.phy_flags |=
3099 				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3100 		link_req.auto_link_speed_mask =
3101 			bnxt_parse_eth_link_speed_mask(bp,
3102 						       dev_conf->link_speeds);
3103 	} else {
3104 		if (bp->link_info->phy_type ==
3105 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3106 		    bp->link_info->phy_type ==
3107 		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3108 		    bp->link_info->media_type ==
3109 		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3110 			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3111 			return -EINVAL;
3112 		}
3113 
3114 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3115 		/* If the user wants a particular speed, try that first. */
3116 		if (speed)
3117 			link_req.link_speed = speed;
3118 		else if (bp->link_info->force_pam4_link_speed)
3119 			link_req.link_speed =
3120 				bp->link_info->force_pam4_link_speed;
3121 		else if (bp->link_info->auto_pam4_link_speeds)
3122 			link_req.link_speed =
3123 				bp->link_info->auto_pam4_link_speeds;
3124 		else if (bp->link_info->support_pam4_speeds)
3125 			link_req.link_speed =
3126 				bp->link_info->support_pam4_speeds;
3127 		else if (bp->link_info->force_link_speed)
3128 			link_req.link_speed = bp->link_info->force_link_speed;
3129 		else
3130 			link_req.link_speed = bp->link_info->auto_link_speed;
3131 		/* If the auto PAM4 link speed is zero but auto_link_speed
3132 		 * is not, fall back to auto_link_speed.
3133 		 */
3134 		if (bp->link_info->auto_link_speed != 0 &&
3135 		    bp->link_info->auto_pam4_link_speeds == 0)
3136 			link_req.link_speed = bp->link_info->auto_link_speed;
3137 	}
3138 	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3139 	link_req.auto_pause = bp->link_info->auto_pause;
3140 	link_req.force_pause = bp->link_info->force_pause;
3141 
3142 port_phy_cfg:
3143 	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3144 	if (rc) {
3145 		PMD_DRV_LOG(ERR,
3146 			"Set link config failed with rc %d\n", rc);
3147 	}
3148 
3149 error:
3150 	return rc;
3151 }
3152 
3153 /* JIRA 22088 */
3154 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3155 {
3156 	struct hwrm_func_qcfg_input req = {0};
3157 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3158 	uint16_t flags;
3159 	int rc = 0;
3160 	uint16_t svif_info;
3161 	bp->func_svif = BNXT_SVIF_INVALID;
3162 
3163 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3164 	req.fid = rte_cpu_to_le_16(0xffff); /* 0xffff: query this function itself. */
3165 
3166 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3167 
3168 	HWRM_CHECK_RESULT();
3169 
3170 	/* Hard-coded 0xfff VLAN ID mask. */
3171 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3172 
3173 	svif_info = rte_le_to_cpu_16(resp->svif_info);
3174 	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3175 		bp->func_svif =	svif_info &
3176 				     HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3177 
3178 	flags = rte_le_to_cpu_16(resp->flags);
3179 	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3180 		bp->flags |= BNXT_FLAG_MULTI_HOST;
3181 
3182 	if (BNXT_VF(bp) &&
3183 	    !BNXT_VF_IS_TRUSTED(bp) &&
3184 	    (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3185 		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3186 		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3187 	} else if (BNXT_VF(bp) &&
3188 		   BNXT_VF_IS_TRUSTED(bp) &&
3189 		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3190 		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3191 		PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3192 	}
3193 
3194 	if (mtu)
3195 		*mtu = rte_le_to_cpu_16(resp->mtu);
3196 
3197 	switch (resp->port_partition_type) {
3198 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3199 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3200 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3201 		/* FALLTHROUGH */
3202 		bp->flags |= BNXT_FLAG_NPAR_PF;
3203 		break;
3204 	default:
3205 		bp->flags &= ~BNXT_FLAG_NPAR_PF;
3206 		break;
3207 	}
3208 
3209 	HWRM_UNLOCK();
3210 
3211 	return rc;
3212 }
3213 
3214 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3215 {
3216 	struct hwrm_func_qcfg_input req = {0};
3217 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3218 	int rc;
3219 
3220 	if (!BNXT_VF_IS_TRUSTED(bp))
3221 		return 0;
3222 
3223 	if (!bp->parent)
3224 		return -EINVAL;
3225 
3226 	bp->parent->fid = BNXT_PF_FID_INVALID;
3227 
3228 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3229 
3230 	req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3231 
3232 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3233 
3234 	HWRM_CHECK_RESULT();
3235 
3236 	memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3237 	bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3238 	bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3239 	bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3240 
3241 	/* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3242 	if (bp->parent->vnic == 0) {
3243 		PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3244 		/* Use hard-coded values appropriate for current Wh+ fw. */
3245 		if (bp->parent->fid == 2)
3246 			bp->parent->vnic = 0x100;
3247 		else
3248 			bp->parent->vnic = 1;
3249 	}
3250 
3251 	HWRM_UNLOCK();
3252 
3253 	return 0;
3254 }
3255 
3256 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3257 				 uint16_t *vnic_id, uint16_t *svif)
3258 {
3259 	struct hwrm_func_qcfg_input req = {0};
3260 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3261 	uint16_t svif_info;
3262 	int rc = 0;
3263 
3264 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3265 	req.fid = rte_cpu_to_le_16(fid);
3266 
3267 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3268 
3269 	HWRM_CHECK_RESULT();
3270 
3271 	if (vnic_id)
3272 		*vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3273 
3274 	svif_info = rte_le_to_cpu_16(resp->svif_info);
3275 	if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3276 		*svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3277 
3278 	HWRM_UNLOCK();
3279 
3280 	return rc;
3281 }
3282 
3283 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3284 {
3285 	struct hwrm_port_mac_qcfg_input req = {0};
3286 	struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3287 	uint16_t port_svif_info;
3288 	int rc;
3289 
3290 	bp->port_svif = BNXT_SVIF_INVALID;
3291 
3292 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3293 		return 0;
3294 
3295 	HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3296 
3297 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3298 
3299 	HWRM_CHECK_RESULT_SILENT();
3300 
3301 	port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3302 	if (port_svif_info &
3303 	    HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3304 		bp->port_svif = port_svif_info &
3305 			HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3306 
3307 	HWRM_UNLOCK();
3308 
3309 	return 0;
3310 }
3311 
3312 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3313 				 struct bnxt_pf_resource_info *pf_resc)
3314 {
3315 	struct hwrm_func_cfg_input req = {0};
3316 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3317 	uint32_t enables;
3318 	int rc;
3319 
3320 	enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3321 		  HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3322 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3323 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3324 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3325 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3326 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3327 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3328 		  HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3329 
3330 	if (BNXT_HAS_RING_GRPS(bp)) {
3331 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3332 		req.num_hw_ring_grps =
3333 			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3334 	} else if (BNXT_HAS_NQ(bp)) {
3335 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3336 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3337 	}
3338 
3339 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3340 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3341 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3342 	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3343 	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3344 	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3345 	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3346 	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3347 	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3348 	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3349 	req.fid = rte_cpu_to_le_16(0xffff);
3350 	req.enables = rte_cpu_to_le_32(enables);
3351 
3352 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3353 
3354 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3355 
3356 	HWRM_CHECK_RESULT();
3357 	HWRM_UNLOCK();
3358 
3359 	return rc;
3360 }
3361 
3362 /* min values are the guaranteed resources and max values are subject
3363  * to availability. The strategy for now is to keep both min & max
3364  * values the same.
3365  */
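3366 /* Example (illustrative numbers): with num_vfs == 3 and
3367  * bp->max_tx_rings == 16, each VF is offered
3368  * min_tx_rings == max_tx_rings == 16 / (3 + 1) == 4.
3369  */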
3366 static void
3367 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3368 			      struct hwrm_func_vf_resource_cfg_input *req,
3369 			      int num_vfs)
3370 {
3371 	req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3372 					       (num_vfs + 1));
3373 	req->min_rsscos_ctx = req->max_rsscos_ctx;
3374 	req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3375 	req->min_stat_ctx = req->max_stat_ctx;
3376 	req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3377 					       (num_vfs + 1));
3378 	req->min_cmpl_rings = req->max_cmpl_rings;
3379 	req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3380 	req->min_tx_rings = req->max_tx_rings;
3381 	req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3382 	req->min_rx_rings = req->max_rx_rings;
3383 	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3384 	req->min_l2_ctxs = req->max_l2_ctxs;
3385 	/* TODO: For now, do not support VMDq/RFS on VFs. */
3386 	req->max_vnics = rte_cpu_to_le_16(1);
3387 	req->min_vnics = req->max_vnics;
3388 	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3389 						 (num_vfs + 1));
3390 	req->min_hw_ring_grps = req->max_hw_ring_grps;
3391 	req->flags =
3392 	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3393 }
3394 
3395 static void
3396 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3397 			      struct hwrm_func_cfg_input *req,
3398 			      int num_vfs)
3399 {
3400 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3401 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3402 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3403 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3404 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3405 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3406 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3407 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3408 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3409 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3410 
3411 	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3412 				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3413 				    BNXT_NUM_VLANS);
3414 	req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3415 	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3416 						(num_vfs + 1));
3417 	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3418 	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3419 					       (num_vfs + 1));
3420 	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3421 	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3422 	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3423 	/* TODO: For now, do not support VMDq/RFS on VFs. */
3424 	req->num_vnics = rte_cpu_to_le_16(1);
3425 	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3426 						 (num_vfs + 1));
3427 }
3428 
3429 /* Update the port-wide resource maximums based on how many resources
3430  * were allocated to the VF.
3431  */
3432 static int bnxt_update_max_resources(struct bnxt *bp,
3433 				     int vf)
3434 {
3435 	struct hwrm_func_qcfg_input req = {0};
3436 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3437 	int rc;
3438 
3439 	/* Get the actual allocated values now */
3440 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3441 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3442 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3443 	HWRM_CHECK_RESULT();
3444 
3445 	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3446 	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3447 	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3448 	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3449 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3450 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3451 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3452 
3453 	HWRM_UNLOCK();
3454 
3455 	return 0;
3456 }
3457 
3458 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3459 {
3460 	struct hwrm_func_qcfg_input req = {0};
3461 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3462 	int rc;
3463 
3464 	/* Query the VF's currently configured default VLAN. */
3465 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3466 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3467 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3468 	HWRM_CHECK_RESULT();
3469 	rc = rte_le_to_cpu_16(resp->vlan);
3470 
3471 	HWRM_UNLOCK();
3472 
3473 	return rc;
3474 }
3475 
3476 static int bnxt_query_pf_resources(struct bnxt *bp,
3477 				   struct bnxt_pf_resource_info *pf_resc)
3478 {
3479 	struct hwrm_func_qcfg_input req = {0};
3480 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3481 	int rc;
3482 
3483 	/* Copy the allocated resource counts into the PF struct. */
3484 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3485 	req.fid = rte_cpu_to_le_16(0xffff);
3486 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3487 	HWRM_CHECK_RESULT();
3488 
3489 	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3490 	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3491 	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3492 	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3493 	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3494 	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3495 	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3496 	bp->pf->evb_mode = resp->evb_mode;
3497 
3498 	HWRM_UNLOCK();
3499 
3500 	return rc;
3501 }
3502 
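3503 /* The PF keeps its per-function share plus the integer-division
3504  * remainder. Example (illustrative numbers): with num_vfs == 6 and
3505  * bp->max_cp_rings == 64, each VF gets 64 / 7 == 9 completion rings
3506  * and the PF keeps 9 + 64 % 7 == 10.
3507  */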
3503 static void
3504 bnxt_calculate_pf_resources(struct bnxt *bp,
3505 			    struct bnxt_pf_resource_info *pf_resc,
3506 			    int num_vfs)
3507 {
3508 	if (!num_vfs) {
3509 		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3510 		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3511 		pf_resc->num_cp_rings = bp->max_cp_rings;
3512 		pf_resc->num_tx_rings = bp->max_tx_rings;
3513 		pf_resc->num_rx_rings = bp->max_rx_rings;
3514 		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3515 		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3516 
3517 		return;
3518 	}
3519 
3520 	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3521 				   bp->max_rsscos_ctx % (num_vfs + 1);
3522 	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3523 				 bp->max_stat_ctx % (num_vfs + 1);
3524 	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3525 				bp->max_cp_rings % (num_vfs + 1);
3526 	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3527 				bp->max_tx_rings % (num_vfs + 1);
3528 	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3529 				bp->max_rx_rings % (num_vfs + 1);
3530 	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3531 			       bp->max_l2_ctx % (num_vfs + 1);
3532 	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3533 				    bp->max_ring_grps % (num_vfs + 1);
3534 }
3535 
3536 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3537 {
3538 	struct bnxt_pf_resource_info pf_resc = { 0 };
3539 	int rc;
3540 
3541 	if (!BNXT_PF(bp)) {
3542 		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
3543 		return -EINVAL;
3544 	}
3545 
3546 	rc = bnxt_hwrm_func_qcaps(bp);
3547 	if (rc)
3548 		return rc;
3549 
3550 	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3551 
3552 	bp->pf->func_cfg_flags &=
3553 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3554 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3555 	bp->pf->func_cfg_flags |=
3556 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3557 	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3558 	rc = __bnxt_hwrm_func_qcaps(bp);
3559 	return rc;
3560 }
3561 
3562 static int
3563 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3564 {
3565 	size_t req_buf_sz, sz;
3566 	int i, rc;
3567 
3568 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3569 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3570 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3571 	if (bp->pf->vf_req_buf == NULL) {
3572 		return -ENOMEM;
3573 	}
3574 
3575 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3576 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3577 
3578 	for (i = 0; i < num_vfs; i++)
3579 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3580 					     (i * HWRM_MAX_REQ_LEN);
3581 
3582 	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3583 	if (rc)
3584 		rte_free(bp->pf->vf_req_buf);
3585 
3586 	return rc;
3587 }
3588 
3589 static int
3590 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3591 {
3592 	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3593 	struct hwrm_func_vf_resource_cfg_input req = {0};
3594 	int i, rc = 0;
3595 
3596 	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3597 	bp->pf->active_vfs = 0;
3598 	for (i = 0; i < num_vfs; i++) {
3599 		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3600 		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3601 		rc = bnxt_hwrm_send_message(bp,
3602 					    &req,
3603 					    sizeof(req),
3604 					    BNXT_USE_CHIMP_MB);
3605 		if (rc || resp->error_code) {
3606 			PMD_DRV_LOG(ERR,
3607 				"Failed to initialize VF %d\n", i);
3608 			PMD_DRV_LOG(ERR,
3609 				"Not all VFs available. (%d, %d)\n",
3610 				rc, resp->error_code);
3611 			HWRM_UNLOCK();
3612 
3613 			/* If the first VF configuration itself fails,
3614 			 * unregister the vf_fwd_request buffer.
3615 			 */
3616 			if (i == 0)
3617 				bnxt_hwrm_func_buf_unrgtr(bp);
3618 			break;
3619 		}
3620 		HWRM_UNLOCK();
3621 
3622 		/* Update the max resource values based on the resource values
3623 		 * allocated to the VF.
3624 		 */
3625 		bnxt_update_max_resources(bp, i);
3626 		bp->pf->active_vfs++;
3627 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3628 	}
3629 
3630 	return 0;
3631 }
3632 
3633 static int
3634 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3635 {
3636 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3637 	struct hwrm_func_cfg_input req = {0};
3638 	int i, rc;
3639 
3640 	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3641 
3642 	bp->pf->active_vfs = 0;
3643 	for (i = 0; i < num_vfs; i++) {
3644 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3645 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3646 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3647 		rc = bnxt_hwrm_send_message(bp,
3648 					    &req,
3649 					    sizeof(req),
3650 					    BNXT_USE_CHIMP_MB);
3651 
3652 		/* Clear enable flag for next pass */
3653 		req.enables &= ~rte_cpu_to_le_32(
3654 				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3655 
3656 		if (rc || resp->error_code) {
3657 			PMD_DRV_LOG(ERR,
3658 				"Failed to initialize VF %d\n", i);
3659 			PMD_DRV_LOG(ERR,
3660 				"Not all VFs available. (%d, %d)\n",
3661 				rc, resp->error_code);
3662 			HWRM_UNLOCK();
3663 
3664 			/* If the first VF configuration itself fails,
3665 			 * unregister the vf_fwd_request buffer.
3666 			 */
3667 			if (i == 0)
3668 				bnxt_hwrm_func_buf_unrgtr(bp);
3669 			break;
3670 		}
3671 
3672 		HWRM_UNLOCK();
3673 
3674 		/* Update the max resource values based on the resource values
3675 		 * allocated to the VF.
3676 		 */
3677 		bnxt_update_max_resources(bp, i);
3678 		bp->pf->active_vfs++;
3679 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3680 	}
3681 
3682 	return 0;
3683 }
3684 
3685 static void
3686 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3687 {
3688 	if (bp->flags & BNXT_FLAG_NEW_RM)
3689 		bnxt_process_vf_resc_config_new(bp, num_vfs);
3690 	else
3691 		bnxt_process_vf_resc_config_old(bp, num_vfs);
3692 }
3693 
3694 static void
3695 bnxt_update_pf_resources(struct bnxt *bp,
3696 			 struct bnxt_pf_resource_info *pf_resc)
3697 {
3698 	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3699 	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3700 	bp->max_cp_rings = pf_resc->num_cp_rings;
3701 	bp->max_tx_rings = pf_resc->num_tx_rings;
3702 	bp->max_rx_rings = pf_resc->num_rx_rings;
3703 	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3704 }
3705 
3706 static int32_t
3707 bnxt_configure_pf_resources(struct bnxt *bp,
3708 			    struct bnxt_pf_resource_info *pf_resc)
3709 {
3710 	/*
3711 	 * Use STD_TX_RING_MODE here to limit the number of TX rings.
3712 	 * This allows QoS to function properly; without it, the PF
3713 	 * rings would not honor bandwidth settings.
3714 	 */
3715 	bp->pf->func_cfg_flags &=
3716 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3717 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3718 	bp->pf->func_cfg_flags |=
3719 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3720 	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3721 }
3722 
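3723 /* Allocate and configure resources for the requested number of VFs:
3724  * query capabilities, carve PF/VF shares, register the VF forward-
3725  * request buffer, then configure each VF in turn.
3726  */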
3723 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3724 {
3725 	struct bnxt_pf_resource_info pf_resc = { 0 };
3726 	int rc;
3727 
3728 	if (!BNXT_PF(bp)) {
3729 		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3730 		return -EINVAL;
3731 	}
3732 
3733 	rc = bnxt_hwrm_func_qcaps(bp);
3734 	if (rc)
3735 		return rc;
3736 
3737 	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3738 
3739 	rc = bnxt_configure_pf_resources(bp, &pf_resc);
3740 	if (rc)
3741 		return rc;
3742 
3743 	rc = bnxt_query_pf_resources(bp, &pf_resc);
3744 	if (rc)
3745 		return rc;
3746 
3747 	/*
3748 	 * Now, create and register a buffer to hold forwarded VF requests
3749 	 */
3750 	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3751 	if (rc)
3752 		return rc;
3753 
3754 	bnxt_configure_vf_resources(bp, num_vfs);
3755 
3756 	bnxt_update_pf_resources(bp, &pf_resc);
3757 
3758 	return 0;
3759 }
3760 
3761 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3762 {
3763 	struct hwrm_func_cfg_input req = {0};
3764 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3765 	int rc;
3766 
3767 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3768 
3769 	req.fid = rte_cpu_to_le_16(0xffff);
3770 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3771 	req.evb_mode = bp->pf->evb_mode;
3772 
3773 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3774 	HWRM_CHECK_RESULT();
3775 	HWRM_UNLOCK();
3776 
3777 	return rc;
3778 }
3779 
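3780 /* Note: tunnel_dst_port_val is given in network byte order per the
3781  * HWRM spec, while the returned tunnel_dst_port_id is a little-endian
3782  * firmware handle.
3783  */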
3780 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3781 				uint8_t tunnel_type)
3782 {
3783 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
3784 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3785 	int rc = 0;
3786 
3787 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3788 	req.tunnel_type = tunnel_type;
3789 	req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3790 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3791 	HWRM_CHECK_RESULT();
3792 
3793 	switch (tunnel_type) {
3794 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3795 		bp->vxlan_fw_dst_port_id =
3796 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3797 		bp->vxlan_port = port;
3798 		break;
3799 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3800 		bp->geneve_fw_dst_port_id =
3801 			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3802 		bp->geneve_port = port;
3803 		break;
3804 	default:
3805 		break;
3806 	}
3807 
3808 	HWRM_UNLOCK();
3809 
3810 	return rc;
3811 }
3812 
3813 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3814 				uint8_t tunnel_type)
3815 {
3816 	struct hwrm_tunnel_dst_port_free_input req = {0};
3817 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3818 	int rc = 0;
3819 
3820 	HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3821 
3822 	req.tunnel_type = tunnel_type;
3823 	req.tunnel_dst_port_id = rte_cpu_to_le_16(port); /* FW handle, LE */
3824 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3825 
3826 	HWRM_CHECK_RESULT();
3827 	HWRM_UNLOCK();
3828 
3829 	if (tunnel_type ==
3830 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
3831 		bp->vxlan_port = 0;
3832 		bp->vxlan_port_cnt = 0;
3833 	}
3834 
3835 	if (tunnel_type ==
3836 	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
3837 		bp->geneve_port = 0;
3838 		bp->geneve_port_cnt = 0;
3839 	}
3840 
3841 	return rc;
3842 }
3843 
3844 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3845 					uint32_t flags)
3846 {
3847 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3848 	struct hwrm_func_cfg_input req = {0};
3849 	int rc;
3850 
3851 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3852 
3853 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3854 	req.flags = rte_cpu_to_le_32(flags);
3855 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3856 
3857 	HWRM_CHECK_RESULT();
3858 	HWRM_UNLOCK();
3859 
3860 	return rc;
3861 }
3862 
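3863 /* Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): copies the
3864  * caller-supplied flags into each of the VF's VNICs.
3865  */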
3863 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3864 {
3865 	uint32_t *flag = flagp;
3866 
3867 	vnic->flags = *flag;
3868 }
3869 
3870 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3871 {
3872 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3873 }
3874 
3875 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
3876 {
3877 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3878 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3879 	int rc;
3880 
3881 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3882 
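3883 	/* The forward-request buffer is registered as a single physically
3884 	 * contiguous region; its size is encoded as the log2 page-size
3885 	 * enum computed by page_getenum().
3886 	 */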
3883 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
3884 	req.req_buf_page_size =
3885 		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
3886 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3887 	req.req_buf_page_addr0 =
3888 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3889 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3890 		PMD_DRV_LOG(ERR,
3891 			"unable to map buffer address to physical memory\n");
3892 		HWRM_UNLOCK();
3893 		return -ENOMEM;
3894 	}
3895 
3896 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3897 
3898 	HWRM_CHECK_RESULT();
3899 	HWRM_UNLOCK();
3900 
3901 	return rc;
3902 }
3903 
3904 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3905 {
3906 	int rc = 0;
3907 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3908 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3909 
3910 	if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3911 		return 0;
3912 
3913 	HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3914 
3915 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3916 
3917 	HWRM_CHECK_RESULT();
3918 	HWRM_UNLOCK();
3919 
3920 	return rc;
3921 }
3922 
3923 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3924 {
3925 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3926 	struct hwrm_func_cfg_input req = {0};
3927 	int rc;
3928 
3929 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3930 
3931 	req.fid = rte_cpu_to_le_16(0xffff);
3932 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3933 	req.enables = rte_cpu_to_le_32(
3934 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3935 	req.async_event_cr = rte_cpu_to_le_16(
3936 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3937 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3938 
3939 	HWRM_CHECK_RESULT();
3940 	HWRM_UNLOCK();
3941 
3942 	return rc;
3943 }
3944 
3945 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3946 {
3947 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3948 	struct hwrm_func_vf_cfg_input req = {0};
3949 	int rc;
3950 
3951 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3952 
3953 	req.enables = rte_cpu_to_le_32(
3954 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3955 	req.async_event_cr = rte_cpu_to_le_16(
3956 			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3957 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3958 
3959 	HWRM_CHECK_RESULT();
3960 	HWRM_UNLOCK();
3961 
3962 	return rc;
3963 }
3964 
3965 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3966 {
3967 	struct hwrm_func_cfg_input req = {0};
3968 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3969 	uint16_t dflt_vlan, fid;
3970 	uint32_t func_cfg_flags;
3971 	int rc = 0;
3972 
3973 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3974 
3975 	if (is_vf) {
3976 		dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
3977 		fid = bp->pf->vf_info[vf].fid;
3978 		func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
3979 	} else {
3980 		fid = rte_cpu_to_le_16(0xffff);
3981 		func_cfg_flags = bp->pf->func_cfg_flags;
3982 		dflt_vlan = bp->vlan;
3983 	}
3984 
3985 	req.flags = rte_cpu_to_le_32(func_cfg_flags);
3986 	req.fid = rte_cpu_to_le_16(fid);
3987 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3988 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3989 
3990 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3991 
3992 	HWRM_CHECK_RESULT();
3993 	HWRM_UNLOCK();
3994 
3995 	return rc;
3996 }
3997 
3998 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3999 			uint16_t max_bw, uint16_t enables)
4000 {
4001 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4002 	struct hwrm_func_cfg_input req = {0};
4003 	int rc;
4004 
4005 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4006 
4007 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4008 	req.enables |= rte_cpu_to_le_32(enables);
4009 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4010 	req.max_bw = rte_cpu_to_le_32(max_bw);
4011 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4012 
4013 	HWRM_CHECK_RESULT();
4014 	HWRM_UNLOCK();
4015 
4016 	return rc;
4017 }
4018 
4019 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4020 {
4021 	struct hwrm_func_cfg_input req = {0};
4022 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4023 	int rc = 0;
4024 
4025 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4026 
4027 	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4028 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4029 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4030 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4031 
4032 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4033 
4034 	HWRM_CHECK_RESULT();
4035 	HWRM_UNLOCK();
4036 
4037 	return rc;
4038 }
4039 
4040 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4041 {
4042 	int rc;
4043 
4044 	if (BNXT_PF(bp))
4045 		rc = bnxt_hwrm_func_cfg_def_cp(bp);
4046 	else
4047 		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4048 
4049 	return rc;
4050 }
4051 
4052 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4053 			      void *encaped, size_t ec_size)
4054 {
4055 	int rc = 0;
4056 	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4057 	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4058 
4059 	if (ec_size > sizeof(req.encap_request))
4060 		return -1;
4061 
4062 	HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4063 
4064 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4065 	memcpy(req.encap_request, encaped, ec_size);
4066 
4067 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4068 
4069 	HWRM_CHECK_RESULT();
4070 	HWRM_UNLOCK();
4071 
4072 	return rc;
4073 }
4074 
4075 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4076 				       struct rte_ether_addr *mac)
4077 {
4078 	struct hwrm_func_qcfg_input req = {0};
4079 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4080 	int rc;
4081 
4082 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4083 
4084 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4085 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4086 
4087 	HWRM_CHECK_RESULT();
4088 
4089 	memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4090 
4091 	HWRM_UNLOCK();
4092 
4093 	return rc;
4094 }
4095 
4096 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4097 			    void *encaped, size_t ec_size)
4098 {
4099 	int rc = 0;
4100 	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4101 	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4102 
4103 	if (ec_size > sizeof(req.encap_request))
4104 		return -1;
4105 
4106 	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4107 
4108 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4109 	memcpy(req.encap_request, encaped, ec_size);
4110 
4111 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4112 
4113 	HWRM_CHECK_RESULT();
4114 	HWRM_UNLOCK();
4115 
4116 	return rc;
4117 }
4118 
4119 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4120 			 struct rte_eth_stats *stats, uint8_t rx)
4121 {
4122 	int rc = 0;
4123 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4124 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4125 
4126 	HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4127 
4128 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
4129 
4130 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4131 
4132 	HWRM_CHECK_RESULT();
4133 
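4134 	/* Firmware reports unicast/multicast/broadcast counts separately;
4135 	 * the per-queue rte_eth_stats counters are their sum.
4136 	 */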
4134 	if (rx) {
4135 		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4136 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4137 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4138 		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4139 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4140 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4141 		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4142 		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4143 	} else {
4144 		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4145 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4146 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4147 		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4148 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4149 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4150 	}
4151 
4152 	HWRM_UNLOCK();
4153 
4154 	return rc;
4155 }
4156 
4157 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4158 {
4159 	struct hwrm_port_qstats_input req = {0};
4160 	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4161 	struct bnxt_pf_info *pf = bp->pf;
4162 	int rc;
4163 
4164 	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4165 
4166 	req.port_id = rte_cpu_to_le_16(pf->port_id);
4167 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4168 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4169 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4170 
4171 	HWRM_CHECK_RESULT();
4172 	HWRM_UNLOCK();
4173 
4174 	return rc;
4175 }
4176 
4177 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4178 {
4179 	struct hwrm_port_clr_stats_input req = {0};
4180 	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4181 	struct bnxt_pf_info *pf = bp->pf;
4182 	int rc;
4183 
4184 	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
4185 	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4186 	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4187 		return 0;
4188 
4189 	HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4190 
4191 	req.port_id = rte_cpu_to_le_16(pf->port_id);
4192 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4193 
4194 	HWRM_CHECK_RESULT();
4195 	HWRM_UNLOCK();
4196 
4197 	return rc;
4198 }
4199 
4200 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4201 {
4202 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4203 	struct hwrm_port_led_qcaps_input req = {0};
4204 	int rc;
4205 
4206 	if (BNXT_VF(bp))
4207 		return 0;
4208 
4209 	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4210 	req.port_id = bp->pf->port_id;
4211 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4212 
4213 	HWRM_CHECK_RESULT();
4214 
4215 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4216 		unsigned int i;
4217 
4218 		bp->leds->num_leds = resp->num_leds;
4219 		memcpy(bp->leds, &resp->led0_id,
4220 			sizeof(bp->leds[0]) * bp->leds->num_leds);
4221 		for (i = 0; i < bp->leds->num_leds; i++) {
4222 			struct bnxt_led_info *led = &bp->leds[i];
4223 
4224 			uint16_t caps = led->led_state_caps;
4225 
4226 			if (!led->led_group_id ||
4227 				!BNXT_LED_ALT_BLINK_CAP(caps)) {
4228 				bp->leds->num_leds = 0;
4229 				break;
4230 			}
4231 		}
4232 	}
4233 
4234 	HWRM_UNLOCK();
4235 
4236 	return rc;
4237 }
4238 
4239 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4240 {
4241 	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4242 	struct hwrm_port_led_cfg_input req = {0};
4243 	struct bnxt_led_cfg *led_cfg;
4244 	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4245 	uint16_t duration = 0;
4246 	int rc, i;
4247 
4248 	if (!bp->leds->num_leds || BNXT_VF(bp))
4249 		return -EOPNOTSUPP;
4250 
4251 	HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4252 
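4253 	/* When turning the LED on, blink in the alternate state with a
4254 	 * 500 ms on/off period so the port is easy to identify visually.
4255 	 */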
4253 	if (led_on) {
4254 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4255 		duration = rte_cpu_to_le_16(500);
4256 	}
4257 	req.port_id = bp->pf->port_id;
4258 	req.num_leds = bp->leds->num_leds;
4259 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4260 	for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4261 		req.enables |= BNXT_LED_DFLT_ENABLES(i);
4262 		led_cfg->led_id = bp->leds[i].led_id;
4263 		led_cfg->led_state = led_state;
4264 		led_cfg->led_blink_on = duration;
4265 		led_cfg->led_blink_off = duration;
4266 		led_cfg->led_group_id = bp->leds[i].led_group_id;
4267 	}
4268 
4269 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4270 
4271 	HWRM_CHECK_RESULT();
4272 	HWRM_UNLOCK();
4273 
4274 	return rc;
4275 }
4276 
4277 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4278 			       uint32_t *length)
4279 {
4280 	int rc;
4281 	struct hwrm_nvm_get_dir_info_input req = {0};
4282 	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4283 
4284 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4285 
4286 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4287 
4288 	HWRM_CHECK_RESULT();
4289 
4290 	*entries = rte_le_to_cpu_32(resp->entries);
4291 	*length = rte_le_to_cpu_32(resp->entry_length);
4292 
4293 	HWRM_UNLOCK();
4294 	return rc;
4295 }
4296 
4297 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4298 {
4299 	int rc;
4300 	uint32_t dir_entries;
4301 	uint32_t entry_length;
4302 	uint8_t *buf;
4303 	size_t buflen;
4304 	rte_iova_t dma_handle;
4305 	struct hwrm_nvm_get_dir_entries_input req = {0};
4306 	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4307 
4308 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4309 	if (rc != 0)
4310 		return rc;
4311 
4312 	*data++ = dir_entries;
4313 	*data++ = entry_length;
4314 	len -= 2;
4315 	memset(data, 0xff, len);
4316 
4317 	buflen = dir_entries * entry_length;
4318 	buf = rte_malloc("nvm_dir", buflen, 0);
4319 	if (buf == NULL)
4320 		return -ENOMEM;
4321 	dma_handle = rte_malloc_virt2iova(buf);
4322 	if (dma_handle == RTE_BAD_IOVA) {
4323 		PMD_DRV_LOG(ERR,
4324 			"unable to map response address to physical memory\n");
4325 		rte_free(buf);	/* don't leak the DMA buffer */
4326 		return -ENOMEM;
4326 	}
4327 	HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4328 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4329 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4330 
4331 	if (rc == 0)
4332 		memcpy(data, buf, len > buflen ? buflen : len);
4333 
4334 	rte_free(buf);
4335 	HWRM_CHECK_RESULT();
4336 	HWRM_UNLOCK();
4337 
4338 	return rc;
4339 }
4340 
4341 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4342 			     uint32_t offset, uint32_t length,
4343 			     uint8_t *data)
4344 {
4345 	int rc;
4346 	uint8_t *buf;
4347 	rte_iova_t dma_handle;
4348 	struct hwrm_nvm_read_input req = {0};
4349 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4350 
4351 	buf = rte_malloc("nvm_item", length, 0);
4352 	if (!buf)
4353 		return -ENOMEM;
4354 
4355 	dma_handle = rte_malloc_virt2iova(buf);
4356 	if (dma_handle == RTE_BAD_IOVA) {
4357 		PMD_DRV_LOG(ERR,
4358 			"unable to map response address to physical memory\n");
4359 		rte_free(buf);	/* don't leak the DMA buffer */
4360 		return -ENOMEM;
4360 	}
4361 	HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4362 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4363 	req.dir_idx = rte_cpu_to_le_16(index);
4364 	req.offset = rte_cpu_to_le_32(offset);
4365 	req.len = rte_cpu_to_le_32(length);
4366 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4367 	if (rc == 0)
4368 		memcpy(data, buf, length);
4369 
4370 	rte_free(buf);
4371 	HWRM_CHECK_RESULT();
4372 	HWRM_UNLOCK();
4373 
4374 	return rc;
4375 }
4376 
4377 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4378 {
4379 	int rc;
4380 	struct hwrm_nvm_erase_dir_entry_input req = {0};
4381 	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4382 
4383 	HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4384 	req.dir_idx = rte_cpu_to_le_16(index);
4385 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4386 	HWRM_CHECK_RESULT();
4387 	HWRM_UNLOCK();
4388 
4389 	return rc;
4390 }
4391 
4392 
4393 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4394 			  uint16_t dir_ordinal, uint16_t dir_ext,
4395 			  uint16_t dir_attr, const uint8_t *data,
4396 			  size_t data_len)
4397 {
4398 	int rc;
4399 	struct hwrm_nvm_write_input req = {0};
4400 	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4401 	rte_iova_t dma_handle;
4402 	uint8_t *buf;
4403 
4404 	buf = rte_malloc("nvm_write", data_len, 0);
4405 	if (!buf)
4406 		return -ENOMEM;
4407 
4408 	dma_handle = rte_malloc_virt2iova(buf);
4409 	if (dma_handle == RTE_BAD_IOVA) {
4410 		PMD_DRV_LOG(ERR,
4411 			"unable to map response address to physical memory\n");
4412 		rte_free(buf);	/* don't leak the DMA buffer */
4413 		return -ENOMEM;
4413 	}
4414 	memcpy(buf, data, data_len);
4415 
4416 	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4417 
4418 	req.dir_type = rte_cpu_to_le_16(dir_type);
4419 	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4420 	req.dir_ext = rte_cpu_to_le_16(dir_ext);
4421 	req.dir_attr = rte_cpu_to_le_16(dir_attr);
4422 	req.dir_data_length = rte_cpu_to_le_32(data_len);
4423 	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4424 
4425 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4426 
4427 	rte_free(buf);
4428 	HWRM_CHECK_RESULT();
4429 	HWRM_UNLOCK();
4430 
4431 	return rc;
4432 }
4433 
4434 static void
4435 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4436 {
4437 	uint32_t *count = cbdata;
4438 
4439 	*count = *count + 1;
4440 }
4441 
4442 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4443 				     struct bnxt_vnic_info *vnic __rte_unused)
4444 {
4445 	return 0;
4446 }
4447 
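4448 /* Counts the VNICs allocated to a VF. Usage sketch (hypothetical
4449  * caller, not part of this file):
4450  *
4451  *	int n = bnxt_vf_vnic_count(bp, vf_idx);
4452  *
4453  *	if (n > 1)
4454  *		PMD_DRV_LOG(INFO, "VF %u owns %d VNICs\n", vf_idx, n);
4455  */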
4448 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4449 {
4450 	uint32_t count = 0;
4451 
4452 	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4453 	    &count, bnxt_vnic_count_hwrm_stub);
4454 
4455 	return count;
4456 }
4457 
4458 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4459 					uint16_t *vnic_ids)
4460 {
4461 	struct hwrm_func_vf_vnic_ids_query_input req = {0};
4462 	struct hwrm_func_vf_vnic_ids_query_output *resp =
4463 						bp->hwrm_cmd_resp_addr;
4464 	int rc;
4465 
4466 	/* First query all VNIC ids */
4467 	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4468 
4469 	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4470 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4471 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4472 
4473 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4474 		HWRM_UNLOCK();
4475 		PMD_DRV_LOG(ERR,
4476 		"unable to map VNIC ID table address to physical memory\n");
4477 		return -ENOMEM;
4478 	}
4479 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4480 	HWRM_CHECK_RESULT();
4481 	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4482 
4483 	HWRM_UNLOCK();
4484 
4485 	return rc;
4486 }
4487 
4488 /*
4489  * This function queries the VNIC IDs for a specified VF. It then calls
4490  * the vnic_cb to update the necessary field in vnic_info with cbdata.
4491  * Then it calls the hwrm_cb function to program this new vnic configuration.
4492  */
4493 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4494 	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4495 	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4496 {
4497 	struct bnxt_vnic_info vnic;
4498 	int rc = 0;
4499 	int i, num_vnic_ids;
4500 	uint16_t *vnic_ids;
4501 	size_t vnic_id_sz;
4502 	size_t sz;
4503 
4504 	/* First query all VNIC ids */
4505 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4506 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4507 			RTE_CACHE_LINE_SIZE);
4508 	if (vnic_ids == NULL)
4509 		return -ENOMEM;
4510 
4511 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4512 		rte_mem_lock_page(((char *)vnic_ids) + sz);
4513 
4514 	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4515 
4516 	if (num_vnic_ids < 0)
4517 		return num_vnic_ids;
4518 
4519 	/* Retrieve VNIC, update bd_stall then update */
4520 
4521 	for (i = 0; i < num_vnic_ids; i++) {
4522 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4523 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4524 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4525 		if (rc)
4526 			break;
4527 		if (vnic.mru <= 4)	/* Indicates unallocated */
4528 			continue;
4529 
4530 		vnic_cb(&vnic, cbdata);
4531 
4532 		rc = hwrm_cb(bp, &vnic);
4533 		if (rc)
4534 			break;
4535 	}
4536 
4537 	rte_free(vnic_ids);
4538 
4539 	return rc;
4540 }
4541 
4542 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4543 					      bool on)
4544 {
4545 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4546 	struct hwrm_func_cfg_input req = {0};
4547 	int rc;
4548 
4549 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4550 
4551 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4552 	req.enables |= rte_cpu_to_le_32(
4553 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4554 	req.vlan_antispoof_mode = on ?
4555 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4556 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4557 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4558 
4559 	HWRM_CHECK_RESULT();
4560 	HWRM_UNLOCK();
4561 
4562 	return rc;
4563 }
4564 
4565 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4566 {
4567 	struct bnxt_vnic_info vnic;
4568 	uint16_t *vnic_ids;
4569 	size_t vnic_id_sz;
4570 	int num_vnic_ids, i;
4571 	size_t sz;
4572 	int rc;
4573 
4574 	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4575 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4576 			RTE_CACHE_LINE_SIZE);
4577 	if (vnic_ids == NULL)
4578 		return -ENOMEM;
4579 
4580 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4581 		rte_mem_lock_page(((char *)vnic_ids) + sz);
4582 
4583 	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4584 	if (rc <= 0)
4585 		goto exit;
4586 	num_vnic_ids = rc;
4587 
4588 	/*
4589 	 * Loop through to find the default VNIC ID.
4590 	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4591 	 * by sending the hwrm_func_qcfg command to the firmware.
4592 	 */
4593 	for (i = 0; i < num_vnic_ids; i++) {
4594 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4595 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4596 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4597 					bp->pf->first_vf_id + vf);
4598 		if (rc)
4599 			goto exit;
4600 		if (vnic.func_default) {
4601 			rte_free(vnic_ids);
4602 			return vnic.fw_vnic_id;
4603 		}
4604 	}
4605 	/* Could not find a default VNIC. */
4606 	PMD_DRV_LOG(ERR, "No default VNIC\n");
4607 exit:
4608 	rte_free(vnic_ids);
4609 	return rc;
4610 }
4611 
4612 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4613 			 uint16_t dst_id,
4614 			 struct bnxt_filter_info *filter)
4615 {
4616 	int rc = 0;
4617 	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4618 	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4619 	uint32_t enables = 0;
4620 
4621 	if (filter->fw_em_filter_id != UINT64_MAX)
4622 		bnxt_hwrm_clear_em_filter(bp, filter);
4623 
4624 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4625 
4626 	req.flags = rte_cpu_to_le_32(filter->flags);
4627 
4628 	enables = filter->enables |
4629 	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4630 	req.dst_id = rte_cpu_to_le_16(dst_id);
4631 
4632 	if (filter->ip_addr_type) {
4633 		req.ip_addr_type = filter->ip_addr_type;
4634 		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4635 	}
4636 	if (enables &
4637 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4638 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4639 	if (enables &
4640 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4641 		memcpy(req.src_macaddr, filter->src_macaddr,
4642 		       RTE_ETHER_ADDR_LEN);
4643 	if (enables &
4644 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4645 		memcpy(req.dst_macaddr, filter->dst_macaddr,
4646 		       RTE_ETHER_ADDR_LEN);
4647 	if (enables &
4648 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4649 		req.ovlan_vid = filter->l2_ovlan;
4650 	if (enables &
4651 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4652 		req.ivlan_vid = filter->l2_ivlan;
4653 	if (enables &
4654 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4655 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4656 	if (enables &
4657 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4658 		req.ip_protocol = filter->ip_protocol;
4659 	if (enables &
4660 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4661 		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4662 	if (enables &
4663 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4664 		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4665 	if (enables &
4666 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4667 		req.src_port = rte_cpu_to_be_16(filter->src_port);
4668 	if (enables &
4669 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4670 		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4671 	if (enables &
4672 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4673 		req.mirror_vnic_id = filter->mirror_vnic_id;
4674 
4675 	req.enables = rte_cpu_to_le_32(enables);
4676 
4677 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4678 
4679 	HWRM_CHECK_RESULT();
4680 
4681 	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4682 	HWRM_UNLOCK();
4683 
4684 	return rc;
4685 }
4686 
4687 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4688 {
4689 	int rc = 0;
4690 	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4691 	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4692 
4693 	if (filter->fw_em_filter_id == UINT64_MAX)
4694 		return 0;
4695 
4696 	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4697 
4698 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4699 
4700 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4701 
4702 	HWRM_CHECK_RESULT();
4703 	HWRM_UNLOCK();
4704 
4705 	filter->fw_em_filter_id = UINT64_MAX;
4706 	filter->fw_l2_filter_id = UINT64_MAX;
4707 
4708 	return 0;
4709 }
4710 
4711 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4712 			 uint16_t dst_id,
4713 			 struct bnxt_filter_info *filter)
4714 {
4715 	int rc = 0;
4716 	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4717 	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4718 						bp->hwrm_cmd_resp_addr;
4719 	uint32_t enables = 0;
4720 
4721 	if (filter->fw_ntuple_filter_id != UINT64_MAX)
4722 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
4723 
4724 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4725 
4726 	req.flags = rte_cpu_to_le_32(filter->flags);
4727 
4728 	enables = filter->enables |
4729 	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4730 	req.dst_id = rte_cpu_to_le_16(dst_id);
4731 
4732 	if (filter->ip_addr_type) {
4733 		req.ip_addr_type = filter->ip_addr_type;
4734 		enables |=
4735 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4736 	}
4737 	if (enables &
4738 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4739 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4740 	if (enables &
4741 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4742 		memcpy(req.src_macaddr, filter->src_macaddr,
4743 		       RTE_ETHER_ADDR_LEN);
4744 	if (enables &
4745 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4746 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4747 	if (enables &
4748 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4749 		req.ip_protocol = filter->ip_protocol;
4750 	if (enables &
4751 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4752 		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4753 	if (enables &
4754 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4755 		req.src_ipaddr_mask[0] =
4756 			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4757 	if (enables &
4758 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4759 		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4760 	if (enables &
4761 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4762 		req.dst_ipaddr_mask[0] =
4763 			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
4764 	if (enables &
4765 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4766 		req.src_port = rte_cpu_to_le_16(filter->src_port);
4767 	if (enables &
4768 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4769 		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4770 	if (enables &
4771 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4772 		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4773 	if (enables &
4774 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4775 		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4776 	if (enables &
4777 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4778 		req.mirror_vnic_id = filter->mirror_vnic_id;
4779 
4780 	req.enables = rte_cpu_to_le_32(enables);
4781 
4782 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4783 
4784 	HWRM_CHECK_RESULT();
4785 
4786 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4787 	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4788 	HWRM_UNLOCK();
4789 
4790 	return rc;
4791 }
4792 
4793 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4794 				struct bnxt_filter_info *filter)
4795 {
4796 	int rc = 0;
4797 	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4798 	struct hwrm_cfa_ntuple_filter_free_output *resp =
4799 						bp->hwrm_cmd_resp_addr;
4800 
4801 	if (filter->fw_ntuple_filter_id == UINT64_MAX)
4802 		return 0;
4803 
4804 	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4805 
4806 	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4807 
4808 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4809 
4810 	HWRM_CHECK_RESULT();
4811 	HWRM_UNLOCK();
4812 
4813 	filter->fw_ntuple_filter_id = UINT64_MAX;
4814 
4815 	return 0;
4816 }
4817 
4818 static int
4819 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4820 {
4821 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4822 	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4823 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4824 	struct bnxt_rx_queue **rxqs = bp->rx_queues;
4825 	uint16_t *ring_tbl = vnic->rss_table;
4826 	int nr_ctxs = vnic->num_lb_ctxts;
4827 	int max_rings = bp->rx_nr_rings;
4828 	int i, j, k, cnt;
4829 	int rc = 0;
4830 
4831 	for (i = 0, k = 0; i < nr_ctxs; i++) {
4832 		struct bnxt_rx_ring_info *rxr;
4833 		struct bnxt_cp_ring_info *cpr;
4834 
4835 		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4836 
4837 		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4838 		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4839 		req.hash_mode_flags = vnic->hash_mode;
4840 
4841 		req.ring_grp_tbl_addr =
4842 		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4843 				     i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4844 				     2 * sizeof(*ring_tbl));
4845 		req.hash_key_tbl_addr =
4846 		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4847 
4848 		req.ring_table_pair_index = i;
4849 		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4850 
4851 		for (j = 0; j < 64; j++) {
4852 			uint16_t ring_id;
4853 
4854 			/* Find next active ring. */
4855 			for (cnt = 0; cnt < max_rings; cnt++) {
4856 				if (rx_queue_state[k] !=
4857 						RTE_ETH_QUEUE_STATE_STOPPED)
4858 					break;
4859 				if (++k == max_rings)
4860 					k = 0;
4861 			}
4862 
4863 			/* Return if no rings are active. */
4864 			if (cnt == max_rings) {
4865 				HWRM_UNLOCK();
4866 				return 0;
4867 			}
4868 
4869 			/* Add rx/cp ring pair to RSS table. */
4870 			rxr = rxqs[k]->rx_ring;
4871 			cpr = rxqs[k]->cp_ring;
4872 
4873 			ring_id = rxr->rx_ring_struct->fw_ring_id;
4874 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
4875 			ring_id = cpr->cp_ring_struct->fw_ring_id;
4876 			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
4877 
4878 			if (++k == max_rings)
4879 				k = 0;
4880 		}
4881 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4882 					    BNXT_USE_CHIMP_MB);
4883 
4884 		HWRM_CHECK_RESULT();
4885 		HWRM_UNLOCK();
4886 	}
4887 
4888 	return rc;
4889 }
4890 
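/*
 * Example (editor's sketch): on Thor each RSS context covers
 * BNXT_RSS_ENTRIES_PER_CTX_THOR (64) redirection entries, and each entry
 * is a pair of little-endian 16-bit ids (RX ring, completion ring).  That
 * is the origin of the "i * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 *
 * sizeof(*ring_tbl)" stride used above:
 */
static inline rte_iova_t
bnxt_rss_ctx_tbl_offset(const struct bnxt_vnic_info *vnic, int ctx_idx)
{
	/* Two 16-bit ring ids per redirection entry on Thor. */
	return vnic->rss_table_dma_addr +
	       ctx_idx * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 * sizeof(uint16_t);
}
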
4891 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4892 {
4893 	unsigned int rss_idx, fw_idx, i;
4894 
4895 	if (!(vnic->rss_table && vnic->hash_type))
4896 		return 0;
4897 
4898 	if (BNXT_CHIP_THOR(bp))
4899 		return bnxt_vnic_rss_configure_thor(bp, vnic);
4900 
4901 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4902 		return 0;
4903 
4904 	/*
4905 	 * Fill the RSS hash & redirection table with
4906 	 * ring group ids for all VNICs
4907 	 */
4908 	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4909 	     rss_idx++, fw_idx++) {
4910 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4911 			fw_idx %= bp->rx_cp_nr_rings;
4912 			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4913 				break;
4914 			fw_idx++;
4915 		}
4916 		if (i == bp->rx_cp_nr_rings)
4917 			return 0;
4918 		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4919 	}
4920 
4921 	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4926 }
4927 
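/*
 * Example (editor's sketch): the Thor path above only maps rings whose
 * queue state is started, so RSS is typically re-programmed for every
 * VNIC after an RX queue start/stop event.  Assumes the bp->nr_vnics and
 * bp->vnic_info fields used elsewhere in the driver.
 */
static int bnxt_reconfigure_all_rss(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_vnic_rss_configure(bp, &bp->vnic_info[i]);
		if (rc)
			return rc;
	}
	return 0;
}
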
4928 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4929 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4930 {
4931 	uint16_t flags;
4932 
4933 	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4934 
4935 	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
4936 	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4937 
4938 	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
4939 	req->num_cmpl_dma_aggr_during_int =
4940 		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4941 
4942 	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4943 
4944 	/* min timer set to 1/2 of interrupt timer */
4945 	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4946 
4947 	/* buf timer set to 1/4 of interrupt timer */
4948 	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4949 
4950 	req->cmpl_aggr_dma_tmr_during_int =
4951 		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4952 
4953 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4954 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4955 	req->flags = rte_cpu_to_le_16(flags);
4956 }
4957 
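/*
 * Example (editor's sketch): deriving a bnxt_coal profile from a single
 * "max latency timer" knob, following the ratios documented above
 * (min timer = 1/2, buffer timers = 1/4 of the interrupt latency timer)
 * and keeping the 6-bit DMA aggregation counts non-zero.
 */
static void bnxt_fill_coal_profile(struct bnxt_coal *coal,
				   uint16_t int_lat_tmr_max)
{
	coal->int_lat_tmr_max = int_lat_tmr_max;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = int_lat_tmr_max / 2;
	/* buf timers set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = int_lat_tmr_max / 4;
	coal->cmpl_aggr_dma_tmr_during_int = int_lat_tmr_max / 4;
	/* 6-bit values; zero would cause a non-stop IRQ (see above). */
	coal->num_cmpl_dma_aggr = 63;
	coal->num_cmpl_dma_aggr_during_int = 63;
	coal->num_cmpl_aggr_int = 63;
}
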
4958 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4959 		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4960 {
4961 	struct hwrm_ring_aggint_qcaps_input req = {0};
4962 	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4963 	uint32_t enables;
4964 	uint16_t flags;
4965 	int rc;
4966 
4967 	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4968 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4969 	HWRM_CHECK_RESULT();
4970 
4971 	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4972 	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4973 
4974 	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4975 		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4976 	agg_req->flags = rte_cpu_to_le_16(flags);
4977 	enables =
4978 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4979 	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4980 	agg_req->enables = rte_cpu_to_le_32(enables);
4981 
4982 	HWRM_UNLOCK();
4983 	return rc;
4984 }
4985 
4986 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4987 			struct bnxt_coal *coal, uint16_t ring_id)
4988 {
4989 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4990 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4991 						bp->hwrm_cmd_resp_addr;
4992 	int rc;
4993 
4994 	/* Set ring coalesce parameters only for Thor (P5) and Stratus NICs */
4995 	if (BNXT_CHIP_THOR(bp)) {
4996 		if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4997 			return -1;
4998 	} else if (bnxt_stratus_device(bp)) {
4999 		bnxt_hwrm_set_coal_params(coal, &req);
5000 	} else {
5001 		return 0;
5002 	}
5003 
5004 	HWRM_PREP(&req,
5005 		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5006 		  BNXT_USE_CHIMP_MB);
5007 	req.ring_id = rte_cpu_to_le_16(ring_id);
5008 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5009 	HWRM_CHECK_RESULT();
5010 	HWRM_UNLOCK();
5011 	return 0;
5012 }
5013 
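/*
 * Example (editor's sketch): applying one coalescing profile to every RX
 * completion ring.  Ring ids are taken from each queue's completion ring
 * struct, mirroring bnxt_vnic_rss_configure_thor() above.
 */
static void bnxt_set_all_rxq_coal(struct bnxt *bp, struct bnxt_coal *coal)
{
	uint16_t ring_id;
	int i;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		ring_id =
		    bp->rx_queues[i]->cp_ring->cp_ring_struct->fw_ring_id;
		if (bnxt_hwrm_set_ring_coal(bp, coal, ring_id))
			PMD_DRV_LOG(ERR, "Coalesce cfg failed for cpr %d\n", i);
	}
}
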
5014 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
5015 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5016 {
5017 	struct hwrm_func_backing_store_qcaps_input req = {0};
5018 	struct hwrm_func_backing_store_qcaps_output *resp =
5019 		bp->hwrm_cmd_resp_addr;
5020 	struct bnxt_ctx_pg_info *ctx_pg;
5021 	struct bnxt_ctx_mem_info *ctx;
5022 	int total_alloc_len;
5023 	int rc, i, tqm_rings;
5024 
5025 	if (!BNXT_CHIP_THOR(bp) ||
5026 	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5027 	    BNXT_VF(bp) ||
5028 	    bp->ctx)
5029 		return 0;
5030 
5031 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5032 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5033 	HWRM_CHECK_RESULT_SILENT();
5034 
5035 	total_alloc_len = sizeof(*ctx);
5036 	ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5037 			  RTE_CACHE_LINE_SIZE);
5038 	if (!ctx) {
5039 		rc = -ENOMEM;
5040 		goto ctx_err;
5041 	}
5042 
5043 	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5044 	ctx->qp_min_qp1_entries =
5045 		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5046 	ctx->qp_max_l2_entries =
5047 		rte_le_to_cpu_16(resp->qp_max_l2_entries);
5048 	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5049 	ctx->srq_max_l2_entries =
5050 		rte_le_to_cpu_16(resp->srq_max_l2_entries);
5051 	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5052 	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5053 	ctx->cq_max_l2_entries =
5054 		rte_le_to_cpu_16(resp->cq_max_l2_entries);
5055 	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5056 	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5057 	ctx->vnic_max_vnic_entries =
5058 		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5059 	ctx->vnic_max_ring_table_entries =
5060 		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5061 	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5062 	ctx->stat_max_entries =
5063 		rte_le_to_cpu_32(resp->stat_max_entries);
5064 	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5065 	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5066 	ctx->tqm_min_entries_per_ring =
5067 		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5068 	ctx->tqm_max_entries_per_ring =
5069 		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5070 	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5071 	if (!ctx->tqm_entries_multiple)
5072 		ctx->tqm_entries_multiple = 1;
5073 	ctx->mrav_max_entries =
5074 		rte_le_to_cpu_32(resp->mrav_max_entries);
5075 	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5076 	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5077 	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5078 	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5079 
5080 	if (!ctx->tqm_fp_rings_count)
5081 		ctx->tqm_fp_rings_count = bp->max_q;
5082 
5083 	tqm_rings = ctx->tqm_fp_rings_count + 1;
5084 
5085 	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5086 			    sizeof(*ctx_pg) * tqm_rings,
5087 			    RTE_CACHE_LINE_SIZE);
5088 	if (!ctx_pg) {
5089 		rc = -ENOMEM;
5090 		goto ctx_err;
5091 	}
5092 	for (i = 0; i < tqm_rings; i++, ctx_pg++)
5093 		ctx->tqm_mem[i] = ctx_pg;
5094 
5095 	bp->ctx = ctx;
5096 ctx_err:
5097 	HWRM_UNLOCK();
5098 	return rc;
5099 }
5100 
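/*
 * Editor's note (sketch): the TQM bnxt_ctx_pg_info structs above are
 * carved out of a single rte_malloc() block whose base pointer lands in
 * ctx->tqm_mem[0], so the matching teardown frees that base once and then
 * the context struct itself (assumes this allocation scheme):
 */
static void bnxt_free_ctx_mem_info(struct bnxt *bp)
{
	if (!bp->ctx)
		return;

	rte_free(bp->ctx->tqm_mem[0]);	/* releases the whole ctx_pg array */
	rte_free(bp->ctx);
	bp->ctx = NULL;
}
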
5101 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5102 {
5103 	struct hwrm_func_backing_store_cfg_input req = {0};
5104 	struct hwrm_func_backing_store_cfg_output *resp =
5105 		bp->hwrm_cmd_resp_addr;
5106 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
5107 	struct bnxt_ctx_pg_info *ctx_pg;
5108 	uint32_t *num_entries;
5109 	uint64_t *pg_dir;
5110 	uint8_t *pg_attr;
5111 	uint32_t ena;
5112 	int i, rc;
5113 
5114 	if (!ctx)
5115 		return 0;
5116 
5117 	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5118 	req.enables = rte_cpu_to_le_32(enables);
5119 
5120 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5121 		ctx_pg = &ctx->qp_mem;
5122 		req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5123 		req.qp_num_qp1_entries =
5124 			rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5125 		req.qp_num_l2_entries =
5126 			rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5127 		req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5128 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5129 				      &req.qpc_pg_size_qpc_lvl,
5130 				      &req.qpc_page_dir);
5131 	}
5132 
5133 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5134 		ctx_pg = &ctx->srq_mem;
5135 		req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5136 		req.srq_num_l2_entries =
5137 				 rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5138 		req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5139 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5140 				      &req.srq_pg_size_srq_lvl,
5141 				      &req.srq_page_dir);
5142 	}
5143 
5144 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5145 		ctx_pg = &ctx->cq_mem;
5146 		req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5147 		req.cq_num_l2_entries =
5148 				rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5149 		req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5150 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5151 				      &req.cq_pg_size_cq_lvl,
5152 				      &req.cq_page_dir);
5153 	}
5154 
5155 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5156 		ctx_pg = &ctx->vnic_mem;
5157 		req.vnic_num_vnic_entries =
5158 			rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5159 		req.vnic_num_ring_table_entries =
5160 			rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5161 		req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5162 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5163 				      &req.vnic_pg_size_vnic_lvl,
5164 				      &req.vnic_page_dir);
5165 	}
5166 
5167 	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5168 		ctx_pg = &ctx->stat_mem;
5169 		req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5170 		req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5171 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5172 				      &req.stat_pg_size_stat_lvl,
5173 				      &req.stat_page_dir);
5174 	}
5175 
5176 	req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5177 	num_entries = &req.tqm_sp_num_entries;
5178 	pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5179 	pg_dir = &req.tqm_sp_page_dir;
5180 	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5181 	for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5182 		if (!(enables & ena))
5183 			continue;
5184 
5187 		ctx_pg = ctx->tqm_mem[i];
5188 		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5189 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5190 	}
5191 
5192 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5193 	HWRM_CHECK_RESULT();
5194 	HWRM_UNLOCK();
5195 
5196 	return rc;
5197 }
5198 
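/*
 * Editor's note (sketch): the TQM loop above shifts "ena" left one bit
 * per ring, which relies on the TQM_SP/TQM_RING0..RING7 enable bits being
 * consecutive in the HSI definition.  A caller can build the enables mask
 * the same way ("tqm_rings" counts the SP ring plus the fast-path rings):
 */
static uint32_t bnxt_tqm_enables_mask(int tqm_rings)
{
	uint32_t ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
	uint32_t mask = 0;
	int i;

	for (i = 0; i < tqm_rings; i++, ena <<= 1)
		mask |= ena;
	return mask;
}
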
5199 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5200 {
5201 	struct hwrm_port_qstats_ext_input req = {0};
5202 	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5203 	struct bnxt_pf_info *pf = bp->pf;
5204 	int rc;
5205 
5206 	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5207 	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5208 		return 0;
5209 
5210 	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5211 
5212 	req.port_id = rte_cpu_to_le_16(pf->port_id);
5213 	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5214 		req.tx_stat_host_addr =
5215 			rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5216 		req.tx_stat_size =
5217 			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5218 	}
5219 	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5220 		req.rx_stat_host_addr =
5221 			rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5222 		req.rx_stat_size =
5223 			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5224 	}
5225 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5226 
5227 	if (rc) {
5228 		bp->fw_rx_port_stats_ext_size = 0;
5229 		bp->fw_tx_port_stats_ext_size = 0;
5230 	} else {
5231 		bp->fw_rx_port_stats_ext_size =
5232 			rte_le_to_cpu_16(resp->rx_stat_size);
5233 		bp->fw_tx_port_stats_ext_size =
5234 			rte_le_to_cpu_16(resp->tx_stat_size);
5235 	}
5236 
5237 	HWRM_CHECK_RESULT();
5238 	HWRM_UNLOCK();
5239 
5240 	return rc;
5241 }
5242 
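/*
 * Example (editor's sketch): after the command above succeeds, the FW has
 * DMA'd extended stats into the mapped host buffers, and
 * fw_rx_port_stats_ext_size bounds how much of rx_port_stats_ext this FW
 * actually reports.  Assumes bp->hw_rx_port_stats_ext is the virtual
 * address backing hw_rx_port_stats_ext_map.
 */
static uint64_t bnxt_ext_rx_stat(struct bnxt *bp, size_t field_off)
{
	const uint8_t *base = (const uint8_t *)bp->hw_rx_port_stats_ext;
	uint64_t v;

	/* Fields beyond what this FW version reports read as zero. */
	if (field_off + sizeof(v) > bp->fw_rx_port_stats_ext_size)
		return 0;
	memcpy(&v, base + field_off, sizeof(v));
	return rte_le_to_cpu_64(v);
}
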
5243 int
5244 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5245 {
5246 	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5247 	struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5248 		bp->hwrm_cmd_resp_addr;
5249 	int rc = 0;
5250 
5251 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5252 	req.tunnel_type = type;
5253 	req.dest_fid = bp->fw_fid;
5254 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5255 	HWRM_CHECK_RESULT();
5256 
5257 	HWRM_UNLOCK();
5258 
5259 	return rc;
5260 }
5261 
5262 int
5263 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5264 {
5265 	struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5266 	struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5267 		bp->hwrm_cmd_resp_addr;
5268 	int rc = 0;
5269 
5270 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5271 	req.tunnel_type = type;
5272 	req.dest_fid = bp->fw_fid;
5273 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5274 	HWRM_CHECK_RESULT();
5275 
5276 	HWRM_UNLOCK();
5277 
5278 	return rc;
5279 }
5280 
5281 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5282 {
5283 	struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5284 	struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5285 		bp->hwrm_cmd_resp_addr;
5286 	int rc = 0;
5287 
5288 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5289 	req.src_fid = bp->fw_fid;
5290 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5291 	HWRM_CHECK_RESULT();
5292 
5293 	if (type)
5294 		*type = rte_le_to_cpu_32(resp->tunnel_mask);
5295 
5296 	HWRM_UNLOCK();
5297 
5298 	return rc;
5299 }
5300 
5301 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5302 				   uint16_t *dst_fid)
5303 {
5304 	struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5305 	struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5306 		bp->hwrm_cmd_resp_addr;
5307 	int rc = 0;
5308 
5309 	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5310 	req.src_fid = bp->fw_fid;
5311 	req.tunnel_type = tun_type;
5312 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5313 	HWRM_CHECK_RESULT();
5314 
5315 	if (dst_fid)
5316 		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5317 
5318 	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5319 
5320 	HWRM_UNLOCK();
5321 
5322 	return rc;
5323 }
5324 
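/*
 * Example (editor's sketch): the four helpers above combine into a simple
 * redirect-and-verify flow.  The VXLAN tunnel-type constant name is
 * assumed from the HSI header's naming convention.
 */
static int bnxt_redirect_vxlan_to_self(struct bnxt *bp)
{
	uint8_t type =
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	uint16_t dst_fid;
	int rc;

	rc = bnxt_hwrm_tunnel_redirect(bp, type);
	if (rc)
		return rc;

	/* Read the redirect back; it should now point at our own FID. */
	rc = bnxt_hwrm_tunnel_redirect_info(bp, type, &dst_fid);
	if (rc)
		return rc;

	if (dst_fid != bp->fw_fid) {
		bnxt_hwrm_tunnel_redirect_free(bp, type);
		return -EINVAL;
	}
	return 0;
}
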
5325 int bnxt_hwrm_set_mac(struct bnxt *bp)
5326 {
5327 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5328 	struct hwrm_func_vf_cfg_input req = {0};
5329 	int rc = 0;
5330 
5331 	if (!BNXT_VF(bp))
5332 		return 0;
5333 
5334 	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5335 
5336 	req.enables =
5337 		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5338 	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5339 
5340 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5341 
5342 	HWRM_CHECK_RESULT();
5343 
5344 	HWRM_UNLOCK();
5345 
5346 	return rc;
5347 }
5348 
5349 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5350 {
5351 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5352 	struct hwrm_func_drv_if_change_input req = {0};
5353 	uint32_t flags;
5354 	int rc;
5355 
5356 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5357 		return 0;
5358 
5359 	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5360 	 * If FUNC_DRV_IF_CHANGE is sent with the "down" flag before
5361 	 * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
5362 	 */
5363 	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5364 		return 0;
5365 
5366 	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5367 
5368 	if (up)
5369 		req.flags =
5370 		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5371 
5372 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5373 
5374 	HWRM_CHECK_RESULT();
5375 	flags = rte_le_to_cpu_32(resp->flags);
5376 	HWRM_UNLOCK();
5377 
5378 	if (!up)
5379 		return 0;
5380 
5381 	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5382 		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5383 		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5384 	}
5385 
5386 	return 0;
5387 }
5388 
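/*
 * Example (editor's sketch): the intended call pattern at port start.
 * bnxt_reinit_after_fw_reset() is a hypothetical hook; the point is that
 * FW-dependent setup must be redone when HOT_FW_RESET_DONE is reported.
 */
static int bnxt_if_change_up(struct bnxt *bp)
{
	int rc;

	rc = bnxt_hwrm_if_change(bp, true);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		/* FW state was lost while the port was down. */
		rc = bnxt_reinit_after_fw_reset(bp);	/* assumed helper */
		bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
	}
	return rc;
}
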
5389 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5390 {
5391 	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5392 	struct bnxt_error_recovery_info *info = bp->recovery_info;
5393 	struct hwrm_error_recovery_qcfg_input req = {0};
5394 	uint32_t flags = 0;
5395 	unsigned int i;
5396 	int rc;
5397 
5398 	/* Older FW does not have error recovery support */
5399 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5400 		return 0;
5401 
5402 	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5403 
5404 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5405 
5406 	HWRM_CHECK_RESULT();
5407 
5408 	flags = rte_le_to_cpu_32(resp->flags);
5409 	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5410 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5411 	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5412 		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5413 
5414 	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5415 	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5416 		rc = -EINVAL;
5417 		goto err;
5418 	}
5419 
5420 	/* FW-returned values are in units of 100 msec */
5421 	info->driver_polling_freq =
5422 		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5423 	info->master_func_wait_period =
5424 		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5425 	info->normal_func_wait_period =
5426 		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5427 	info->master_func_wait_period_after_reset =
5428 		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5429 	info->max_bailout_time_after_reset =
5430 		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5431 	info->status_regs[BNXT_FW_STATUS_REG] =
5432 		rte_le_to_cpu_32(resp->fw_health_status_reg);
5433 	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5434 		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5435 	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5436 		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5437 	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5438 		rte_le_to_cpu_32(resp->reset_inprogress_reg);
5439 	info->reg_array_cnt =
5440 		rte_le_to_cpu_32(resp->reg_array_cnt);
5441 
5442 	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5443 		rc = -EINVAL;
5444 		goto err;
5445 	}
5446 
5447 	for (i = 0; i < info->reg_array_cnt; i++) {
5448 		info->reset_reg[i] =
5449 			rte_le_to_cpu_32(resp->reset_reg[i]);
5450 		info->reset_reg_val[i] =
5451 			rte_le_to_cpu_32(resp->reset_reg_val[i]);
5452 		info->delay_after_reset[i] =
5453 			resp->delay_after_reset[i];
5454 	}
5455 err:
5456 	HWRM_UNLOCK();
5457 
5458 	/* Map the FW status registers */
5459 	if (!rc)
5460 		rc = bnxt_map_fw_health_status_regs(bp);
5461 
5462 	if (rc) {
5463 		rte_free(bp->recovery_info);
5464 		bp->recovery_info = NULL;
5465 	}
5466 	return rc;
5467 }
5468 
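/*
 * Editor's note (sketch): with the recovery info captured above, health
 * monitoring reduces to sampling the heartbeat and reset counters once
 * per info->driver_polling_freq msec (hypothetical helper; register reads
 * go through the mapping set up by bnxt_map_fw_health_status_regs()):
 */
static bool bnxt_fw_looks_dead(uint32_t hb_prev, uint32_t hb_cur,
			       uint32_t rst_prev, uint32_t rst_cur)
{
	/* Heartbeat advancing => FW alive. */
	if (hb_cur != hb_prev)
		return false;
	/* Heartbeat frozen but reset count moved => recovery in progress. */
	return rst_cur == rst_prev;
}
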
5469 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5470 {
5471 	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5472 	struct hwrm_fw_reset_input req = {0};
5473 	int rc;
5474 
5475 	if (!BNXT_PF(bp))
5476 		return -EOPNOTSUPP;
5477 
5478 	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5479 
5480 	req.embedded_proc_type =
5481 		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5482 	req.selfrst_status =
5483 		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5484 	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5485 
5486 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5487 				    BNXT_USE_KONG(bp));
5488 
5489 	HWRM_CHECK_RESULT();
5490 	HWRM_UNLOCK();
5491 
5492 	return rc;
5493 }
5494 
5495 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5496 {
5497 	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5498 	struct hwrm_port_ts_query_input req = {0};
5499 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5500 	uint32_t flags = 0;
5501 	int rc;
5502 
5503 	if (!ptp)
5504 		return 0;
5505 
5506 	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5507 
5508 	switch (path) {
5509 	case BNXT_PTP_FLAGS_PATH_TX:
5510 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5511 		break;
5512 	case BNXT_PTP_FLAGS_PATH_RX:
5513 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5514 		break;
5515 	case BNXT_PTP_FLAGS_CURRENT_TIME:
5516 		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5517 		break;
5518 	}
5519 
5520 	req.flags = rte_cpu_to_le_32(flags);
5521 	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5522 
5523 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5524 
5525 	HWRM_CHECK_RESULT();
5526 
5527 	if (timestamp) {
5528 		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5529 		*timestamp |=
5530 			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5531 	}
5532 	HWRM_UNLOCK();
5533 
5534 	return rc;
5535 }
5536 
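/*
 * Example (editor's sketch): reading the free-running PHC through the
 * CURRENT_TIME path; the two 32-bit halves of ptp_msg_ts are recombined
 * into one 64-bit value by the function above.
 */
static int bnxt_get_phc_time(struct bnxt *bp, uint64_t *ts)
{
	return bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, ts);
}
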
5537 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5538 {
5539 	int rc = 0;
5540 
5541 	struct hwrm_cfa_counter_qcaps_input req = {0};
5542 	struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5543 
5544 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5545 		PMD_DRV_LOG(DEBUG,
5546 			    "Not a PF or trusted VF. Command not supported\n");
5547 		return 0;
5548 	}
5549 
5550 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5551 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5552 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5553 
5554 	HWRM_CHECK_RESULT();
5555 	if (max_fc)
5556 		*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5557 	HWRM_UNLOCK();
5558 
5559 	return 0;
5560 }
5561 
5562 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5563 {
5564 	int rc = 0;
5565 	struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5566 	struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5567 
5568 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5569 		PMD_DRV_LOG(DEBUG,
5570 			    "Not a PF or trusted VF. Command not supported\n");
5571 		return 0;
5572 	}
5573 
5574 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5575 
5576 	req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5577 	req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5578 	req.page_dir = rte_cpu_to_le_64(dma_addr);
5579 
5580 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5581 
5582 	HWRM_CHECK_RESULT();
5583 	if (ctx_id) {
5584 		*ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5585 		PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5586 	}
5587 	HWRM_UNLOCK();
5588 
5589 	return 0;
5590 }
5591 
5592 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5593 {
5594 	int rc = 0;
5595 	struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5596 	struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5597 
5598 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5599 		PMD_DRV_LOG(DEBUG,
5600 			    "Not a PF or trusted VF. Command not supported\n");
5601 		return 0;
5602 	}
5603 
5604 	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5605 
5606 	req.ctx_id = rte_cpu_to_le_16(ctx_id);
5607 
5608 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5609 
5610 	HWRM_CHECK_RESULT();
5611 	HWRM_UNLOCK();
5612 
5613 	return rc;
5614 }
5615 
5616 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5617 			      uint16_t cntr, uint16_t ctx_id,
5618 			      uint32_t num_entries, bool enable)
5619 {
5620 	struct hwrm_cfa_counter_cfg_input req = {0};
5621 	struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5622 	uint16_t flags = 0;
5623 	int rc;
5624 
5625 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5626 		PMD_DRV_LOG(DEBUG,
5627 			    "Not a PF or trusted VF. Command not supported\n");
5628 		return 0;
5629 	}
5630 
5631 	HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5632 
5633 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5634 	req.counter_type = rte_cpu_to_le_16(cntr);
5635 	flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5636 		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5637 	flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5638 	if (dir == BNXT_DIR_RX)
5639 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5640 	else if (dir == BNXT_DIR_TX)
5641 		flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5642 	req.flags = rte_cpu_to_le_16(flags);
5643 	req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5644 	req.num_entries = rte_cpu_to_le_32(num_entries);
5645 
5646 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5647 	HWRM_CHECK_RESULT();
5648 	HWRM_UNLOCK();
5649 
5650 	return 0;
5651 }
5652 
5653 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5654 				 enum bnxt_flow_dir dir,
5655 				 uint16_t cntr,
5656 				 uint16_t num_entries)
5657 {
5658 	struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5659 	struct hwrm_cfa_counter_qstats_input req = {0};
5660 	uint16_t flow_ctx_id = 0;
5661 	uint16_t flags = 0;
5662 	int rc = 0;
5663 
5664 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5665 		PMD_DRV_LOG(DEBUG,
5666 			    "Not a PF or trusted VF. Command not supported\n");
5667 		return 0;
5668 	}
5669 
5670 	if (dir == BNXT_DIR_RX) {
5671 		flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5672 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5673 	} else if (dir == BNXT_DIR_TX) {
5674 		flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5675 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5676 	}
5677 
5678 	HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5679 	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5680 	req.counter_type = rte_cpu_to_le_16(cntr);
5681 	req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5682 	req.num_entries = rte_cpu_to_le_16(num_entries);
5683 	req.flags = rte_cpu_to_le_16(flags);
5684 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5685 
5686 	HWRM_CHECK_RESULT();
5687 	HWRM_UNLOCK();
5688 
5689 	return 0;
5690 }
5691 
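/*
 * Example (editor's sketch): the five helpers above form a
 * register/enable/pull/disable/unregister flow-counter lifecycle.  The
 * counter-type constant name and the 2MB-page-backed "tbl_dma" table are
 * assumptions.
 */
static int bnxt_fc_pull_once(struct bnxt *bp, rte_iova_t tbl_dma,
			     uint16_t num_entries)
{
	uint16_t cntr = HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC;
	uint16_t ctx_id;
	int rc;

	rc = bnxt_hwrm_ctx_rgtr(bp, tbl_dma, &ctx_id);
	if (rc)
		return rc;

	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, cntr, ctx_id,
				       num_entries, true);
	if (rc)
		goto unrgtr;

	/* FW DMA-writes one snapshot of the RX counters into the table. */
	rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, cntr, num_entries);

	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, cntr, ctx_id,
				  num_entries, false);
unrgtr:
	bnxt_hwrm_ctx_unrgtr(bp, ctx_id);
	return rc;
}
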
5692 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5693 				uint16_t *first_vf_id)
5694 {
5695 	int rc = 0;
5696 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
5697 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5698 
5699 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5700 
5701 	req.fid = rte_cpu_to_le_16(fid);
5702 
5703 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5704 
5705 	HWRM_CHECK_RESULT();
5706 
5707 	if (first_vf_id)
5708 		*first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5709 
5710 	HWRM_UNLOCK();
5711 
5712 	return rc;
5713 }
5714 
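/*
 * Example (editor's sketch): FW hands out VF FIDs contiguously starting
 * at first_vf_id (an assumed but conventional layout), so a VF's FID can
 * be derived from its index under the parent PF:
 */
static int bnxt_vf_fid_get(struct bnxt *bp, uint16_t vf_idx, uint16_t *fid)
{
	uint16_t first_vf_id;
	int rc;

	rc = bnxt_hwrm_first_vf_id_query(bp, bp->fw_fid, &first_vf_id);
	if (rc)
		return rc;

	*fid = first_vf_id + vf_idx;
	return 0;
}
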
5715 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5716 {
5717 	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5718 	struct hwrm_cfa_pair_alloc_input req = {0};
5719 	int rc;
5720 
5721 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5722 		PMD_DRV_LOG(DEBUG,
5723 			    "Not a PF or trusted VF. Command not supported\n");
5724 		return 0;
5725 	}
5726 
5727 	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5728 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5729 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5730 		 bp->eth_dev->data->name, rep_bp->vf_id);
5731 
5732 	req.pf_b_id = rep_bp->parent_pf_idx;
5733 	req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5734 						rte_cpu_to_le_16(rep_bp->vf_id);
5735 	req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5736 	req.host_b_id = 1; /* TBD - Confirm if this is OK */
5737 
5738 	req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5739 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5740 	req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5741 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5742 	req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5743 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5744 	req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5745 			HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5746 
5747 	req.q_ab = rep_bp->rep_q_r2f;
5748 	req.q_ba = rep_bp->rep_q_f2r;
5749 	req.fc_ab = rep_bp->rep_fc_r2f;
5750 	req.fc_ba = rep_bp->rep_fc_f2r;
5751 
5752 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5753 	HWRM_CHECK_RESULT();
5754 
5755 	HWRM_UNLOCK();
5756 	PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5757 		    BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5758 	return rc;
5759 }
5760 
5761 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5762 {
5763 	struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5764 	struct hwrm_cfa_pair_free_input req = {0};
5765 	int rc;
5766 
5767 	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5768 		PMD_DRV_LOG(DEBUG,
5769 			    "Not a PF or trusted VF. Command not supported\n");
5770 		return 0;
5771 	}
5772 
5773 	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5774 	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5775 		 bp->eth_dev->data->name, rep_bp->vf_id);
5776 	req.pf_b_id = rep_bp->parent_pf_idx;
5777 	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5778 	req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5779 						rte_cpu_to_le_16(rep_bp->vf_id);
5780 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5781 	HWRM_CHECK_RESULT();
5782 	HWRM_UNLOCK();
5783 	PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
5784 		    rep_bp->vf_id);
5785 	return rc;
5786 }
5787