/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

void bnxt_free_ring(struct bnxt_ring *ring)
{
	if (!ring)
		return;

	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}
	ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

static void bnxt_init_ring_grps(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->max_ring_grps; i++)
		memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
		       sizeof(struct bnxt_ring_grp_info));
}

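/*
 * Allocate the ring group info table. On P5 chips, which have no ring
 * groups, the array is instead sized to hold RSS context IDs. Returns 0
 * on success or a negative errno on failure.
 */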
int bnxt_alloc_ring_grps(struct bnxt *bp)
{
	if (bp->max_tx_rings == 0) {
		PMD_DRV_LOG(ERR, "No TX rings available!\n");
		return -EBUSY;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp)) {
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
	} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
		return -ENOSPC;
	}

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info = rte_zmalloc("bnxt_grp_info",
					   sizeof(*bp->grp_info) *
					   bp->max_ring_grps, 0);
		if (!bp->grp_info) {
			PMD_DRV_LOG(ERR,
				    "Failed to alloc grp info tbl.\n");
			return -ENOMEM;
		}
		bnxt_init_ring_grps(bp);
	}

	return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also allocating
 * a TX and/or RX ring. Pass NULL as tx_ring_info and/or rx_ring_info to skip
 * allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     struct bnxt_cp_ring_info *nq_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
	struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
	uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
	int ag_ring_start, ag_bitmap_start, tpa_info_start;
	int ag_vmem_start, cp_ring_start, nq_ring_start;
	int total_alloc_len, rx_ring_start, rx_ring_len;
	struct rte_pci_device *pdev = bp->pdev;
	struct bnxt_ring *tx_ring, *rx_ring;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	rte_iova_t mz_phys_addr;
	int ag_bitmap_len = 0;
	int tpa_info_len = 0;
	int ag_vmem_len = 0;
	int ag_ring_len = 0;

	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
				   sizeof(struct hwrm_resp_hdr)) : 0;
	stats_len = RTE_ALIGN(stats_len, 128);

	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);

	int nq_vmem_len = nq_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);

	int nq_vmem_start = cp_vmem_start + cp_vmem_len;

	int tx_vmem_start = nq_vmem_start + nq_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						  tx_ring_struct->vmem_size) : 0;
	tx_vmem_len = RTE_ALIGN(tx_vmem_len, 128);

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				       rx_ring_struct->vmem_size) : 0;
	rx_vmem_len = RTE_ALIGN(rx_vmem_len, 128);

	ag_vmem_start = rx_vmem_start + rx_vmem_len;
	if (bnxt_need_agg_ring(bp->eth_dev))
		ag_vmem_len = rx_ring_info && rx_ring_info->ag_ring_struct ?
		    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->ag_ring_struct->vmem_size) : 0;

	cp_ring_start = ag_vmem_start + ag_vmem_len;
	cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);

	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
	nq_ring_start = cp_ring_start + cp_ring_len;
	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);

	int nq_ring_len = nq_ring_info ? cp_ring_len : 0;

	int tx_ring_start = nq_ring_start + nq_ring_len;
	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	tx_ring_len = RTE_ALIGN(tx_ring_len, 4096);

	rx_ring_start = tx_ring_start + tx_ring_len;
	rx_ring_start = RTE_ALIGN(rx_ring_start, 4096);
	rx_ring_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				       sizeof(struct rx_prod_pkt_bd)) : 0;
	rx_ring_len = RTE_ALIGN(rx_ring_len, 4096);

	ag_ring_start = rx_ring_start + rx_ring_len;
	ag_ring_start = RTE_ALIGN(ag_ring_start, 4096);

	if (bnxt_need_agg_ring(bp->eth_dev)) {
		ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
		ag_ring_len = RTE_ALIGN(ag_ring_len, 4096);

		ag_bitmap_len = rx_ring_info ?
			RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
				rx_ring_info->rx_ring_struct->ring_size *
				AGG_RING_SIZE_FACTOR)) : 0;

		if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
			int tpa_max = BNXT_TPA_MAX_AGGS(bp);

			tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
			tpa_info_len = RTE_CACHE_LINE_ROUNDUP(tpa_info_len);
		}
	}

	ag_bitmap_start = ag_ring_start + ag_ring_len;
	tpa_info_start = ag_bitmap_start + ag_bitmap_len;
	total_alloc_len = tpa_info_start + tpa_info_len;

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
						 socket_id,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY |
						 RTE_MEMZONE_IOVA_CONTIG,
						 getpagesize());
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);
	mz_phys_addr = mz->iova;

	if (tx_ring_info) {
		txq->mz = mz;
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct rte_mbuf **)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rxq->mz = mz;
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct rte_mbuf **)rx_ring->vmem;
		}

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			rx_ring = rx_ring_info->ag_ring_struct;

			rx_ring->bd = ((char *)mz->addr + ag_ring_start);
			rx_ring_info->ag_desc_ring =
			    (struct rx_prod_pkt_bd *)rx_ring->bd;
			rx_ring->bd_dma = mz->iova + ag_ring_start;
			rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
			rx_ring->mem_zone = (const void *)mz;

			if (!rx_ring->bd)
				return -ENOMEM;
			if (rx_ring->vmem_size) {
				rx_ring->vmem =
				    (void **)((char *)mz->addr + ag_vmem_start);
				rx_ring_info->ag_buf_ring =
				    (struct rte_mbuf **)rx_ring->vmem;
			}

			rx_ring_info->ag_bitmap =
			    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
					    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
					    ag_bitmap_start, ag_bitmap_len);

			/* TPA info */
			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
				rx_ring_info->tpa_info =
					((struct bnxt_tpa_info *)
					 ((char *)mz->addr + tpa_info_start));
		}
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz_phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	if (nq_ring_info) {
		struct bnxt_ring *nq_ring = nq_ring_info->cp_ring_struct;

		nq_ring->bd = (char *)mz->addr + nq_ring_start;
		nq_ring->bd_dma = mz_phys_addr + nq_ring_start;
		nq_ring_info->cp_desc_ring = nq_ring->bd;
		nq_ring_info->cp_desc_mapping = nq_ring->bd_dma;
		nq_ring->mem_zone = (const void *)mz;

		if (!nq_ring->bd)
			return -ENOMEM;
		if (nq_ring->vmem_size)
			*nq_ring->vmem = (char *)mz->addr + nq_vmem_start;

		nq_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	}

	return 0;
}

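/*
 * Default interrupt coalescing parameters; these are pushed to the
 * firmware per completion ring via bnxt_hwrm_set_ring_coal().
 */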
static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	/* min timer set to 1/2 of interrupt timer */
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	/* buf timer set to 1/4 of interrupt timer */
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}

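/*
 * Program the doorbell descriptor for a ring. P5 chips use 64-bit
 * doorbells keyed by ring type and firmware ring id (fid); older chips
 * use 32-bit doorbells at a fixed 0x80 stride indexed by map_idx. SR2
 * additionally sets the valid bit and the epoch mask/shift fields.
 */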
static void bnxt_set_db(struct bnxt *bp,
			struct bnxt_db_info *db,
			uint32_t ring_type,
			uint32_t map_idx,
			uint32_t fid,
			uint32_t ring_mask)
{
	if (BNXT_CHIP_P5(bp)) {
		int db_offset = DB_PF_OFFSET;

		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_CQ;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		if (BNXT_CHIP_SR2(bp)) {
			db->db_key64 |= DBR_VALID;
			db_offset = bp->legacy_db_size;
		} else if (BNXT_VF(bp)) {
			db_offset = DB_VF_OFFSET;
		}

		db->doorbell = (char *)bp->doorbell_base + db_offset;
		db->db_key64 |= (uint64_t)fid << DBR_XID_SFT;
		db->db_64 = true;
	} else {
		db->doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
		db->db_64 = false;
	}
	db->db_ring_mask = ring_mask;

	if (BNXT_CHIP_SR2(bp)) {
		db->db_epoch_mask = db->db_ring_mask + 1;
		db->db_epoch_shift = DBR_EPOCH_SFT -
					rte_log2_u32(db->db_epoch_mask);
	}
}

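/*
 * Allocate a completion ring in firmware and set up its doorbell.
 * On chips that have NQs, the completion ring must be bound to the
 * shared RX/TX notification queue.
 */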
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
				struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
	int cp_ring_index = queue_index + BNXT_RX_VEC_START;
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	if (BNXT_HAS_NQ(bp)) {
		if (nqr) {
			nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
		} else {
			PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
			return -EINVAL;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
				  HWRM_NA_SIGNATURE, nq_ring_id, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);
	bnxt_db_cq(cpr);

	return 0;
}

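/*
 * Allocate a single notification queue (NQ) shared by all RX/TX
 * completion rings. Only used on chips that have NQs; a no-op
 * otherwise, or if the ring has already been allocated.
 */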
int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_ring *ring;
	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
	uint8_t ring_type;
	int rc = 0;

	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
		return 0;

	nqr = rte_zmalloc_socket("nqr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (nqr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(nqr);
		return -ENOMEM;
	}

	ring->bd = (void *)nqr->cp_desc_ring;
	ring->bd_dma = nqr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	nqr->cp_ring_struct = ring;
	rc = bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
			      NULL, nqr, NULL, "l2_nqr");
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return -ENOMEM;
	}

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc) {
		rte_free(ring);
		rte_free(nqr);
		return rc;
	}

	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
		    ring->fw_ring_id, ring->ring_mask);
	bnxt_db_nq(nqr);

	bp->rxtx_nq_ring = nqr;

	return 0;
}

/* Free RX/TX NQ ring. */
void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;

	if (!nqr)
		return;

	bnxt_free_nq_ring(bp, nqr);

	bnxt_free_ring(nqr->cp_ring_struct);
	rte_free(nqr->cp_ring_struct);
	nqr->cp_ring_struct = NULL;
	rte_free(nqr);
	bp->rxtx_nq_ring = NULL;
}

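/*
 * Allocate the RX producer ring in firmware, record its id in the ring
 * group (when ring groups are used) and prime the RX doorbell.
 */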
static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	uint8_t ring_type;
	int rc = 0;

	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->rx_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->rx_db, ring_type, queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);

	return 0;
}

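/*
 * Allocate the RX aggregation ring in firmware; only done when
 * bnxt_need_agg_ring() says an agg ring is required. P5 chips use the
 * dedicated RX_AGG ring type; older chips allocate a second RX ring
 * mapped after the regular RX rings.
 */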
static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
{
	unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->ag_ring_struct;
	uint32_t hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	uint8_t ring_type;
	int rc = 0;

	if (!bnxt_need_agg_ring(bp->eth_dev))
		return 0;

	ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;

	if (BNXT_CHIP_P5(bp)) {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
		hw_stats_ctx_id = cpr->hw_stats_ctx_id;
	} else {
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
	}

	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, map_idx,
				  hw_stats_ctx_id, cp_ring->fw_ring_id, 0);
	if (rc)
		return rc;

	rxr->ag_raw_prod = 0;
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
	bnxt_set_db(bp, &rxr->ag_db, ring_type, map_idx, ring->fw_ring_id,
		    ring->ring_mask);
	bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);

	return 0;
}

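/*
 * Allocate all firmware resources for one RX queue: completion ring,
 * stats context, RX ring, optional aggregation ring and, on chips that
 * use them, the ring group. If the queue is already started, the rings
 * are repopulated and the doorbells rung.
 */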
int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_coal coal;
	int rc;

	/*
	 * Storage for the cp ring is allocated based on worst-case
	 * usage, the actual size to be used by hw is computed here.
	 */
	cp_ring->ring_size = rxr->rx_ring_struct->ring_size * 2;

	if (bnxt_need_agg_ring(bp->eth_dev))
		cp_ring->ring_size *= AGG_RING_SIZE_FACTOR;

	cp_ring->ring_mask = cp_ring->ring_size - 1;

	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
	}

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
		/*
		 * If a dedicated async event completion ring is not enabled,
		 * use the first completion ring from PF or VF as the default
		 * completion ring for async event handling.
		 */
		bp->async_cp_ring = cpr;
		rc = bnxt_hwrm_set_async_event_cr(bp);
		if (rc)
			goto err_out;
	}

	rc = bnxt_alloc_rx_ring(bp, queue_index);
	if (rc)
		goto err_out;

	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
	if (rc)
		goto err_out;

	if (BNXT_HAS_RING_GRPS(bp)) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, queue_index);
		if (rc)
			goto err_out;
	}

	if (rxq->rx_started) {
		if (bnxt_init_one_rx_ring(rxq)) {
			PMD_DRV_LOG(ERR,
				    "ring%d bnxt_init_one_rx_ring failed!\n",
				    queue_index);
			rc = -ENOMEM;
			goto err_out;
		}
		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
		if (bnxt_need_agg_ring(bp->eth_dev))
			bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
	}
	rxq->index = queue_index;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	bnxt_rxq_vec_setup(rxq);
#endif

	return 0;

err_out:
	PMD_DRV_LOG(ERR,
		    "Failed to allocate receive queue %d, rc %d.\n",
		    queue_index, rc);
	return rc;
}

/* Initialise all firmware ring ids to INVALID_HW_RING_ID; this is used
 * to free rings later if allocation of only some of the rings fails.
 */
static void bnxt_init_all_rings(struct bnxt *bp)
{
	unsigned int i = 0;
	struct bnxt_rx_queue *rxq;
	struct bnxt_ring *cp_ring;
	struct bnxt_ring *ring;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_tx_queue *txq;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		/* Rx-compl */
		cp_ring = rxq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-Reg */
		rxr = rxq->rx_ring;
		ring = rxr->rx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Rx-AGG */
		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring != NULL)
				ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		txq = bp->tx_queues[i];
		/* Tx cmpl */
		cp_ring = txq->cp_ring->cp_ring_struct;
		cp_ring->fw_ring_id = INVALID_HW_RING_ID;
		/* Tx ring */
		ring = txq->tx_ring->tx_ring_struct;
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
	struct bnxt_coal coal;
	unsigned int i;
	int rc = 0;

	bnxt_init_dflt_coal(&coal);
	bnxt_init_all_rings(bp);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int soc_id = bp->eth_dev->device->numa_node;
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring;

		if (bnxt_need_agg_ring(bp->eth_dev)) {
			ring = rxr->ag_ring_struct;
			if (ring == NULL) {
				bnxt_free_rxq_mem(rxq);

				rc = bnxt_init_rx_ring_struct(rxq, soc_id);
				if (rc)
					goto err_out;

				rc = bnxt_alloc_rings(bp, soc_id,
						      i, NULL, rxq,
						      rxq->cp_ring, NULL,
						      "rxr");
				if (rc)
					goto err_out;
			}
		}

		rc = bnxt_alloc_hwrm_rx_ring(bp, i);
		if (rc)
			goto err_out;
		bnxt_hwrm_set_ring_coal(bp, &coal,
					rxq->cp_ring->cp_ring_struct->fw_ring_id);
	}

	/* If something is wrong with Rx ring alloc, skip Tx ring alloc */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		rc = bnxt_alloc_hwrm_tx_ring(bp, i);
		if (rc)
			goto err_out;
	}

err_out:
	return rc;
}

/* Allocate dedicated async completion ring. */
int bnxt_alloc_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
	struct bnxt_ring *cp_ring;
	uint8_t ring_type;
	int rc;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return 0;

	cp_ring = cpr->cp_ring_struct;

	if (BNXT_HAS_NQ(bp))
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
	else
		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
	if (rc)
		return rc;

	cpr->cp_raw_cons = 0;
	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
		    cp_ring->fw_ring_id, cp_ring->ring_mask);

	if (BNXT_HAS_NQ(bp))
		bnxt_db_nq(cpr);
	else
		bnxt_db_cq(cpr);

	return bnxt_hwrm_set_async_event_cr(bp);
}

/* Free dedicated async completion ring. */
void bnxt_free_async_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
		return;

	if (BNXT_HAS_NQ(bp))
		bnxt_free_nq_ring(bp, cpr);
	else
		bnxt_free_cp_ring(bp, cpr);

	bnxt_free_ring(cpr->cp_ring_struct);
	rte_free(cpr->cp_ring_struct);
	cpr->cp_ring_struct = NULL;
	rte_free(cpr);
	bp->async_cp_ring = NULL;
}

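/*
 * Allocate and initialise the software state and memzone backing for
 * the dedicated async completion ring (used as an NQ on chips that
 * have NQs); the firmware ring itself is created later by
 * bnxt_alloc_async_cp_ring().
 */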
int bnxt_alloc_async_ring_struct(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = NULL;
	struct bnxt_ring *ring = NULL;

	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
		return 0;

	cpr = rte_zmalloc_socket("cpr",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE,
				 bp->eth_dev->device->numa_node);
	if (cpr == NULL)
		return -ENOMEM;

	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE,
				  bp->eth_dev->device->numa_node);
	if (ring == NULL) {
		rte_free(cpr);
		return -ENOMEM;
	}

	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
	ring->ring_mask = ring->ring_size - 1;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	bp->async_cp_ring = cpr;
	cpr->cp_ring_struct = ring;

	return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
				NULL, bp->async_cp_ring, NULL, "def_cp");
}

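/*
 * Allocate all firmware resources for one TX queue: completion ring,
 * stats context and TX ring, then program the TX doorbell. The TX ring
 * is bound to a CoS queue when the VNIC supports CoS classification.
 */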
int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	unsigned int idx = queue_index + bp->rx_cp_nr_rings;
	uint16_t tx_cosq_id = 0;
	struct bnxt_coal coal;
	int rc = 0;

	rc = bnxt_alloc_cmpl_ring(bp, idx, cpr);
	if (rc)
		goto err_out;

	bnxt_init_dflt_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

	rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
	if (rc)
		goto err_out;

	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
		tx_cosq_id = bp->tx_cosq_id[queue_index < bp->max_lltc ? queue_index : 0];
	else
		tx_cosq_id = bp->tx_cosq_id[0];

	rc = bnxt_hwrm_ring_alloc(bp, ring,
				  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
				  queue_index, cpr->hw_stats_ctx_id,
				  cp_ring->fw_ring_id,
				  tx_cosq_id);
	if (rc)
		goto err_out;

	bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
		    queue_index, ring->fw_ring_id,
		    ring->ring_mask);
	txq->index = idx;

	return rc;

err_out:
	bnxt_free_hwrm_tx_ring(bp, queue_index);
	return rc;
}