/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Broadcom.
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_atomic.h>
#include <rte_bitmap.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "bcmfs_logs.h"
#include "bcmfs_qp.h"
#include "bcmfs_hw_defs.h"

/* TX or submission queue name */
static const char *txq_name = "tx";
/* Completion or receive queue name */
static const char *cmplq_name = "cmpl";

/* Helper to verify that a queue's physical address meets the required alignment */
static int
bcmfs_qp_check_queue_alignment(uint64_t phys_addr,
			       uint32_t align)
{
	if (((align - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

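/* Free the memzone backing a queue after writing an unused pattern over it */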
static void
bcmfs_queue_delete(struct bcmfs_queue *queue,
		   uint16_t queue_pair_id)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		BCMFS_LOG(DEBUG, "Invalid queue");
		return;
	}
	BCMFS_LOG(DEBUG, "Free ring %d type %d, memzone: %s",
		  queue_pair_id, queue->q_type, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x9B, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			BCMFS_LOG(ERR, "Error %d on freeing queue %s",
				  status, queue->memz_name);
	} else {
		BCMFS_LOG(DEBUG, "queue %s doesn't exist",
			  queue->memz_name);
	}
}

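/*
 * Reserve an IOVA-contiguous memzone for a queue, reusing an existing
 * memzone of sufficient size on a matching socket when one is found.
 */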
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		       int socket_id, unsigned int align)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
		    (socket_id == SOCKET_ID_ANY ||
		     socket_id == mz->socket_id)) {
			BCMFS_LOG(DEBUG, "re-use memzone already "
				  "allocated for %s", queue_name);
			return mz;
		}

		BCMFS_LOG(ERR, "Incompatible memzone already "
			  "allocated %s, size %u, socket %d. "
			  "Requested size %u, socket %u",
			  queue_name, (uint32_t)mz->len,
			  mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	BCMFS_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
		  queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
					   socket_id, RTE_MEMZONE_IOVA_CONTIG,
					   align);
}

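/*
 * Create a TX (submission) or completion queue: size it according to the
 * queue type, reserve an aligned memzone and record its addresses.
 */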
static int
bcmfs_queue_create(struct bcmfs_queue *queue,
		   struct bcmfs_qp_config *qp_conf,
		   uint16_t queue_pair_id,
		   enum bcmfs_queue_type qtype)
{
	const struct rte_memzone *qp_mz;
	char q_name[16];
	unsigned int align;
	uint32_t queue_size_bytes;
	int ret;

	if (qtype == BCMFS_RM_TXQ) {
		strlcpy(q_name, txq_name, sizeof(q_name));
		align = 1U << FS_RING_BD_ALIGN_ORDER;
		queue_size_bytes = qp_conf->nb_descriptors *
				   qp_conf->max_descs_req * FS_RING_DESC_SIZE;
		/* Round the queue size up to a multiple of 4K pages */
		queue_size_bytes = RTE_ALIGN_MUL_CEIL(queue_size_bytes,
						      FS_RING_PAGE_SIZE);
	} else if (qtype == BCMFS_RM_CPLQ) {
		strlcpy(q_name, cmplq_name, sizeof(q_name));
		align = 1U << FS_RING_CMPL_ALIGN_ORDER;

		/*
		 * Memory for the completion ring plus the MSI block.
		 * The MSI block is allocated here as well, so reserve
		 * twice the completion ring size.
		 */
		queue_size_bytes = 2 * FS_RING_CMPL_SIZE;
	} else {
		BCMFS_LOG(ERR, "Invalid queue selection");
		return -EINVAL;
	}

	queue->q_type = qtype;

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
		 "%s_%d_%s_%d_%s", "bcmfs", qtype, "qp_mem",
		 queue_pair_id, q_name);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
				       0, align);
	if (qp_mz == NULL) {
		BCMFS_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	if (bcmfs_qp_check_queue_alignment(qp_mz->iova, align)) {
		BCMFS_LOG(ERR, "Invalid alignment on queue create 0x%" PRIx64,
			  qp_mz->iova);
		ret = -EFAULT;
		goto queue_create_err;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	queue->queue_size = queue_size_bytes;

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);

	return ret;
}

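/*
 * Release a queue pair: stop the h/w ring and free its queues, context
 * pool and bitmap, provided no responses are still pending.
 */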
int
bcmfs_qp_release(struct bcmfs_qp **qp_addr)
{
	struct bcmfs_qp *qp = *qp_addr;

	if (qp == NULL) {
		BCMFS_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed */
	if ((qp->stats.enqueued_count - qp->stats.dequeued_count) == 0) {
		/* Stop the h/w ring */
		qp->ops->stopq(qp);
		/* Delete the queue pairs */
		bcmfs_queue_delete(&qp->tx_q, qp->qpair_id);
		bcmfs_queue_delete(&qp->cmpl_q, qp->qpair_id);
	} else {
		return -EAGAIN;
	}

	rte_bitmap_reset(qp->ctx_bmp);
	rte_free(qp->ctx_bmp_mem);
	rte_free(qp->ctx_pool);

	rte_free(qp);
	*qp_addr = NULL;

	return 0;
}

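/*
 * Set up a queue pair: allocate the qp structure, create the TX and
 * completion queues, build the context bitmap/pool and start the h/w ring.
 */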
int
bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
	       uint16_t queue_pair_id,
	       struct bcmfs_qp_config *qp_conf)
{
	struct bcmfs_qp *qp;
	uint32_t bmp_size;
	uint32_t nb_descriptors = qp_conf->nb_descriptors;
	uint16_t i;
	int rc;

	if (nb_descriptors < FS_RM_MIN_REQS) {
		BCMFS_LOG(ERR, "Can't create qp for %u descriptors",
			  nb_descriptors);
		return -EINVAL;
	}

	if (nb_descriptors > FS_RM_MAX_REQS)
		nb_descriptors = FS_RM_MAX_REQS;

	if (qp_conf->iobase == NULL) {
		BCMFS_LOG(ERR, "IO config space is null");
		return -EINVAL;
	}

	qp = rte_zmalloc_socket("BCM FS PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qp_conf->socket_id);
	if (qp == NULL) {
		BCMFS_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}

	qp->qpair_id = queue_pair_id;
	qp->ioreg = qp_conf->iobase;
	qp->nb_descriptors = nb_descriptors;
	qp->ops = qp_conf->ops;

	qp->stats.enqueued_count = 0;
	qp->stats.dequeued_count = 0;

	rc = bcmfs_queue_create(&qp->tx_q, qp_conf, qp->qpair_id,
				BCMFS_RM_TXQ);
	if (rc) {
		BCMFS_LOG(ERR, "Tx queue create failed queue_pair_id %u",
			  queue_pair_id);
		goto create_err;
	}

	rc = bcmfs_queue_create(&qp->cmpl_q, qp_conf, qp->qpair_id,
				BCMFS_RM_CPLQ);
	if (rc) {
		BCMFS_LOG(ERR, "Cmpl queue create failed queue_pair_id %u",
			  queue_pair_id);
		goto q_create_err;
	}

	/* ctx saving bitmap */
	bmp_size = rte_bitmap_get_memory_footprint(nb_descriptors);

	/* Allocate memory for bitmap */
	qp->ctx_bmp_mem = rte_zmalloc("ctx_bmp_mem", bmp_size,
				      RTE_CACHE_LINE_SIZE);
	if (qp->ctx_bmp_mem == NULL) {
		rc = -ENOMEM;
		goto qp_create_err;
	}

	/* Initialize pool resource bitmap array */
	qp->ctx_bmp = rte_bitmap_init(nb_descriptors, qp->ctx_bmp_mem,
				      bmp_size);
	if (qp->ctx_bmp == NULL) {
		rc = -EINVAL;
		goto bmap_mem_free;
	}

	/* Mark all pools available */
	for (i = 0; i < nb_descriptors; i++)
		rte_bitmap_set(qp->ctx_bmp, i);

	/* Allocate memory for context */
	qp->ctx_pool = rte_zmalloc("qp_ctx_pool",
				   sizeof(unsigned long) *
				   nb_descriptors, 0);
	if (qp->ctx_pool == NULL) {
		BCMFS_LOG(ERR, "ctx pool allocation failed");
		rc = -ENOMEM;
		goto bmap_free;
	}

	/* Start h/w ring */
	qp->ops->startq(qp);

	*qp_addr = qp;

	return 0;

bmap_free:
	rte_bitmap_reset(qp->ctx_bmp);
bmap_mem_free:
	rte_free(qp->ctx_bmp_mem);
qp_create_err:
	bcmfs_queue_delete(&qp->cmpl_q, queue_pair_id);
q_create_err:
	bcmfs_queue_delete(&qp->tx_q, queue_pair_id);
create_err:
	rte_free(qp);

	return rc;
}

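/*
 * Enqueue a burst of requests on a queue pair and ring the doorbell once
 * for all accepted requests; stops early if a request cannot be enqueued.
 */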
uint16_t
bcmfs_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct bcmfs_qp *tmp_qp = (struct bcmfs_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	uint16_t nb_ops_possible = nb_ops;
	int ret;

	if (unlikely(nb_ops == 0))
		return 0;

	while (nb_ops_sent != nb_ops_possible) {
		ret = tmp_qp->ops->enq_one_req(qp, *ops);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* This message cannot be enqueued */
			if (nb_ops_sent == 0)
				return 0;
			goto ring_db;
		}

		ops++;
		nb_ops_sent++;
	}

ring_db:
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	tmp_qp->ops->ring_db(tmp_qp);

	return nb_ops_sent;
}

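/* Dequeue a burst of completed requests and update the dequeue statistics */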
uint16_t
bcmfs_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct bcmfs_qp *tmp_qp = (struct bcmfs_qp *)qp;
	uint32_t deq = tmp_qp->ops->dequeue(tmp_qp, ops, nb_ops);

	tmp_qp->stats.dequeued_count += deq;

	return deq;
}

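/* Accumulate enqueue/dequeue statistics over a set of queue pairs */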
void bcmfs_qp_stats_get(struct bcmfs_qp **qp, int num_qp,
			struct bcmfs_qp_stats *stats)
{
	int i;

	if (stats == NULL) {
		BCMFS_LOG(ERR, "invalid param: stats %p",
			  stats);
		return;
	}

	for (i = 0; i < num_qp; i++) {
		if (qp[i] == NULL) {
			BCMFS_LOG(DEBUG, "Uninitialised qp %d", i);
			continue;
		}

		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

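/* Clear the statistics of each initialised queue pair in the set */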
void bcmfs_qp_stats_reset(struct bcmfs_qp **qp, int num_qp)
{
	int i;

	for (i = 0; i < num_qp; i++) {
		if (qp[i] == NULL) {
			BCMFS_LOG(DEBUG, "Uninitialised qp %d", i);
			continue;
		}
		memset(&qp[i]->stats, 0, sizeof(qp[i]->stats));
	}
}