/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_io.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>

#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_mempool.h"

#define OTX2_NPA_DEV_NAME	RTE_STR(otx2_npa_dev_)
#define OTX2_NPA_DEV_NAME_LEN	(sizeof(OTX2_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)

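/* Issue an NPA_LF_ALLOC mailbox request for the configured aura size and
 * pool count, and cache the stack page geometry from the AF's response.
 */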
static inline int
npa_lf_alloc(struct otx2_npa_lf *lf)
{
	struct otx2_mbox *mbox = lf->mbox;
	struct npa_lf_alloc_req *req;
	struct npa_lf_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_npa_lf_alloc(mbox);
	/* Defensive check: mailbox message allocation can fail */
	if (req == NULL)
		return -ENOSPC;
	req->aura_sz = lf->aura_sz;
	req->nr_pools = lf->nr_pools;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return NPA_LF_ERR_ALLOC;

	lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
	lf->stack_pg_bytes = rsp->stack_pg_bytes;
	lf->qints = rsp->qints;

	return 0;
}

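/* Release the NPA LF back to the AF via the NPA_LF_FREE mailbox message */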
static int
npa_lf_free(struct otx2_mbox *mbox)
{
	otx2_mbox_alloc_msg_npa_lf_free(mbox);

	return otx2_mbox_process(mbox);
}

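/* One-time NPA LF setup: validate arguments, allocate the LF via mailbox,
 * then create the pool bitmap, qint contexts and per-aura limit tables.
 */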
static int
npa_lf_init(struct otx2_npa_lf *lf, uintptr_t base, uint8_t aura_sz,
	    uint32_t nr_pools, struct otx2_mbox *mbox)
{
	uint32_t i, bmp_sz;
	int rc;

	/* Sanity checks */
	if (!lf || !base || !mbox || !nr_pools)
		return NPA_LF_ERR_PARAM;

	if (base & AURA_ID_MASK)
		return NPA_LF_ERR_BASE_INVALID;

	if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
		return NPA_LF_ERR_PARAM;

	memset(lf, 0x0, sizeof(*lf));
	lf->base = base;
	lf->aura_sz = aura_sz;
	lf->nr_pools = nr_pools;
	lf->mbox = mbox;

	rc = npa_lf_alloc(lf);
	if (rc)
		goto exit;

	bmp_sz = rte_bitmap_get_memory_footprint(nr_pools);

	/* Allocate memory for the pool bitmap */
	lf->npa_bmp_mem = rte_zmalloc("npa_bmp_mem", bmp_sz,
				      RTE_CACHE_LINE_SIZE);
	if (lf->npa_bmp_mem == NULL) {
		rc = -ENOMEM;
		goto lf_free;
	}

	/* Initialize pool resource bitmap array */
	lf->npa_bmp = rte_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
	if (lf->npa_bmp == NULL) {
		rc = -EINVAL;
		goto bmap_mem_free;
	}

	/* Mark all pools available */
	for (i = 0; i < nr_pools; i++)
		rte_bitmap_set(lf->npa_bmp, i);

	/* Allocate memory for qint context */
	lf->npa_qint_mem = rte_zmalloc("npa_qint_mem",
			sizeof(struct otx2_npa_qint) * nr_pools, 0);
	if (lf->npa_qint_mem == NULL) {
		rc = -ENOMEM;
		goto bmap_free;
	}

	/* Allocate memory for the per-aura limits */
	lf->aura_lim = rte_zmalloc("npa_aura_lim_mem",
			sizeof(struct npa_aura_lim) * nr_pools, 0);
	if (lf->aura_lim == NULL) {
		rc = -ENOMEM;
		goto qint_free;
	}

	/* Init aura start & end limits */
	for (i = 0; i < nr_pools; i++) {
		lf->aura_lim[i].ptr_start = UINT64_MAX;
		lf->aura_lim[i].ptr_end = 0x0ull;
	}

	return 0;

qint_free:
	rte_free(lf->npa_qint_mem);
bmap_free:
	rte_bitmap_free(lf->npa_bmp);
bmap_mem_free:
	rte_free(lf->npa_bmp_mem);
lf_free:
	npa_lf_free(lf->mbox);
exit:
	return rc;
}

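/* Tear down everything npa_lf_init() created, in reverse order */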
static int
npa_lf_fini(struct otx2_npa_lf *lf)
{
	if (!lf)
		return NPA_LF_ERR_PARAM;

	rte_free(lf->aura_lim);
	rte_free(lf->npa_qint_mem);
	rte_bitmap_free(lf->npa_bmp);
	rte_free(lf->npa_bmp_mem);

	return npa_lf_free(lf->mbox);
}

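/* Convert an NPA_AURA_SZ_* enum value to the number of pools it encodes,
 * clamped to the valid [128, 1M] range.
 */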
static inline uint32_t
otx2_aura_size_to_u32(uint8_t val)
{
	if (val == NPA_AURA_SZ_0)
		return 128;
	if (val >= NPA_AURA_SZ_MAX)
		return BIT_ULL(20);

	return 1 << (val + 6);
}

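/* Devargs handler for "max_pools": clamp the value to [128, 1M] and store
 * it as an NPA_AURA_SZ_* exponent (log2(val) - 6).
 */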
static int
parse_max_pools(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	val = atoi(value);
	if (val < otx2_aura_size_to_u32(NPA_AURA_SZ_128))
		val = 128;
	if (val > otx2_aura_size_to_u32(NPA_AURA_SZ_1M))
		val = BIT_ULL(20);

	*(uint8_t *)extra_args = rte_log2_u32(val) - 6;
	return 0;
}

#define OTX2_MAX_POOLS "max_pools"

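/* Derive the aura size from the "max_pools" devarg, defaulting to
 * NPA_AURA_SZ_128 when no (or no parsable) devargs are supplied.
 */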
static uint8_t
otx2_parse_aura_size(struct rte_devargs *devargs)
{
	uint8_t aura_sz = NPA_AURA_SZ_128;
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		goto exit;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		goto exit;

	rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
	otx2_parse_common_devargs(kvlist);
	rte_kvargs_free(kvlist);
exit:
	return aura_sz;
}

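/* Ask the AF to attach an NPA LF to this PF/VF */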
static inline int
npa_lf_attach(struct otx2_mbox *mbox)
{
	struct rsrc_attach_req *req;

	req = otx2_mbox_alloc_msg_attach_resources(mbox);
	/* Defensive check: mailbox message allocation can fail */
	if (req == NULL)
		return -ENOSPC;
	req->npalf = true;

	return otx2_mbox_process(mbox);
}

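/* Ask the AF to detach the NPA LF from this PF/VF */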
static inline int
npa_lf_detach(struct otx2_mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);
	/* Defensive check: mailbox message allocation can fail */
	if (req == NULL)
		return -ENOSPC;
	req->npalf = true;

	return otx2_mbox_process(mbox);
}

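/* Query the AF for the MSIX vector offset assigned to the NPA LF */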
static inline int
npa_lf_get_msix_offset(struct otx2_mbox *mbox, uint16_t *npa_msixoff)
{
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get the NPA MSIX vector offset */
	otx2_mbox_alloc_msg_msix_offset(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc)
		return rc;

	*npa_msixoff = msix_rsp->npa_msixoff;

	return 0;
}

/**
 * @internal
 * Finalize NPA LF.
 */
int
otx2_npa_lf_fini(void)
{
	struct otx2_idev_cfg *idev;
	int rc = 0;

	idev = otx2_intra_dev_get_cfg();
	if (idev == NULL)
		return -ENOMEM;

	/* Release the shared NPA resources only on the last reference */
	if (rte_atomic16_add_return(&idev->npa_refcnt, -1) == 0) {
		otx2_npa_unregister_irqs(idev->npa_lf);
		rc |= npa_lf_fini(idev->npa_lf);
		rc |= npa_lf_detach(idev->npa_lf->mbox);
		otx2_npa_set_defaults(idev);
	}

	return rc;
}

/**
 * @internal
 * Initialize NPA LF.
 */
int
otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev)
{
	struct otx2_dev *dev = otx2_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_npa_lf *lf;
	uint16_t npa_msixoff;
	uint32_t nr_pools;
	uint8_t aura_sz;
	int rc;

	idev = otx2_intra_dev_get_cfg();
	if (idev == NULL)
		return -ENOMEM;

	/* Is the NPA LF already initialized by another driver? */
	if (rte_atomic16_add_return(&idev->npa_refcnt, 1) == 1) {
		rc = npa_lf_attach(dev->mbox);
		if (rc)
			goto fail;

		rc = npa_lf_get_msix_offset(dev->mbox, &npa_msixoff);
		if (rc)
			goto npa_detach;

		aura_sz = otx2_parse_aura_size(pci_dev->device.devargs);
		nr_pools = otx2_aura_size_to_u32(aura_sz);

		lf = &dev->npalf;
		rc = npa_lf_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
				 aura_sz, nr_pools, dev->mbox);
		if (rc)
			goto npa_detach;

		lf->pf_func = dev->pf_func;
		lf->npa_msixoff = npa_msixoff;
		lf->intr_handle = &pci_dev->intr_handle;
		lf->pci_dev = pci_dev;

		idev->npa_pf_func = dev->pf_func;
		idev->npa_lf = lf;
		/* Publish the fully initialized LF before other threads
		 * can observe idev->npa_lf.
		 */
		rte_smp_wmb();
		rc = otx2_npa_register_irqs(lf);
		if (rc)
			goto npa_fini;

		rte_mbuf_set_platform_mempool_ops("octeontx2_npa");
		otx2_npa_dbg("npa_lf=%p pools=%d sz=%d pf_func=0x%x msix=0x%x",
			     lf, nr_pools, aura_sz, lf->pf_func, npa_msixoff);
	}

	return 0;

npa_fini:
	npa_lf_fini(idev->npa_lf);
npa_detach:
	npa_lf_detach(dev->mbox);
fail:
	rte_atomic16_dec(&idev->npa_refcnt);
	return rc;
}

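/* Build the unique memzone name "otx2_npa_dev_<PCI BDF>" for this device */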
static inline char *
otx2_npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
{
	snprintf(name, OTX2_NPA_DEV_NAME_LEN,
		 OTX2_NPA_DEV_NAME PCI_PRI_FMT,
		 pci_dev->addr.domain, pci_dev->addr.bus,
		 pci_dev->addr.devid, pci_dev->addr.function);

	return name;
}

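/* Reserve a memzone for the otx2_dev object, initialize the base device
 * and take a reference on the shared NPA LF.
 */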
static int
otx2_npa_init(struct rte_pci_device *pci_dev)
{
	char name[OTX2_NPA_DEV_NAME_LEN];
	const struct rte_memzone *mz;
	struct otx2_dev *dev;
	int rc = -ENOMEM;

	mz = rte_memzone_reserve_aligned(otx2_npa_dev_to_name(pci_dev, name),
					 sizeof(*dev), SOCKET_ID_ANY,
					 0, OTX2_ALIGN);
	if (mz == NULL)
		goto error;

	dev = mz->addr;

	/* Initialize the base otx2_dev object */
	rc = otx2_dev_init(pci_dev, dev);
	if (rc)
		goto malloc_fail;

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto dev_uninit;

	dev->drv_inited = true;
	return 0;

dev_uninit:
	otx2_npa_lf_fini();
	otx2_dev_fini(pci_dev, dev);
malloc_fail:
	rte_memzone_free(mz);
error:
	otx2_err("Failed to initialize npa device rc=%d", rc);
	return rc;
}

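/* Undo otx2_npa_init(); defer the common teardown with -EAGAIN while the
 * shared NPA LF is still referenced by other devices.
 */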
static int
otx2_npa_fini(struct rte_pci_device *pci_dev)
{
	char name[OTX2_NPA_DEV_NAME_LEN];
	const struct rte_memzone *mz;
	struct otx2_dev *dev;

	mz = rte_memzone_lookup(otx2_npa_dev_to_name(pci_dev, name));
	if (mz == NULL)
		return -EINVAL;

	dev = mz->addr;
	if (!dev->drv_inited)
		goto dev_fini;

	dev->drv_inited = false;
	otx2_npa_lf_fini();

dev_fini:
	if (otx2_npa_lf_active(dev)) {
		otx2_info("%s: common resource in use by other devices",
			  pci_dev->name);
		return -EAGAIN;
	}

	otx2_dev_fini(pci_dev, dev);
	rte_memzone_free(mz);

	return 0;
}

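/* PCI remove callback; teardown is done in the primary process only */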
static int
npa_remove(struct rte_pci_device *pci_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	return otx2_npa_fini(pci_dev);
}

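/* PCI probe callback; device setup is done in the primary process only */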
static int
npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	RTE_SET_USED(pci_drv);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	return otx2_npa_init(pci_dev);
}

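/* PCI IDs of the OCTEON TX2 RVU NPA PF and VF handled by this driver */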
static const struct rte_pci_id pci_npa_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_NPA_PF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_NPA_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_npa = {
	.id_table = pci_npa_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = npa_probe,
	.remove = npa_remove,
};

RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
			      OTX2_MAX_POOLS "=<128-1048576>"
			      OTX2_NPA_LOCK_MASK "=<1-65535>");