/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define CPT_IQ_FC_LEN  128
#define CPT_IQ_GRP_LEN 16

#define CPT_IQ_NB_DESC_MULTIPLIER 40

/* The effective queue size available to software is
 * (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8), where SIZE_DIV40 is expressed in
 * chunks of 40 entries.
 *
 * CPT requires 320 free entries, i.e. 8 chunks of 40 (+8), and one more
 * chunk of 40 entries so that CPT can discard packets when the queue is
 * full (+1).
 */
#define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc)                                     \
	(PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)

#define CPT_IQ_GRP_SIZE(nb_desc)                                               \
	(CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)

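/* For example, with nb_desc = 1024:
 * SIZE_DIV40 = DIV_CEIL(1024, 40) + 1 + 8 = 26 + 9 = 35 chunks, and the
 * instruction group memory is 35 * 16 = 560 bytes.
 */
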
#define CPT_LF_MAX_NB_DESC     128000
#define CPT_LF_DEFAULT_NB_DESC 1024

static void
cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
{
	/* Enable all cpt lf error irqs except RQ_DISABLED and CQ_DISABLED */
	if (enb)
		plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
			     BIT_ULL(1)),
			    lf->rbase + CPT_LF_MISC_INT_ENA_W1S);
	else
		plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
			     BIT_ULL(1)),
			    lf->rbase + CPT_LF_MISC_INT_ENA_W1C);
}

static void
cpt_lf_misc_irq(void *param)
{
	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
	struct dev *dev = lf->dev;
	uint64_t intr;

	intr = plt_read64(lf->rbase + CPT_LF_MISC_INT);
	if (intr == 0)
		return;

	plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

	/* Dump lf registers */
	cpt_lf_print(lf);

	/* Clear interrupt */
	plt_write64(intr, lf->rbase + CPT_LF_MISC_INT);
}

static int
cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
{
	struct plt_pci_device *pci_dev = lf->pci_dev;
	struct plt_intr_handle *handle;
	int rc, vec;

	handle = pci_dev->intr_handle;

	vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
	/* Disable error interrupts while the handler is registered */
	cpt_lf_misc_intr_enb_dis(lf, false);
	/* Register handler for the misc interrupt vector */
	rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
	/* Enable error interrupts */
	cpt_lf_misc_intr_enb_dis(lf, true);

	return rc;
}

static void
cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
{
	struct plt_pci_device *pci_dev = lf->pci_dev;
	struct plt_intr_handle *handle;
	int vec;

	handle = pci_dev->intr_handle;

	vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
	/* Disable error interrupts */
	cpt_lf_misc_intr_enb_dis(lf, false);
	dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
}

static void
cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
{
	if (enb)
		plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1S);
	else
		plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1C);
}

static void
cpt_lf_done_irq(void *param)
{
	struct roc_cpt_lf *lf = param;
	uint64_t done_wait;
	uint64_t intr;

	/* Read the number of completed requests */
	intr = plt_read64(lf->rbase + CPT_LF_DONE);
	if (intr == 0)
		return;

	done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);

	/* Acknowledge the number of completed requests */
	plt_write64(intr, lf->rbase + CPT_LF_DONE_ACK);

	plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
}

static int
cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
{
	struct plt_pci_device *pci_dev = lf->pci_dev;
	struct plt_intr_handle *handle;
	int rc, vec;

	handle = pci_dev->intr_handle;

	vec = lf->msixoff + CPT_LF_INT_VEC_DONE;

	/* Disable done interrupt while the handler is registered */
	cpt_lf_done_intr_enb_dis(lf, false);

	/* Register handler for the done interrupt vector */
	rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);

	/* Enable done interrupt */
	cpt_lf_done_intr_enb_dis(lf, true);

	return rc;
}

static void
cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
{
	struct plt_pci_device *pci_dev = lf->pci_dev;
	struct plt_intr_handle *handle;
	int vec;

	handle = pci_dev->intr_handle;

	vec = lf->msixoff + CPT_LF_INT_VEC_DONE;

	/* Disable done interrupt */
	cpt_lf_done_intr_enb_dis(lf, false);
	dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
}

static int
cpt_lf_register_irqs(struct roc_cpt_lf *lf)
{
	int rc;

	if (lf->msixoff == MSIX_VECTOR_INVALID) {
		plt_err("Invalid CPTLF MSIX vector offset: 0x%x", lf->msixoff);
		return -EINVAL;
	}

	/* Register lf err interrupt */
	rc = cpt_lf_register_misc_irq(lf);
	if (rc) {
		plt_err("Error registering misc IRQ");
		return rc;
	}

	rc = cpt_lf_register_done_irq(lf);
	if (rc)
		plt_err("Error registering done IRQ");

	return rc;
}

static void
cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
{
	cpt_lf_unregister_misc_irq(lf);
	cpt_lf_unregister_done_irq(lf);
}

static void
cpt_lf_dump(struct roc_cpt_lf *lf)
{
	plt_cpt_dbg("CPT LF");
	plt_cpt_dbg("RBASE: 0x%016" PRIx64, lf->rbase);
	plt_cpt_dbg("LMT_BASE: 0x%016" PRIx64, lf->lmt_base);
	plt_cpt_dbg("MSIXOFF: 0x%x", lf->msixoff);
	plt_cpt_dbg("LF_ID: 0x%x", lf->lf_id);
	plt_cpt_dbg("NB DESC: %d", lf->nb_desc);
	plt_cpt_dbg("FC_ADDR: 0x%016" PRIx64, (uintptr_t)lf->fc_addr);
	plt_cpt_dbg("IQ.VADDR: 0x%016" PRIx64, (uintptr_t)lf->iq_vaddr);

	plt_cpt_dbg("CPT LF REG:");
	plt_cpt_dbg("LF_CTL[0x%016llx]: 0x%016" PRIx64, CPT_LF_CTL,
		    plt_read64(lf->rbase + CPT_LF_CTL));
	plt_cpt_dbg("LF_INPROG[0x%016llx]: 0x%016" PRIx64, CPT_LF_INPROG,
		    plt_read64(lf->rbase + CPT_LF_INPROG));

	plt_cpt_dbg("Q_BASE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_BASE,
		    plt_read64(lf->rbase + CPT_LF_Q_BASE));
	plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_SIZE,
		    plt_read64(lf->rbase + CPT_LF_Q_SIZE));
	plt_cpt_dbg("Q_INST_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_INST_PTR,
		    plt_read64(lf->rbase + CPT_LF_Q_INST_PTR));
	plt_cpt_dbg("Q_GRP_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_GRP_PTR,
		    plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR));
}

int
cpt_lf_outb_cfg(struct dev *dev, uint16_t sso_pf_func, uint16_t nix_pf_func,
		uint8_t lf_id, bool ena)
{
	struct cpt_inline_ipsec_cfg_msg *req;
	struct mbox *mbox = dev->mbox;

	req = mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->dir = CPT_INLINE_OUTBOUND;
	req->slot = lf_id;
	if (ena) {
		req->enable = 1;
		req->sso_pf_func = sso_pf_func;
		req->nix_pf_func = nix_pf_func;
	} else {
		req->enable = 0;
	}

	return mbox_process(mbox);
}

int
roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
			 struct roc_nix *roc_nix)
{
	bool ena = roc_nix ? true : false;
	uint16_t nix_pf_func = 0;
	uint16_t sso_pf_func = 0;

	if (ena) {
		nix_pf_func = roc_nix_get_pf_func(roc_nix);
		sso_pf_func = idev_sso_pffunc_get();
	}

	return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
}

int
roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
			     uint16_t param2)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	struct cpt_rx_inline_lf_cfg_msg *req;
	struct mbox *mbox;

	mbox = cpt->dev.mbox;

	req = mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->sso_pf_func = idev_sso_pffunc_get();
	req->param1 = param1;
	req->param2 = param2;

	return mbox_process(mbox);
}

int
roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	struct cpt_rxc_time_cfg_req *req;
	struct dev *dev = &cpt->dev;

	req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
	if (req == NULL)
		return -ENOSPC;

	req->blkaddr = 0;

	/* The step value is in microseconds. */
	req->step = cfg->step;

	/* The zombie timeout will be: zombie_limit * step microseconds */
	req->zombie_limit = cfg->zombie_limit;
	req->zombie_thres = cfg->zombie_thres;

	/* The active timeout will be: active_limit * step microseconds */
	req->active_limit = cfg->active_limit;
	req->active_thres = cfg->active_thres;

	return mbox_process(dev->mbox);
}

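/* Illustrative example (the values are hypothetical, not recommendations):
 * with step = 10 and zombie_limit = 100, a zombie entry times out after
 * 100 * 10 us = 1 ms; active_limit works the same way for active entries.
 *
 *	struct roc_cpt_rxc_time_cfg cfg = {
 *		.step = 10,
 *		.zombie_limit = 100, .zombie_thres = 10,
 *		.active_limit = 100, .active_thres = 10,
 *	};
 *	roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
 */
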
int
cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
{
	struct mbox *mbox = dev->mbox;
	int rc;

	/* Get MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(mbox, (void *)msix_rsp);

	return rc;
}

int
cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
{
	struct mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;

	if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
		return -EINVAL;

	/* Attach CPT LFs */
	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->cptlfs = nb_lf;
	req->modify = modify;
	req->cpt_blkaddr = blkaddr;

	return mbox_process(mbox);
}

int
cpt_lfs_detach(struct dev *dev)
{
	struct mbox *mbox = dev->mbox;
	struct rsrc_detach_req *req;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;

	req->cptlfs = 1;
	req->partial = 1;

	return mbox_process(mbox);
}

static int
cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
{
	struct mbox *mbox = dev->mbox;
	struct free_rsrcs_rsp *rsp;
	int rc;

	mbox_alloc_msg_free_rsrc_cnt(mbox);

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return -EIO;

	*nb_lf = PLT_MAX((uint16_t)rsp->cpt, (uint16_t)rsp->cpt1);
	return 0;
}

int
cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
	      bool inl_dev_sso)
{
	struct cpt_lf_alloc_req_msg *req;
	struct mbox *mbox = dev->mbox;

	if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
		return -EINVAL;

	req = mbox_alloc_msg_cpt_lf_alloc(mbox);
	if (!req)
		return -ENOSPC;

	req->nix_pf_func = 0;
	if (inl_dev_sso && nix_inl_dev_pffunc_get())
		req->sso_pf_func = nix_inl_dev_pffunc_get();
	else
		req->sso_pf_func = idev_sso_pffunc_get();
	req->eng_grpmsk = eng_grpmsk;
	req->blkaddr = blkaddr;

	return mbox_process(mbox);
}

int
cpt_lfs_free(struct dev *dev)
{
	mbox_alloc_msg_cpt_lf_free(dev->mbox);

	return mbox_process(dev->mbox);
}

static int
cpt_hardware_caps_get(struct dev *dev, struct roc_cpt *roc_cpt)
{
	struct cpt_caps_rsp_msg *rsp;
	int ret;

	mbox_alloc_msg_cpt_caps_get(dev->mbox);

	ret = mbox_process_msg(dev->mbox, (void *)&rsp);
	if (ret)
		return -EIO;

	roc_cpt->cpt_revision = rsp->cpt_revision;
	mbox_memcpy(roc_cpt->hw_caps, rsp->eng_caps,
		    sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);

	return 0;
}

static uint32_t
cpt_lf_iq_mem_calc(uint32_t nb_desc)
{
	uint32_t len;

	/* Space for instruction group memory */
	len = CPT_IQ_GRP_SIZE(nb_desc);

	/* Align to 128B */
	len = PLT_ALIGN(len, ROC_ALIGN);

	/* Space for flow control (FC) */
	len += CPT_IQ_FC_LEN;

	/* Space for the instruction queue itself */
	len += PLT_ALIGN(CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) *
				 CPT_IQ_NB_DESC_MULTIPLIER *
				 sizeof(struct cpt_inst_s),
			 ROC_ALIGN);

	return len;
}

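/* Instruction queue memory layout, as computed above and programmed by
 * cpt_iq_init() below:
 *
 *	iq_vaddr:            instruction group memory,
 *	                     CPT_IQ_GRP_SIZE(nb_desc) bytes, 128B aligned
 *	iq_vaddr + grp size: queue base (CPT_LF_Q_BASE), also used as the
 *	                     flow-control address (fc_addr);
 *	                     CPT_IQ_FC_LEN plus space for the instructions
 */
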
static inline void
cpt_iq_init(struct roc_cpt_lf *lf)
{
	union cpt_lf_q_size lf_q_size = {.u = 0x0};
	union cpt_lf_q_base lf_q_base = {.u = 0x0};
	uintptr_t addr;

	lf->io_addr = lf->rbase + CPT_LF_NQX(0);

	/* Disable command queue */
	roc_cpt_iq_disable(lf);

	/* Set command queue base address */
	addr = (uintptr_t)lf->iq_vaddr +
	       PLT_ALIGN(CPT_IQ_GRP_SIZE(lf->nb_desc), ROC_ALIGN);

	lf_q_base.u = addr;

	plt_write64(lf_q_base.u, lf->rbase + CPT_LF_Q_BASE);

	/* Set command queue size */
	lf_q_size.s.size_div40 = CPT_IQ_NB_DESC_SIZE_DIV40(lf->nb_desc);
	plt_write64(lf_q_size.u, lf->rbase + CPT_LF_Q_SIZE);

	lf->fc_addr = (uint64_t *)addr;
	lf->fc_hyst_bits = plt_log2_u32(lf->nb_desc) / 2;
	lf->fc_thresh = lf->nb_desc - (lf->nb_desc % (1 << lf->fc_hyst_bits));
}

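/* For example, with the default nb_desc of 1024: fc_hyst_bits =
 * log2(1024) / 2 = 5, so the flow-control count is reported in steps of
 * 2^5 = 32 entries, and fc_thresh = 1024 - (1024 % 32) = 1024.
 */
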
int
roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	uint8_t blkaddr[ROC_CPT_MAX_BLKS];
	struct msix_offset_rsp *rsp;
	uint8_t eng_grpmsk;
	int blknum = 0;
	int rc, i;

	blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
	blkaddr[1] = RVU_BLOCK_ADDR_CPT1;

	if ((roc_cpt->cpt_revision == ROC_CPT_REVISION_ID_98XX) &&
	    (cpt->dev.pf_func & 0x1))
		blknum = (blknum + 1) % ROC_CPT_MAX_BLKS;

	/* Request LF resources */
	rc = cpt_lfs_attach(&cpt->dev, blkaddr[blknum], true, nb_lf);

	/* Request LFs from the other block if the current block does not
	 * have enough LFs.
	 */
	if (roc_cpt->cpt_revision == ROC_CPT_REVISION_ID_98XX && rc == ENOSPC) {
		blknum = (blknum + 1) % ROC_CPT_MAX_BLKS;
		rc = cpt_lfs_attach(&cpt->dev, blkaddr[blknum], true, nb_lf);
	}
	if (rc) {
		plt_err("Could not attach LFs");
		return rc;
	}

	for (i = 0; i < nb_lf; i++)
		cpt->lf_blkaddr[i] = blkaddr[blknum];

	eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
		     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
		     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);

	rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr[blknum], false);
	if (rc)
		goto lfs_detach;

	rc = cpt_get_msix_offset(&cpt->dev, &rsp);
	if (rc)
		goto lfs_free;

	for (i = 0; i < nb_lf; i++)
		cpt->lf_msix_off[i] =
			(cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
				rsp->cpt1_lf_msixoff[i] :
				rsp->cptlf_msixoff[i];

	roc_cpt->nb_lf = nb_lf;

	return 0;

lfs_free:
	cpt_lfs_free(&cpt->dev);
lfs_detach:
	cpt_lfs_detach(&cpt->dev);
	return rc;
}

uint64_t
cpt_get_blkaddr(struct dev *dev)
{
	uint64_t reg;
	uint64_t off;

	/* Read the discovery register to find out which CPT block the LF is
	 * attached to. Assume that all CPT LFs of a pffunc are attached to
	 * the same block.
	 */
	if (dev_is_vf(dev))
		off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
	else
		off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);

	reg = plt_read64(dev->bar2 + off);

	return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
}

int
cpt_lf_init(struct roc_cpt_lf *lf)
{
	struct dev *dev = lf->dev;
	uint64_t blkaddr;
	void *iq_mem;
	int rc;

	if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
		lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;

	/* Allocate memory for the instruction queue of the CPT LF. */
	iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
	if (iq_mem == NULL)
		return -ENOMEM;
	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

	blkaddr = cpt_get_blkaddr(dev);
	lf->rbase = dev->bar2 + ((blkaddr << 20) | (lf->lf_id << 12));
	lf->iq_vaddr = iq_mem;
	lf->lmt_base = dev->lmt_base;
	lf->pf_func = dev->pf_func;

	/* Initialize instruction queue */
	cpt_iq_init(lf);

	rc = cpt_lf_register_irqs(lf);
	if (rc)
		goto disable_iq;

	return 0;

disable_iq:
	roc_cpt_iq_disable(lf);
	plt_free(iq_mem);
	return rc;
}

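/* The rbase arithmetic above places each RVU block in a 1 MB (1 << 20)
 * window inside BAR2 and each LF in a 4 kB (1 << 12) slot within it. For
 * example, a block address of 0xa and lf_id 1 map to bar2 + 0xa01000.
 */
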
int
roc_cpt_lf_init(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	int rc;

	lf->dev = &cpt->dev;
	lf->roc_cpt = roc_cpt;
	lf->msixoff = cpt->lf_msix_off[lf->lf_id];
	lf->pci_dev = cpt->pci_dev;

	rc = cpt_lf_init(lf);
	if (rc)
		return rc;

	/* LF init successful */
	roc_cpt->lf[lf->lf_id] = lf;
	return rc;
}

int
roc_cpt_dev_init(struct roc_cpt *roc_cpt)
{
	struct plt_pci_device *pci_dev;
	uint16_t nb_lf_avail;
	struct dev *dev;
	struct cpt *cpt;
	int rc;

	if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
		return -EINVAL;

	PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);

	cpt = roc_cpt_to_cpt_priv(roc_cpt);
	memset(cpt, 0, sizeof(*cpt));
	pci_dev = roc_cpt->pci_dev;
	dev = &cpt->dev;

	/* Initialize device */
	rc = dev_init(dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	cpt->pci_dev = pci_dev;
	roc_cpt->lmt_base = dev->lmt_base;

	rc = cpt_hardware_caps_get(dev, roc_cpt);
	if (rc) {
		plt_err("Could not determine hardware capabilities");
		goto fail;
	}

	rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
	if (rc) {
		plt_err("Could not get available lfs");
		goto fail;
	}

	/* Reserve 1 CPT LF for inline inbound */
	nb_lf_avail = PLT_MIN(nb_lf_avail, (uint16_t)(ROC_CPT_MAX_LFS - 1));

	roc_cpt->nb_lf_avail = nb_lf_avail;

	dev->roc_cpt = roc_cpt;

	/* Set it to idev if not already present */
	if (!roc_idev_cpt_get())
		roc_idev_cpt_set(roc_cpt);

	return 0;

fail:
	return rc;
}

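/* A minimal bring-up sketch for a caller of this API (illustrative only;
 * error handling and the exact fields a real driver sets are omitted):
 *
 *	struct roc_cpt *roc_cpt = ...;	// pci_dev already populated
 *	static struct roc_cpt_lf lf = {.lf_id = 0, .nb_desc = 1024};
 *
 *	roc_cpt_dev_init(roc_cpt);
 *	roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_SE);	// and AE/IE as needed
 *	roc_cpt_dev_configure(roc_cpt, 1);
 *	roc_cpt_lf_init(roc_cpt, &lf);
 *	roc_cpt_iq_enable(&lf);
 *
 * Teardown mirrors this: roc_cpt_lf_fini(), roc_cpt_dev_clear() and
 * roc_cpt_dev_fini().
 */
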
int
roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr, bool inval)
{
	union cpt_lf_ctx_flush reg;

	if (lf == NULL) {
		plt_err("Could not trigger CTX flush");
		return -ENOTSUP;
	}

	reg.u = 0;
	reg.s.inval = inval;
	reg.s.cptr = (uintptr_t)cptr >> 7;

	plt_write64(reg.u, lf->rbase + CPT_LF_CTX_FLUSH);

	return 0;
}

int
roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr)
{
	union cpt_lf_ctx_reload reg;

	if (lf == NULL) {
		plt_err("Could not trigger CTX reload");
		return -ENOTSUP;
	}

	reg.u = 0;
	reg.s.cptr = (uintptr_t)cptr >> 7;

	plt_write64(reg.u, lf->rbase + CPT_LF_CTX_RELOAD);

	return 0;
}

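/* Both registers take the context pointer shifted right by 7, i.e. in
 * units of 128 bytes, so cptr is expected to be 128B aligned. An
 * illustrative use after software updates an SA in memory:
 *
 *	roc_cpt_lf_ctx_flush(lf, sa, false);
 *
 * which flushes any stale cached copy of that context; pass inval = true
 * to also invalidate it, or use roc_cpt_lf_ctx_reload() to repopulate.
 */
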
void
cpt_lf_fini(struct roc_cpt_lf *lf)
{
	/* Unregister IRQs */
	cpt_lf_unregister_irqs(lf);

	/* Disable IQ */
	roc_cpt_iq_disable(lf);

	/* Free memory */
	plt_free(lf->iq_vaddr);
	lf->iq_vaddr = NULL;
}

void
roc_cpt_lf_fini(struct roc_cpt_lf *lf)
{
	if (lf == NULL)
		return;
	lf->roc_cpt->lf[lf->lf_id] = NULL;
	cpt_lf_fini(lf);
}

int
roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);

	if (cpt == NULL)
		return -EINVAL;

	/* Remove idev references */
	if (roc_idev_cpt_get() == roc_cpt)
		roc_idev_cpt_set(NULL);

	roc_cpt->nb_lf_avail = 0;

	roc_cpt->lmt_base = 0;

	return dev_fini(&cpt->dev, cpt->pci_dev);
}

void
roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	int i;

	if (cpt == NULL)
		return;

	for (i = 0; i < roc_cpt->nb_lf; i++)
		cpt->lf_msix_off[i] = 0;

	roc_cpt->nb_lf = 0;

	cpt_lfs_free(&cpt->dev);

	cpt_lfs_detach(&cpt->dev);
}

int
roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	struct dev *dev = &cpt->dev;
	struct cpt_eng_grp_req *req;
	struct cpt_eng_grp_rsp *rsp;
	int ret;

	req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
	if (req == NULL)
		return -EIO;

	switch (eng_type) {
	case CPT_ENG_TYPE_AE:
	case CPT_ENG_TYPE_SE:
	case CPT_ENG_TYPE_IE:
		break;
	default:
		return -EINVAL;
	}

	req->eng_type = eng_type;
	ret = mbox_process_msg(dev->mbox, (void *)&rsp);
	if (ret)
		return -EIO;

	if (rsp->eng_grp_num > 8) {
		plt_err("Invalid CPT engine group");
		return -ENOTSUP;
	}

	roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;

	return rsp->eng_grp_num;
}

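/* Quiesce the LF before touching its configuration: stop new enqueues
 * (CPT_LF_CTL), wait for in-flight instructions to drain, clear the
 * execution-enable bit (EENA), then wait for the group pointers and the
 * result/writeback counters to settle.
 */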
void
roc_cpt_iq_disable(struct roc_cpt_lf *lf)
{
	volatile union cpt_lf_q_grp_ptr grp_ptr = {.u = 0x0};
	volatile union cpt_lf_inprog lf_inprog = {.u = 0x0};
	union cpt_lf_ctl lf_ctl = {.u = 0x0};
	int timeout = 20;
	int cnt;

	/* Disable instruction enqueuing */
	plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

	/* Wait for instruction queue to become empty */
	do {
		lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
		if (!lf_inprog.s.inflight)
			break;

		plt_delay_ms(20);
		if (timeout-- < 0) {
			plt_err("CPT LF %d is still busy", lf->lf_id);
			break;
		}

	} while (1);

	/* Disable executions in the LF's queue.
	 * The queue should be empty at this point.
	 */
	lf_inprog.s.eena = 0x0;
	plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);

	/* Wait until the group pointers are equal and no partial result
	 * group writeback is pending.
	 */
	cnt = 0;
	do {
		lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
		if (lf_inprog.s.grb_partial)
			cnt = 0;
		else
			cnt++;
		grp_ptr.u = plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR);
	} while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));

	cnt = 0;
	do {
		lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
		if ((lf_inprog.s.inflight == 0) && (lf_inprog.s.gwb_cnt < 40) &&
		    ((lf_inprog.s.grb_cnt == 0) || (lf_inprog.s.grb_cnt == 40)))
			cnt++;
		else
			cnt = 0;
	} while (cnt < 10);
}

void
roc_cpt_iq_enable(struct roc_cpt_lf *lf)
{
	union cpt_lf_inprog lf_inprog;
	union cpt_lf_ctl lf_ctl;

	/* Disable command queue */
	roc_cpt_iq_disable(lf);

	/* Enable instruction queue enqueuing */
	lf_ctl.u = plt_read64(lf->rbase + CPT_LF_CTL);
	lf_ctl.s.ena = 1;
	lf_ctl.s.fc_ena = 1;
	lf_ctl.s.fc_up_crossing = 0;
	lf_ctl.s.fc_hyst_bits = lf->fc_hyst_bits;
	plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

	/* Enable command queue execution */
	lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
	lf_inprog.s.eena = 1;
	plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);

	cpt_lf_dump(lf);
}

int
roc_cpt_lmtline_init(struct roc_cpt *roc_cpt, struct roc_cpt_lmtline *lmtline,
		     int lf_id)
{
	struct roc_cpt_lf *lf;

	lf = roc_cpt->lf[lf_id];
	if (lf == NULL)
		return -ENOTSUP;

	lmtline->io_addr = lf->io_addr;
	if (roc_model_is_cn10k())
		lmtline->io_addr |= ROC_CN10K_CPT_INST_DW_M1 << 4;

	lmtline->fc_addr = lf->fc_addr;
	lmtline->lmt_base = lf->lmt_base;

	return 0;
}

int
roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
		  uint16_t sa_len)
{
	uintptr_t lmt_base = lf->lmt_base;
	union cpt_res_s res, *hw_res;
	uint64_t lmt_arg, io_addr;
	struct cpt_inst_s *inst;
	uint16_t lmt_id;
	uint64_t *dptr;
	int i;

	ROC_LMT_CPT_BASE_ID_GET(lmt_base, lmt_id);
	inst = (struct cpt_inst_s *)lmt_base;

	memset(inst, 0, sizeof(struct cpt_inst_s));

	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
	if (hw_res == NULL) {
		plt_err("Couldn't allocate memory for result address");
		return -ENOMEM;
	}

	dptr = plt_zmalloc(sa_len, 8);
	if (dptr == NULL) {
		plt_err("Couldn't allocate memory for SA dptr");
		plt_free(hw_res);
		return -ENOMEM;
	}

	for (i = 0; i < (sa_len / 8); i++)
		dptr[i] = plt_cpu_to_be_64(((uint64_t *)sa_dptr)[i]);

	/* Fill CPT_INST_S for WRITE_SA microcode op */
	hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
	inst->res_addr = (uint64_t)hw_res;
	inst->dptr = (uint64_t)dptr;
	inst->w4.s.param2 = sa_len >> 3;
	inst->w4.s.dlen = sa_len;
	inst->w4.s.opcode_major = ROC_IE_OT_MAJOR_OP_WRITE_SA;
	inst->w4.s.opcode_minor = ROC_IE_OT_MINOR_OP_WRITE_SA;
	inst->w7.s.cptr = (uint64_t)sa_cptr;
	inst->w7.s.ctx_val = 1;
	inst->w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;

	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
	io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;

	roc_lmt_submit_steorl(lmt_arg, io_addr);
	plt_io_wmb();

	/* Use a 1 min timeout for the poll */
	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();

	/* Wait until the CPT instruction completes */
	do {
		res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
		if (unlikely(plt_tsc_cycles() > timeout))
			break;
	} while (res.cn10k.compcode == CPT_COMP_NOT_DONE);

	plt_free(dptr);
	plt_free(hw_res);

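	/* The WRITE_SA microcode op is expected to complete with
	 * CPT_COMP_WARN; any other completion code here means the operation
	 * failed or the poll above timed out.
	 */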
	if (res.cn10k.compcode != CPT_COMP_WARN) {
		plt_err("Write SA operation failed or timed out");
		return -ETIMEDOUT;
	}

	return 0;
}
984