xref: /dpdk/drivers/net/bnxt/tf_core/tf_em_host.c (revision 08e1af1a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <math.h>
8 #include <sys/param.h>
9 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_log.h>
12 
13 #include "tf_core.h"
14 #include "tf_util.h"
15 #include "tf_common.h"
16 #include "tf_em.h"
17 #include "tf_em_common.h"
18 #include "tf_msg.h"
19 #include "tfp.h"
20 #include "lookup3.h"
21 #include "tf_ext_flow_handle.h"
22 
23 #include "bnxt.h"
24 
25 #define PTU_PTE_VALID          0x1UL
26 #define PTU_PTE_LAST           0x2UL
27 #define PTU_PTE_NEXT_TO_LAST   0x4UL
28 
29 /* Number of pointers per page_size */
30 #define MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))
31 
32 /**
33  * Function to free a page table
34  *
35  * [in] tp
36  *   Pointer to the page table to free
37  */
38 static void
tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl * tp)39 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
40 {
41 	uint32_t i;
42 
43 	for (i = 0; i < tp->pg_count; i++) {
44 		if (!tp->pg_va_tbl[i]) {
45 			TFP_DRV_LOG(WARNING,
46 				    "No mapping for page: %d table: %016" PRIu64 "\n",
47 				    i,
48 				    (uint64_t)(uintptr_t)tp);
49 			continue;
50 		}
51 
52 		tfp_free(tp->pg_va_tbl[i]);
53 		tp->pg_va_tbl[i] = NULL;
54 	}
55 
56 	tp->pg_count = 0;
57 	tfp_free(tp->pg_va_tbl);
58 	tp->pg_va_tbl = NULL;
59 	tfp_free(tp->pg_pa_tbl);
60 	tp->pg_pa_tbl = NULL;
61 }
62 
63 /**
64  * Function to free an EM table
65  *
66  * [in] tbl
67  *   Pointer to the EM table to free
68  */
69 static void
tf_em_free_page_table(struct hcapi_cfa_em_table * tbl)70 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
71 {
72 	struct hcapi_cfa_em_page_tbl *tp;
73 	int i;
74 
75 	for (i = 0; i < tbl->num_lvl; i++) {
76 		tp = &tbl->pg_tbl[i];
77 		TFP_DRV_LOG(INFO,
78 			   "EEM: Freeing page table: size %u lvl %d cnt %u\n",
79 			   TF_EM_PAGE_SIZE,
80 			    i,
81 			    tp->pg_count);
82 
83 		tf_em_free_pg_tbl(tp);
84 	}
85 
86 	tbl->l0_addr = NULL;
87 	tbl->l0_dma_addr = 0;
88 	tbl->num_lvl = 0;
89 	tbl->num_data_pages = 0;
90 }
91 
92 /**
93  * Allocation of page tables
94  *
95  * [in] tfp
96  *   Pointer to a TruFlow handle
97  *
98  * [in] pg_count
99  *   Page count to allocate
100  *
101  * [in] pg_size
102  *   Size of each page
103  *
104  * Returns:
105  *   0       - Success
106  *   -ENOMEM - Out of memory
107  */
108 static int
tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl * tp,uint32_t pg_count,uint32_t pg_size)109 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
110 		   uint32_t pg_count,
111 		   uint32_t pg_size)
112 {
113 	uint32_t i;
114 	struct tfp_calloc_parms parms;
115 
116 	parms.nitems = pg_count;
117 	parms.size = sizeof(void *);
118 	parms.alignment = 0;
119 
120 	if (tfp_calloc(&parms) != 0)
121 		return -ENOMEM;
122 
123 	tp->pg_va_tbl = parms.mem_va;
124 
125 	if (tfp_calloc(&parms) != 0) {
126 		tfp_free(tp->pg_va_tbl);
127 		return -ENOMEM;
128 	}
129 
130 	tp->pg_pa_tbl = parms.mem_va;
131 
132 	tp->pg_count = 0;
133 	tp->pg_size = pg_size;
134 
135 	for (i = 0; i < pg_count; i++) {
136 		parms.nitems = 1;
137 		parms.size = pg_size;
138 		parms.alignment = TF_EM_PAGE_ALIGNMENT;
139 
140 		if (tfp_calloc(&parms) != 0)
141 			goto cleanup;
142 
143 		tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
144 		tp->pg_va_tbl[i] = parms.mem_va;
145 
146 		memset(tp->pg_va_tbl[i], 0, pg_size);
147 		tp->pg_count++;
148 	}
149 
150 	return 0;
151 
152 cleanup:
153 	tf_em_free_pg_tbl(tp);
154 	return -ENOMEM;
155 }
156 
157 /**
158  * Allocates EM page tables
159  *
160  * [in] tbl
161  *   Table to allocate pages for
162  *
163  * Returns:
164  *   0       - Success
165  *   -ENOMEM - Out of memory
166  */
167 static int
tf_em_alloc_page_table(struct hcapi_cfa_em_table * tbl)168 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
169 {
170 	struct hcapi_cfa_em_page_tbl *tp;
171 	int rc = 0;
172 	int i;
173 	uint32_t j;
174 
175 	for (i = 0; i < tbl->num_lvl; i++) {
176 		tp = &tbl->pg_tbl[i];
177 
178 		rc = tf_em_alloc_pg_tbl(tp,
179 					tbl->page_cnt[i],
180 					TF_EM_PAGE_SIZE);
181 		if (rc) {
182 			TFP_DRV_LOG(WARNING,
183 				"Failed to allocate page table: lvl: %d, rc:%s\n",
184 				i,
185 				strerror(-rc));
186 			goto cleanup;
187 		}
188 
189 		for (j = 0; j < tp->pg_count; j++) {
190 			TFP_DRV_LOG(INFO,
191 				"EEM: Allocated page table: size %u lvl %d cnt"
192 				" %u VA:%p PA:%p\n",
193 				TF_EM_PAGE_SIZE,
194 				i,
195 				tp->pg_count,
196 				(void *)(uintptr_t)tp->pg_va_tbl[j],
197 				(void *)(uintptr_t)tp->pg_pa_tbl[j]);
198 		}
199 	}
200 	return rc;
201 
202 cleanup:
203 	tf_em_free_page_table(tbl);
204 	return rc;
205 }
206 
/**
 * Links EM page tables
 *
 * Fills every pointer slot of the pages in level @tp with the physical
 * address of the corresponding page in the next level @tp_next, OR-ed
 * with PTU PTE flag bits and converted to little-endian. Returns once
 * all next-level pages have been linked.
 *
 * [in] tp
 *   Pointer to page table
 *
 * [in] tp_next
 *   Pointer to the next page table
 *
 * [in] set_pte_last
 *   Flag controlling if the page table is last
 */
static void
tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
		      struct hcapi_cfa_em_page_tbl *tp_next,
		      bool set_pte_last)
{
	uint64_t *pg_pa = tp_next->pg_pa_tbl;	/* PAs of the next level's pages */
	uint64_t *pg_va;
	uint64_t valid;
	uint32_t k = 0;	/* running index into the next level's page list */
	uint32_t i;
	uint32_t j;

	for (i = 0; i < tp->pg_count; i++) {
		pg_va = tp->pg_va_tbl[i];

		for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
			/* When linking the final pointer level, tag the last
			 * two entries so the hardware can detect the end.
			 */
			if (k == tp_next->pg_count - 2 && set_pte_last)
				valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
			else if (k == tp_next->pg_count - 1 && set_pte_last)
				valid = PTU_PTE_LAST | PTU_PTE_VALID;
			else
				valid = PTU_PTE_VALID;

			pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
			/* Done once every next-level page has an entry. */
			if (++k >= tp_next->pg_count)
				return;
		}
	}
}
248 
249 /**
250  * Setup a EM page table
251  *
252  * [in] tbl
253  *   Pointer to EM page table
254  */
255 static void
tf_em_setup_page_table(struct hcapi_cfa_em_table * tbl)256 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
257 {
258 	struct hcapi_cfa_em_page_tbl *tp_next;
259 	struct hcapi_cfa_em_page_tbl *tp;
260 	bool set_pte_last = 0;
261 	int i;
262 
263 	for (i = 0; i < tbl->num_lvl - 1; i++) {
264 		tp = &tbl->pg_tbl[i];
265 		tp_next = &tbl->pg_tbl[i + 1];
266 		if (i == tbl->num_lvl - 2)
267 			set_pte_last = 1;
268 		tf_em_link_page_table(tp, tp_next, set_pte_last);
269 	}
270 
271 	tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
272 	tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
273 }
274 
275 /**
276  * Unregisters EM Ctx in Firmware
277  *
278  * [in] tfp
279  *   Pointer to a TruFlow handle
280  *
281  * [in] tbl_scope_cb
282  *   Pointer to a table scope control block
283  *
284  * [in] dir
285  *   Receive or transmit direction
286  */
287 static void
tf_em_ctx_unreg(struct tf * tfp,struct tf_tbl_scope_cb * tbl_scope_cb,int dir)288 tf_em_ctx_unreg(struct tf *tfp,
289 		struct tf_tbl_scope_cb *tbl_scope_cb,
290 		int dir)
291 {
292 	struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
293 	struct hcapi_cfa_em_table *tbl;
294 	int i;
295 
296 	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
297 		tbl = &ctxp->em_tables[i];
298 
299 		if (tbl->num_entries != 0 && tbl->entry_size != 0) {
300 			tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
301 			tf_em_free_page_table(tbl);
302 		}
303 	}
304 }
305 
306 /**
307  * Registers EM Ctx in Firmware
308  *
309  * [in] tfp
310  *   Pointer to a TruFlow handle
311  *
312  * [in] tbl_scope_cb
313  *   Pointer to a table scope control block
314  *
315  * [in] dir
316  *   Receive or transmit direction
317  *
318  * Returns:
319  *   0       - Success
320  *   -ENOMEM - Out of Memory
321  */
322 static int
tf_em_ctx_reg(struct tf * tfp,struct tf_tbl_scope_cb * tbl_scope_cb,int dir)323 tf_em_ctx_reg(struct tf *tfp,
324 	      struct tf_tbl_scope_cb *tbl_scope_cb,
325 	      int dir)
326 {
327 	struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
328 	struct hcapi_cfa_em_table *tbl;
329 	int rc = 0;
330 	int i;
331 
332 	for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
333 		tbl = &ctxp->em_tables[i];
334 
335 		if (tbl->num_entries && tbl->entry_size) {
336 			rc = tf_em_size_table(tbl, TF_EM_PAGE_SIZE);
337 
338 			if (rc)
339 				goto cleanup;
340 
341 			rc = tf_em_alloc_page_table(tbl);
342 			if (rc)
343 				goto cleanup;
344 
345 			tf_em_setup_page_table(tbl);
346 			rc = tf_msg_em_mem_rgtr(tfp,
347 						tbl->num_lvl - 1,
348 						TF_EM_PAGE_SIZE_ENUM,
349 						tbl->l0_dma_addr,
350 						&tbl->ctx_id);
351 			if (rc)
352 				goto cleanup;
353 		}
354 	}
355 	return rc;
356 
357 cleanup:
358 	tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
359 	return rc;
360 }
361 
362 int
tf_em_ext_alloc(struct tf * tfp,struct tf_alloc_tbl_scope_parms * parms)363 tf_em_ext_alloc(struct tf *tfp,
364 		struct tf_alloc_tbl_scope_parms *parms)
365 {
366 	int rc;
367 	enum tf_dir dir;
368 	struct tf_tbl_scope_cb *tbl_scope_cb;
369 	struct hcapi_cfa_em_table *em_tables;
370 	struct tf_free_tbl_scope_parms free_parms;
371 	struct tf_rm_allocate_parms aparms = { 0 };
372 	struct tf_rm_free_parms fparms = { 0 };
373 	struct tfp_calloc_parms cparms;
374 	struct tf_session *tfs = NULL;
375 	struct em_ext_db *ext_db = NULL;
376 	void *ext_ptr = NULL;
377 	uint16_t pf;
378 
379 
380 	rc = tf_session_get_session_internal(tfp, &tfs);
381 	if (rc) {
382 		TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
383 		strerror(-rc));
384 		return rc;
385 	}
386 
387 	rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
388 	if (rc) {
389 		TFP_DRV_LOG(ERR,
390 			"Failed to get em_ext_db from session, rc:%s\n",
391 			strerror(-rc));
392 		return rc;
393 	}
394 	ext_db = (struct em_ext_db *)ext_ptr;
395 
396 	rc = tfp_get_pf(tfp, &pf);
397 	if (rc) {
398 		TFP_DRV_LOG(ERR,
399 			    "EEM: PF query error rc:%s\n",
400 			    strerror(-rc));
401 		goto cleanup;
402 	}
403 
404 	/* Get Table Scope control block from the session pool */
405 	aparms.rm_db = ext_db->eem_db[TF_DIR_RX];
406 	aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
407 	aparms.index = (uint32_t *)&parms->tbl_scope_id;
408 	rc = tf_rm_allocate(&aparms);
409 	if (rc) {
410 		TFP_DRV_LOG(ERR,
411 			    "Failed to allocate table scope\n");
412 		goto cleanup;
413 	}
414 
415 	/* Create tbl_scope, initialize and attach to the session */
416 	cparms.nitems = 1;
417 	cparms.size = sizeof(struct tf_tbl_scope_cb);
418 	cparms.alignment = 0;
419 	rc = tfp_calloc(&cparms);
420 	if (rc) {
421 		/* Log error */
422 		TFP_DRV_LOG(ERR,
423 			"Failed to allocate session table scope, rc:%s\n",
424 			strerror(-rc));
425 		goto cleanup;
426 	}
427 
428 	tbl_scope_cb = cparms.mem_va;
429 	tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
430 	tbl_scope_cb->pf = pf;
431 
432 	for (dir = 0; dir < TF_DIR_MAX; dir++) {
433 		rc = tf_msg_em_qcaps(tfp,
434 				     dir,
435 				     &tbl_scope_cb->em_caps[dir]);
436 		if (rc) {
437 			TFP_DRV_LOG(ERR,
438 				    "EEM: Unable to query for EEM capability,"
439 				    " rc:%s\n",
440 				    strerror(-rc));
441 			goto cleanup_ts;
442 		}
443 	}
444 
445 	/*
446 	 * Validate and setup table sizes
447 	 */
448 	if (tf_em_validate_num_entries(tbl_scope_cb, parms))
449 		goto cleanup_ts;
450 
451 	for (dir = 0; dir < TF_DIR_MAX; dir++) {
452 		/*
453 		 * Allocate tables and signal configuration to FW
454 		 */
455 		rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
456 		if (rc) {
457 			TFP_DRV_LOG(ERR,
458 				    "EEM: Unable to register for EEM ctx,"
459 				    " rc:%s\n",
460 				    strerror(-rc));
461 			goto cleanup_ts;
462 		}
463 
464 		em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
465 		rc = tf_msg_em_cfg(tfp,
466 				   em_tables[TF_KEY0_TABLE].num_entries,
467 				   em_tables[TF_KEY0_TABLE].ctx_id,
468 				   em_tables[TF_KEY1_TABLE].ctx_id,
469 				   em_tables[TF_RECORD_TABLE].ctx_id,
470 				   em_tables[TF_EFC_TABLE].ctx_id,
471 				   parms->hw_flow_cache_flush_timer,
472 				   dir);
473 		if (rc) {
474 			TFP_DRV_LOG(ERR,
475 				    "TBL: Unable to configure EEM in firmware"
476 				    " rc:%s\n",
477 				    strerror(-rc));
478 			goto cleanup_full;
479 		}
480 
481 		rc = tf_msg_em_op(tfp,
482 				  dir,
483 				  HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
484 
485 		if (rc) {
486 			TFP_DRV_LOG(ERR,
487 				    "EEM: Unable to enable EEM in firmware"
488 				    " rc:%s\n",
489 				    strerror(-rc));
490 			goto cleanup_full;
491 		}
492 
493 		/* Allocate the pool of offsets of the external memory.
494 		 * Initially, this is a single fixed size pool for all external
495 		 * actions related to a single table scope.
496 		 */
497 		rc = tf_create_tbl_pool_external(dir,
498 					    tbl_scope_cb,
499 					    em_tables[TF_RECORD_TABLE].num_entries,
500 					    em_tables[TF_RECORD_TABLE].entry_size);
501 		if (rc) {
502 			TFP_DRV_LOG(ERR,
503 				    "%s TBL: Unable to allocate idx pools %s\n",
504 				    tf_dir_2_str(dir),
505 				    strerror(-rc));
506 			goto cleanup_full;
507 		}
508 	}
509 
510 	/* Insert into session tbl_scope list */
511 	ll_insert(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
512 	return 0;
513 
514 cleanup_full:
515 	free_parms.tbl_scope_id = parms->tbl_scope_id;
516 	/* Insert into session list prior to ext_free */
517 	ll_insert(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
518 	tf_em_ext_free(tfp, &free_parms);
519 	return -EINVAL;
520 
521 cleanup_ts:
522 	tfp_free(tbl_scope_cb);
523 
524 cleanup:
525 	/* Free Table control block */
526 	fparms.rm_db = ext_db->eem_db[TF_DIR_RX];
527 	fparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
528 	fparms.index = parms->tbl_scope_id;
529 	rc = tf_rm_free(&fparms);
530 	if (rc)
531 		TFP_DRV_LOG(ERR, "Failed to free table scope\n");
532 
533 	return -EINVAL;
534 }
535 
/**
 * Frees an external EEM table scope previously created by
 * tf_em_ext_alloc(): releases the table-scope index back to the RM
 * database, destroys the external pools, disables EEM in firmware,
 * frees the EM page tables, and unlinks/frees the control block.
 * Teardown continues past individual failures (best effort).
 *
 * [in] tfp
 *   Pointer to a TruFlow handle
 *
 * [in] parms
 *   Pointer to table scope free parameters (tbl_scope_id identifies
 *   the scope to free)
 *
 * Returns:
 *   0       - Success
 *   -EINVAL - Session lookup or table-scope lookup failed
 */
int
tf_em_ext_free(struct tf *tfp,
	       struct tf_free_tbl_scope_parms *parms)
{
	int rc = 0;
	enum tf_dir  dir;
	struct tf_tbl_scope_cb *tbl_scope_cb;
	struct tf_session *tfs;
	struct em_ext_db *ext_db = NULL;
	void *ext_ptr = NULL;
	struct tf_rm_free_parms aparms = { 0 };

	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc) {
		TFP_DRV_LOG(ERR, "Failed to get tf_session, rc:%s\n",
			    strerror(-rc));
		return -EINVAL;
	}

	rc = tf_session_get_em_ext_db(tfp, &ext_ptr);
	if (rc) {
		TFP_DRV_LOG(ERR,
			"Failed to get em_ext_db from session, rc:%s\n",
			strerror(-rc));
		return rc;
	}
	ext_db = (struct em_ext_db *)ext_ptr;

	/* Look up the control block on the session's table-scope list. */
	tbl_scope_cb = tf_em_ext_common_tbl_scope_find(tfp, parms->tbl_scope_id);
	if (tbl_scope_cb == NULL) {
		TFP_DRV_LOG(ERR, "Table scope error\n");
		return -EINVAL;
	}

	/* Free Table control block */
	aparms.rm_db = ext_db->eem_db[TF_DIR_RX];
	aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
	aparms.index = parms->tbl_scope_id;
	rc = tf_rm_free(&aparms);
	if (rc) {
		/* Log and carry on: the remaining resources must still be
		 * released; rc is preserved and returned to the caller.
		 */
		TFP_DRV_LOG(ERR,
			    "Failed to free table scope\n");
	}

	/* free table scope locks */
	for (dir = 0; dir < TF_DIR_MAX; dir++) {
		/* Free associated external pools
		 */
		tf_destroy_tbl_pool_external(dir,
					     tbl_scope_cb);
		/* Best effort: disable EEM in firmware (return ignored). */
		tf_msg_em_op(tfp,
			     dir,
			     HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);

		/* free table scope and all associated resources */
		tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
	}

	/* remove from session list and free tbl_scope */
	ll_delete(&ext_db->tbl_scope_ll, &tbl_scope_cb->ll_entry);
	tfp_free(tbl_scope_cb);
	return rc;
}
599