1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
3 * All rights reserved.
4 */
5
6 #include <string.h>
7 #include <math.h>
8 #include <sys/param.h>
9 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_log.h>
12
13 #include "tf_core.h"
14 #include "tf_util.h"
15 #include "tf_common.h"
16 #include "tf_em.h"
17 #include "tf_em_common.h"
18 #include "tf_msg.h"
19 #include "tfp.h"
20 #include "tf_device.h"
21 #include "tf_ext_flow_handle.h"
22 #include "cfa_resource_types.h"
23
24 #include "bnxt.h"
25
/* Number of page-table pointers that fit in one page of the given size */
#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))

/**
 * EM Ext (EEM) resource-manager databases, one per direction (RX/TX).
 */
void *eem_db[TF_DIR_MAX];

/**
 * Init flag, set on bind and cleared on unbind
 */
static uint8_t init;

/**
 * Host or system memory backing for the EEM tables; recorded at bind time
 * from the caller-supplied configuration.
 */
static enum tf_mem_type mem_type;

/** Table scope array, indexed by slot; looked up by tbl_scope_id */
struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
46
47 /* API defined in tf_em.h */
48 struct tf_tbl_scope_cb *
tbl_scope_cb_find(uint32_t tbl_scope_id)49 tbl_scope_cb_find(uint32_t tbl_scope_id)
50 {
51 int i;
52 struct tf_rm_is_allocated_parms parms = { 0 };
53 int allocated;
54
55 /* Check that id is valid */
56 parms.rm_db = eem_db[TF_DIR_RX];
57 parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
58 parms.index = tbl_scope_id;
59 parms.allocated = &allocated;
60
61 i = tf_rm_is_allocated(&parms);
62
63 if (i < 0 || allocated != TF_RM_ALLOCATED_ENTRY_IN_USE)
64 return NULL;
65
66 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
67 if (tbl_scopes[i].tbl_scope_id == tbl_scope_id)
68 return &tbl_scopes[i];
69 }
70
71 return NULL;
72 }
73
74 int
tf_create_tbl_pool_external(enum tf_dir dir,struct tf_tbl_scope_cb * tbl_scope_cb,uint32_t num_entries,uint32_t entry_sz_bytes)75 tf_create_tbl_pool_external(enum tf_dir dir,
76 struct tf_tbl_scope_cb *tbl_scope_cb,
77 uint32_t num_entries,
78 uint32_t entry_sz_bytes)
79 {
80 struct tfp_calloc_parms parms;
81 uint32_t i;
82 int32_t j;
83 int rc = 0;
84 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
85
86 parms.nitems = num_entries;
87 parms.size = sizeof(uint32_t);
88 parms.alignment = 0;
89
90 if (tfp_calloc(&parms) != 0) {
91 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
92 tf_dir_2_str(dir), strerror(ENOMEM));
93 return -ENOMEM;
94 }
95
96 /* Create empty stack
97 */
98 rc = stack_init(num_entries, parms.mem_va, pool);
99
100 if (rc != 0) {
101 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
102 tf_dir_2_str(dir), strerror(-rc));
103 goto cleanup;
104 }
105
106 /* Save the malloced memory address so that it can
107 * be freed when the table scope is freed.
108 */
109 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
110
111 /* Fill pool with indexes in reverse
112 */
113 j = (num_entries - 1) * entry_sz_bytes;
114
115 for (i = 0; i < num_entries; i++) {
116 rc = stack_push(pool, j);
117 if (rc != 0) {
118 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
119 tf_dir_2_str(dir), strerror(-rc));
120 goto cleanup;
121 }
122
123 if (j < 0) {
124 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
125 dir, j);
126 goto cleanup;
127 }
128 j -= entry_sz_bytes;
129 }
130
131 if (!stack_is_full(pool)) {
132 rc = -EINVAL;
133 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
134 tf_dir_2_str(dir), strerror(-rc));
135 goto cleanup;
136 }
137 return 0;
138 cleanup:
139 tfp_free((void *)parms.mem_va);
140 return rc;
141 }
142
143 /**
144 * Destroy External Tbl pool of memory indexes.
145 *
146 * [in] dir
147 * direction
148 * [in] tbl_scope_cb
149 * pointer to the table scope
150 */
151 void
tf_destroy_tbl_pool_external(enum tf_dir dir,struct tf_tbl_scope_cb * tbl_scope_cb)152 tf_destroy_tbl_pool_external(enum tf_dir dir,
153 struct tf_tbl_scope_cb *tbl_scope_cb)
154 {
155 uint32_t *ext_act_pool_mem =
156 tbl_scope_cb->ext_act_pool_mem[dir];
157
158 tfp_free(ext_act_pool_mem);
159 }
160
161 /**
162 * Allocate External Tbl entry from the scope pool.
163 *
164 * [in] tfp
165 * Pointer to Truflow Handle
166 * [in] parms
167 * Allocation parameters
168 *
169 * Return:
170 * 0 - Success, entry allocated - no search support
171 * -ENOMEM -EINVAL -EOPNOTSUPP
172 * - Failure, entry not allocated, out of resources
173 */
174 int
tf_tbl_ext_alloc(struct tf * tfp,struct tf_tbl_alloc_parms * parms)175 tf_tbl_ext_alloc(struct tf *tfp,
176 struct tf_tbl_alloc_parms *parms)
177 {
178 int rc;
179 uint32_t index;
180 struct tf_tbl_scope_cb *tbl_scope_cb;
181 struct stack *pool;
182
183 TF_CHECK_PARMS2(tfp, parms);
184
185 /* Get the pool info from the table scope
186 */
187 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
188
189 if (tbl_scope_cb == NULL) {
190 TFP_DRV_LOG(ERR,
191 "%s, table scope not allocated\n",
192 tf_dir_2_str(parms->dir));
193 return -EINVAL;
194 }
195 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
196
197 /* Allocate an element
198 */
199 rc = stack_pop(pool, &index);
200
201 if (rc != 0) {
202 TFP_DRV_LOG(ERR,
203 "%s, Allocation failed, type:%d\n",
204 tf_dir_2_str(parms->dir),
205 parms->type);
206 return rc;
207 }
208
209 *parms->idx = index;
210 return rc;
211 }
212
213 /**
214 * Free External Tbl entry to the scope pool.
215 *
216 * [in] tfp
217 * Pointer to Truflow Handle
218 * [in] parms
219 * Allocation parameters
220 *
221 * Return:
222 * 0 - Success, entry freed
223 *
224 * - Failure, entry not successfully freed for these reasons
225 * -ENOMEM
226 * -EOPNOTSUPP
227 * -EINVAL
228 */
229 int
tf_tbl_ext_free(struct tf * tfp,struct tf_tbl_free_parms * parms)230 tf_tbl_ext_free(struct tf *tfp,
231 struct tf_tbl_free_parms *parms)
232 {
233 int rc = 0;
234 uint32_t index;
235 struct tf_tbl_scope_cb *tbl_scope_cb;
236 struct stack *pool;
237
238 TF_CHECK_PARMS2(tfp, parms);
239
240 /* Get the pool info from the table scope
241 */
242 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
243
244 if (tbl_scope_cb == NULL) {
245 TFP_DRV_LOG(ERR,
246 "%s, table scope error\n",
247 tf_dir_2_str(parms->dir));
248 return -EINVAL;
249 }
250 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
251
252 index = parms->idx;
253
254 rc = stack_push(pool, index);
255
256 if (rc != 0) {
257 TFP_DRV_LOG(ERR,
258 "%s, consistency error, stack full, type:%d, idx:%d\n",
259 tf_dir_2_str(parms->dir),
260 parms->type,
261 index);
262 }
263 return rc;
264 }
265
266 uint32_t
tf_em_get_key_mask(int num_entries)267 tf_em_get_key_mask(int num_entries)
268 {
269 uint32_t mask = num_entries - 1;
270
271 if (num_entries & TF_EM_MAX_MASK)
272 return 0;
273
274 if (num_entries > TF_EM_MAX_ENTRY)
275 return 0;
276
277 return mask;
278 }
279
280 void
tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr * result,uint8_t * in_key,struct cfa_p4_eem_64b_entry * key_entry)281 tf_em_create_key_entry(struct cfa_p4_eem_entry_hdr *result,
282 uint8_t *in_key,
283 struct cfa_p4_eem_64b_entry *key_entry)
284 {
285 key_entry->hdr.word1 = result->word1;
286 key_entry->hdr.pointer = result->pointer;
287 memcpy(key_entry->key, in_key, TF_HW_EM_KEY_MAX_SIZE + 4);
288 }
289
290
/**
 * Return the number of page table pages needed to
 * reference the given number of next level pages.
 *
 * [in] num_pages
 *   Number of EM pages
 *
 * [in] page_size
 *   Size of each EM page
 *
 * Returns:
 *   Number of EM page table pages
 */
static uint32_t
tf_em_page_tbl_pgcnt(uint32_t num_pages,
		     uint32_t page_size)
{
	/* Ceiling-divide the page count by pointers-per-page.
	 * (The unreachable "return 0;" that followed has been removed.)
	 */
	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
		       MAX_PAGE_PTRS(page_size);
}
312
313 /**
314 * Given the number of data pages, page_size and the maximum
315 * number of page table levels (already determined), size
316 * the number of page table pages required at each level.
317 *
318 * [in] max_lvl
319 * Max number of levels
320 *
321 * [in] num_data_pages
322 * Number of EM data pages
323 *
324 * [in] page_size
325 * Size of an EM page
326 *
327 * [out] *page_cnt
328 * EM page count
329 */
330 static void
tf_em_size_page_tbls(int max_lvl,uint64_t num_data_pages,uint32_t page_size,uint32_t * page_cnt)331 tf_em_size_page_tbls(int max_lvl,
332 uint64_t num_data_pages,
333 uint32_t page_size,
334 uint32_t *page_cnt)
335 {
336 if (max_lvl == TF_PT_LVL_0) {
337 page_cnt[TF_PT_LVL_0] = num_data_pages;
338 } else if (max_lvl == TF_PT_LVL_1) {
339 page_cnt[TF_PT_LVL_1] = num_data_pages;
340 page_cnt[TF_PT_LVL_0] =
341 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
342 } else if (max_lvl == TF_PT_LVL_2) {
343 page_cnt[TF_PT_LVL_2] = num_data_pages;
344 page_cnt[TF_PT_LVL_1] =
345 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
346 page_cnt[TF_PT_LVL_0] =
347 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
348 } else {
349 return;
350 }
351 }
352
353 /**
354 * Given the page size, size of each data item (entry size),
355 * and the total number of entries needed, determine the number
356 * of page table levels and the number of data pages required.
357 *
358 * [in] page_size
359 * Page size
360 *
361 * [in] entry_size
362 * Entry size
363 *
364 * [in] num_entries
365 * Number of entries needed
366 *
367 * [out] num_data_pages
368 * Number of pages required
369 *
370 * Returns:
371 * Success - Number of EM page levels required
372 * -ENOMEM - Out of memory
373 */
374 static int
tf_em_size_page_tbl_lvl(uint32_t page_size,uint32_t entry_size,uint32_t num_entries,uint64_t * num_data_pages)375 tf_em_size_page_tbl_lvl(uint32_t page_size,
376 uint32_t entry_size,
377 uint32_t num_entries,
378 uint64_t *num_data_pages)
379 {
380 uint64_t lvl_data_size = page_size;
381 int lvl = TF_PT_LVL_0;
382 uint64_t data_size;
383
384 *num_data_pages = 0;
385 data_size = (uint64_t)num_entries * entry_size;
386
387 while (lvl_data_size < data_size) {
388 lvl++;
389
390 if (lvl == TF_PT_LVL_1)
391 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
392 page_size;
393 else if (lvl == TF_PT_LVL_2)
394 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
395 MAX_PAGE_PTRS(page_size) * page_size;
396 else
397 return -ENOMEM;
398 }
399
400 *num_data_pages = roundup(data_size, page_size) / page_size;
401
402 return lvl;
403 }
404
/**
 * Size the EM table based on capabilities
 *
 * [in] tbl
 *   EM table to size
 *
 * [in] page_size
 *   EM page size to size against
 *
 * Returns:
 *   0        - Success
 *   - EINVAL - Parameter error (only one of entry_size/num_entries set)
 *   - ENOMEM - Out of memory (table exceeds supported page-table depth)
 */
int
tf_em_size_table(struct hcapi_cfa_em_table *tbl,
		 uint32_t page_size)
{
	uint64_t num_data_pages;
	uint32_t *page_cnt;
	int max_lvl;
	uint32_t num_entries;
	uint32_t cnt = TF_EM_MIN_ENTRIES;

	/* Ignore entry if both size and number are zero */
	if (!tbl->entry_size && !tbl->num_entries)
		return 0;

	/* If only one is set then error */
	if (!tbl->entry_size || !tbl->num_entries)
		return -EINVAL;

	/* Determine number of page table levels and the number
	 * of data pages needed to process the given eem table.
	 */
	if (tbl->type == TF_RECORD_TABLE) {
		/*
		 * For action records just a memory size is provided. Work
		 * backwards to resolve to number of entries
		 */
		num_entries = tbl->num_entries / tbl->entry_size;
		if (num_entries < TF_EM_MIN_ENTRIES) {
			num_entries = TF_EM_MIN_ENTRIES;
		} else {
			/* Round up to the next supported power-of-2 count */
			while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
				cnt *= 2;
			num_entries = cnt;
		}
	} else {
		num_entries = tbl->num_entries;
	}

	/* NOTE(review): the sizing call below passes the raw
	 * tbl->num_entries, not the rounded num_entries computed above;
	 * for TF_RECORD_TABLE these can differ. Confirm whether the raw
	 * value is intended here — num_entries is otherwise only used in
	 * the warning log.
	 */
	max_lvl = tf_em_size_page_tbl_lvl(page_size,
					  tbl->entry_size,
					  tbl->num_entries,
					  &num_data_pages);
	if (max_lvl < 0) {
		TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
		TFP_DRV_LOG(WARNING,
			    "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
			    tbl->type, (uint64_t)num_entries * tbl->entry_size,
			    page_size);
		return -ENOMEM;
	}

	/* max_lvl is the highest level index; levels in use = index + 1 */
	tbl->num_lvl = max_lvl + 1;
	tbl->num_data_pages = num_data_pages;

	/* Determine the number of pages needed at each level */
	page_cnt = tbl->page_cnt;
	memset(page_cnt, 0, sizeof(tbl->page_cnt));
	tf_em_size_page_tbls(max_lvl, num_data_pages, page_size,
			     page_cnt);

	TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
	TFP_DRV_LOG(INFO,
		    "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 \
		    " l0: %u l1: %u l2: %u\n",
		    max_lvl + 1,
		    (uint64_t)num_data_pages * page_size,
		    num_data_pages,
		    page_cnt[TF_PT_LVL_0],
		    page_cnt[TF_PT_LVL_1],
		    page_cnt[TF_PT_LVL_2]);

	return 0;
}
489
490 /**
491 * Validates EM number of entries requested
492 *
493 * [in] tbl_scope_cb
494 * Pointer to table scope control block to be populated
495 *
496 * [in] parms
497 * Pointer to input parameters
498 *
499 * Returns:
500 * 0 - Success
501 * -EINVAL - Parameter error
502 */
503 int
tf_em_validate_num_entries(struct tf_tbl_scope_cb * tbl_scope_cb,struct tf_alloc_tbl_scope_parms * parms)504 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
505 struct tf_alloc_tbl_scope_parms *parms)
506 {
507 uint32_t cnt;
508
509 if (parms->rx_mem_size_in_mb != 0) {
510 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
511 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
512 + 1);
513 uint32_t num_entries = (parms->rx_mem_size_in_mb *
514 TF_MEGABYTE) / (key_b + action_b);
515
516 if (num_entries < TF_EM_MIN_ENTRIES) {
517 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
518 "%uMB\n",
519 parms->rx_mem_size_in_mb);
520 return -EINVAL;
521 }
522
523 cnt = TF_EM_MIN_ENTRIES;
524 while (num_entries > cnt &&
525 cnt <= TF_EM_MAX_ENTRIES)
526 cnt *= 2;
527
528 if (cnt > TF_EM_MAX_ENTRIES) {
529 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
530 "%u\n",
531 (parms->tx_num_flows_in_k * TF_KILOBYTE));
532 return -EINVAL;
533 }
534
535 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
536 } else {
537 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
538 TF_EM_MIN_ENTRIES ||
539 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
540 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
541 TFP_DRV_LOG(ERR,
542 "EEM: Invalid number of Rx flows "
543 "requested:%u max:%u\n",
544 parms->rx_num_flows_in_k * TF_KILOBYTE,
545 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
546 return -EINVAL;
547 }
548
549 /* must be a power-of-2 supported value
550 * in the range 32K - 128M
551 */
552 cnt = TF_EM_MIN_ENTRIES;
553 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
554 cnt <= TF_EM_MAX_ENTRIES)
555 cnt *= 2;
556
557 if (cnt > TF_EM_MAX_ENTRIES) {
558 TFP_DRV_LOG(ERR,
559 "EEM: Invalid number of Rx requested: %u\n",
560 (parms->rx_num_flows_in_k * TF_KILOBYTE));
561 return -EINVAL;
562 }
563 }
564
565 if (parms->tx_mem_size_in_mb != 0) {
566 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
567 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
568 + 1);
569 uint32_t num_entries = (parms->tx_mem_size_in_mb *
570 (TF_KILOBYTE * TF_KILOBYTE)) /
571 (key_b + action_b);
572
573 if (num_entries < TF_EM_MIN_ENTRIES) {
574 TFP_DRV_LOG(ERR,
575 "EEM: Insufficient memory requested:%uMB\n",
576 parms->rx_mem_size_in_mb);
577 return -EINVAL;
578 }
579
580 cnt = TF_EM_MIN_ENTRIES;
581 while (num_entries > cnt &&
582 cnt <= TF_EM_MAX_ENTRIES)
583 cnt *= 2;
584
585 if (cnt > TF_EM_MAX_ENTRIES) {
586 TFP_DRV_LOG(ERR,
587 "EEM: Invalid number of Tx requested: %u\n",
588 (parms->tx_num_flows_in_k * TF_KILOBYTE));
589 return -EINVAL;
590 }
591
592 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
593 } else {
594 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
595 TF_EM_MIN_ENTRIES ||
596 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
597 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
598 TFP_DRV_LOG(ERR,
599 "EEM: Invalid number of Tx flows "
600 "requested:%u max:%u\n",
601 (parms->tx_num_flows_in_k * TF_KILOBYTE),
602 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
603 return -EINVAL;
604 }
605
606 cnt = TF_EM_MIN_ENTRIES;
607 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
608 cnt <= TF_EM_MAX_ENTRIES)
609 cnt *= 2;
610
611 if (cnt > TF_EM_MAX_ENTRIES) {
612 TFP_DRV_LOG(ERR,
613 "EEM: Invalid number of Tx requested: %u\n",
614 (parms->tx_num_flows_in_k * TF_KILOBYTE));
615 return -EINVAL;
616 }
617 }
618
619 if (parms->rx_num_flows_in_k != 0 &&
620 parms->rx_max_key_sz_in_bits / 8 == 0) {
621 TFP_DRV_LOG(ERR,
622 "EEM: Rx key size required: %u\n",
623 (parms->rx_max_key_sz_in_bits));
624 return -EINVAL;
625 }
626
627 if (parms->tx_num_flows_in_k != 0 &&
628 parms->tx_max_key_sz_in_bits / 8 == 0) {
629 TFP_DRV_LOG(ERR,
630 "EEM: Tx key size required: %u\n",
631 (parms->tx_max_key_sz_in_bits));
632 return -EINVAL;
633 }
634 /* Rx */
635 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
636 parms->rx_num_flows_in_k * TF_KILOBYTE;
637 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
638 parms->rx_max_key_sz_in_bits / 8;
639
640 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
641 parms->rx_num_flows_in_k * TF_KILOBYTE;
642 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
643 parms->rx_max_key_sz_in_bits / 8;
644
645 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
646 parms->rx_num_flows_in_k * TF_KILOBYTE;
647 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
648 parms->rx_max_action_entry_sz_in_bits / 8;
649
650 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
651
652 /* Tx */
653 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
654 parms->tx_num_flows_in_k * TF_KILOBYTE;
655 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
656 parms->tx_max_key_sz_in_bits / 8;
657
658 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
659 parms->tx_num_flows_in_k * TF_KILOBYTE;
660 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
661 parms->tx_max_key_sz_in_bits / 8;
662
663 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
664 parms->tx_num_flows_in_k * TF_KILOBYTE;
665 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
666 parms->tx_max_action_entry_sz_in_bits / 8;
667
668 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
669
670 return 0;
671 }
672
/** insert EEM entry API
 *
 * Cuckoo-style two-table insert: hash the key, try the KEY0 slot, and
 * on failure fall back to the KEY1 slot.  On success the GFID/flow id
 * and flow handle output fields are populated.
 *
 * returns:
 *  0
 *  TF_ERR	    - unable to get lock
 *
 * insert callback returns:
 *   0
 *   TF_ERR_EM_DUP  - key is already in table
 */
static int
tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
		    struct tf_insert_em_entry_parms *parms)
{
	uint32_t mask;
	uint32_t key0_hash;
	uint32_t key1_hash;
	uint32_t key0_index;
	uint32_t key1_index;
	struct cfa_p4_eem_64b_entry key_entry;
	uint32_t index;
	enum hcapi_cfa_em_table_type table_type;
	uint32_t gfid;
	struct hcapi_cfa_hwop op;
	struct hcapi_cfa_key_tbl key_tbl;
	struct hcapi_cfa_key_data key_obj;
	struct hcapi_cfa_key_loc key_loc;
	uint64_t big_hash;
	int rc;

	/* Get mask to use on hash; zero mask means the table size is
	 * not a supported power-of-2, so bail out.
	 */
	mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);

	if (!mask)
		return -EINVAL;

#ifdef TF_EEM_DEBUG
	dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
#endif

	/* One 64-bit hash yields both table indexes: the high word for
	 * KEY0, the low word for KEY1.
	 */
	big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
				      (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
	key0_hash = (uint32_t)(big_hash >> 32);
	key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);

	key0_index = key0_hash & mask;
	key1_index = key1_hash & mask;

#ifdef TF_EEM_DEBUG
	TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
	TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
#endif
	/*
	 * Use the "result" arg to populate all of the key entry then
	 * store the byte swapped "raw" entry in a local copy ready
	 * for insertion in to the table.
	 */
	tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
				((uint8_t *)parms->key),
				&key_entry);

	/*
	 * Try to add to Key0 table, if that does not work then
	 * try the key1 table.
	 */
	index = key0_index;
	op.opcode = HCAPI_CFA_HWOPS_ADD;
	key_tbl.base0 =
		(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
	key_tbl.page_size = TF_EM_PAGE_SIZE;
	key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
	key_obj.data = (uint8_t *)&key_entry;
	key_obj.size = TF_EM_KEY_RECORD_SIZE;

	rc = hcapi_cfa_key_hw_op(&op,
				 &key_tbl,
				 &key_obj,
				 &key_loc);

	if (rc == 0) {
		table_type = TF_KEY0_TABLE;
	} else {
		/* KEY0 slot taken (or op failed) — retry in KEY1 */
		index = key1_index;

		key_tbl.base0 =
			(uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
		key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;

		rc = hcapi_cfa_key_hw_op(&op,
					 &key_tbl,
					 &key_obj,
					 &key_loc);
		if (rc != 0)
			return rc;

		table_type = TF_KEY1_TABLE;
	}

	/* Encode where the entry landed into the caller-visible ids */
	TF_SET_GFID(gfid,
		    index,
		    table_type);
	TF_SET_FLOW_ID(parms->flow_id,
		       gfid,
		       TF_GFID_TABLE_EXTERNAL,
		       parms->dir);
	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
				     0,
				     0,
				     0,
				     index,
				     0,
				     table_type);

	return 0;
}
788
789 /** delete EEM hash entry API
790 *
791 * returns:
792 * 0
793 * -EINVAL - parameter error
794 * TF_NO_SESSION - bad session ID
795 * TF_ERR_TBL_SCOPE - invalid table scope
796 * TF_ERR_TBL_IF - invalid table interface
797 *
798 * insert callback returns
799 * 0
800 * TF_NO_EM_MATCH - entry not found
801 */
802 static int
tf_delete_eem_entry(struct tf_tbl_scope_cb * tbl_scope_cb,struct tf_delete_em_entry_parms * parms)803 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
804 struct tf_delete_em_entry_parms *parms)
805 {
806 enum hcapi_cfa_em_table_type hash_type;
807 uint32_t index;
808 struct hcapi_cfa_hwop op;
809 struct hcapi_cfa_key_tbl key_tbl;
810 struct hcapi_cfa_key_data key_obj;
811 struct hcapi_cfa_key_loc key_loc;
812 int rc;
813
814 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
815 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
816
817 op.opcode = HCAPI_CFA_HWOPS_DEL;
818 key_tbl.base0 =
819 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables
820 [(hash_type == 0 ? TF_KEY0_TABLE : TF_KEY1_TABLE)];
821 key_tbl.page_size = TF_EM_PAGE_SIZE;
822 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
823 key_obj.data = NULL;
824 key_obj.size = TF_EM_KEY_RECORD_SIZE;
825
826 rc = hcapi_cfa_key_hw_op(&op,
827 &key_tbl,
828 &key_obj,
829 &key_loc);
830
831 if (!rc)
832 return rc;
833
834 return 0;
835 }
836
837 /** insert EM hash entry API
838 *
839 * returns:
840 * 0 - Success
841 * -EINVAL - Error
842 */
843 int
tf_em_insert_ext_entry(struct tf * tfp __rte_unused,struct tf_insert_em_entry_parms * parms)844 tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
845 struct tf_insert_em_entry_parms *parms)
846 {
847 struct tf_tbl_scope_cb *tbl_scope_cb;
848
849 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
850 if (tbl_scope_cb == NULL) {
851 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
852 return -EINVAL;
853 }
854
855 return tf_insert_eem_entry
856 (tbl_scope_cb,
857 parms);
858 }
859
860 /** Delete EM hash entry API
861 *
862 * returns:
863 * 0 - Success
864 * -EINVAL - Error
865 */
866 int
tf_em_delete_ext_entry(struct tf * tfp __rte_unused,struct tf_delete_em_entry_parms * parms)867 tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
868 struct tf_delete_em_entry_parms *parms)
869 {
870 struct tf_tbl_scope_cb *tbl_scope_cb;
871
872 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
873 if (tbl_scope_cb == NULL) {
874 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
875 return -EINVAL;
876 }
877
878 return tf_delete_eem_entry(tbl_scope_cb, parms);
879 }
880
881
882 int
tf_em_ext_common_bind(struct tf * tfp,struct tf_em_cfg_parms * parms)883 tf_em_ext_common_bind(struct tf *tfp,
884 struct tf_em_cfg_parms *parms)
885 {
886 int rc;
887 int i;
888 struct tf_rm_create_db_parms db_cfg = { 0 };
889 uint8_t db_exists = 0;
890
891 TF_CHECK_PARMS2(tfp, parms);
892
893 if (init) {
894 TFP_DRV_LOG(ERR,
895 "EM Ext DB already initialized\n");
896 return -EINVAL;
897 }
898
899 db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
900 db_cfg.num_elements = parms->num_elements;
901 db_cfg.cfg = parms->cfg;
902
903 for (i = 0; i < TF_DIR_MAX; i++) {
904 db_cfg.dir = i;
905 db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt;
906
907 /* Check if we got any request to support EEM, if so
908 * we build an EM Ext DB holding Table Scopes.
909 */
910 if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_TBL_SCOPE] == 0)
911 continue;
912
913 db_cfg.rm_db = &eem_db[i];
914 rc = tf_rm_create_db(tfp, &db_cfg);
915 if (rc) {
916 TFP_DRV_LOG(ERR,
917 "%s: EM Ext DB creation failed\n",
918 tf_dir_2_str(i));
919
920 return rc;
921 }
922 db_exists = 1;
923 }
924
925 if (db_exists)
926 init = 1;
927
928 mem_type = parms->mem_type;
929
930 return 0;
931 }
932
933 int
tf_em_ext_common_unbind(struct tf * tfp)934 tf_em_ext_common_unbind(struct tf *tfp)
935 {
936 int rc;
937 int i;
938 struct tf_rm_free_db_parms fparms = { 0 };
939
940 TF_CHECK_PARMS1(tfp);
941
942 /* Bail if nothing has been initialized */
943 if (!init) {
944 TFP_DRV_LOG(INFO,
945 "No EM Ext DBs created\n");
946 return 0;
947 }
948
949 for (i = 0; i < TF_DIR_MAX; i++) {
950 fparms.dir = i;
951 fparms.rm_db = eem_db[i];
952 rc = tf_rm_free_db(tfp, &fparms);
953 if (rc)
954 return rc;
955
956 eem_db[i] = NULL;
957 }
958
959 init = 0;
960
961 return 0;
962 }
963
964 /**
965 * Sets the specified external table type element.
966 *
967 * This API sets the specified element data
968 *
969 * [in] tfp
970 * Pointer to TF handle
971 *
972 * [in] parms
973 * Pointer to table set parameters
974 *
975 * Returns
976 * - (0) if successful.
977 * - (-EINVAL) on failure.
978 */
tf_tbl_ext_common_set(struct tf * tfp,struct tf_tbl_set_parms * parms)979 int tf_tbl_ext_common_set(struct tf *tfp,
980 struct tf_tbl_set_parms *parms)
981 {
982 int rc = 0;
983 struct tf_tbl_scope_cb *tbl_scope_cb;
984 uint32_t tbl_scope_id;
985 struct hcapi_cfa_hwop op;
986 struct hcapi_cfa_key_tbl key_tbl;
987 struct hcapi_cfa_key_data key_obj;
988 struct hcapi_cfa_key_loc key_loc;
989
990 TF_CHECK_PARMS2(tfp, parms);
991
992 if (parms->data == NULL) {
993 TFP_DRV_LOG(ERR,
994 "%s, invalid parms->data\n",
995 tf_dir_2_str(parms->dir));
996 return -EINVAL;
997 }
998
999 tbl_scope_id = parms->tbl_scope_id;
1000
1001 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1002 TFP_DRV_LOG(ERR,
1003 "%s, Table scope not allocated\n",
1004 tf_dir_2_str(parms->dir));
1005 return -EINVAL;
1006 }
1007
1008 /* Get the table scope control block associated with the
1009 * external pool
1010 */
1011 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1012
1013 if (tbl_scope_cb == NULL) {
1014 TFP_DRV_LOG(ERR,
1015 "%s, table scope error\n",
1016 tf_dir_2_str(parms->dir));
1017 return -EINVAL;
1018 }
1019
1020 op.opcode = HCAPI_CFA_HWOPS_PUT;
1021 key_tbl.base0 =
1022 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1023 key_tbl.page_size = TF_EM_PAGE_SIZE;
1024 key_obj.offset = parms->idx;
1025 key_obj.data = parms->data;
1026 key_obj.size = parms->data_sz_in_bytes;
1027
1028 rc = hcapi_cfa_key_hw_op(&op,
1029 &key_tbl,
1030 &key_obj,
1031 &key_loc);
1032
1033 return rc;
1034 }
1035
/* Thin dispatch wrapper: allocate a table scope via tf_em_ext_alloc() */
int
tf_em_ext_common_alloc(struct tf *tfp,
		       struct tf_alloc_tbl_scope_parms *parms)
{
	int rc = tf_em_ext_alloc(tfp, parms);

	return rc;
}
1042
/* Thin dispatch wrapper: free a table scope via tf_em_ext_free() */
int
tf_em_ext_common_free(struct tf *tfp,
		      struct tf_free_tbl_scope_parms *parms)
{
	int rc = tf_em_ext_free(tfp, parms);

	return rc;
}
1049
tf_em_ext_map_tbl_scope(struct tf * tfp,struct tf_map_tbl_scope_parms * parms)1050 int tf_em_ext_map_tbl_scope(struct tf *tfp,
1051 struct tf_map_tbl_scope_parms *parms)
1052 {
1053 int rc = 0;
1054 struct tf_session *tfs;
1055 struct tf_tbl_scope_cb *tbl_scope_cb;
1056 struct tf_global_cfg_parms gcfg_parms = { 0 };
1057 struct tfp_calloc_parms aparms;
1058 uint32_t *data, *mask;
1059 uint32_t sz_in_bytes = 8;
1060 struct tf_dev_info *dev;
1061
1062 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
1063
1064 if (tbl_scope_cb == NULL) {
1065 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb tbl_scope_id(%d)\n",
1066 parms->tbl_scope_id);
1067 return -EINVAL;
1068 }
1069
1070 /* Retrieve the session information */
1071 rc = tf_session_get_session_internal(tfp, &tfs);
1072 if (rc)
1073 return rc;
1074
1075 /* Retrieve the device information */
1076 rc = tf_session_get_device(tfs, &dev);
1077 if (rc)
1078 return rc;
1079
1080 if (dev->ops->tf_dev_map_tbl_scope == NULL) {
1081 rc = -EOPNOTSUPP;
1082 TFP_DRV_LOG(ERR,
1083 "Map table scope operation not supported, rc:%s\n",
1084 strerror(-rc));
1085 return rc;
1086 }
1087
1088 aparms.nitems = 2;
1089 aparms.size = sizeof(uint32_t);
1090 aparms.alignment = 0;
1091
1092 if (tfp_calloc(&aparms) != 0) {
1093 TFP_DRV_LOG(ERR, "Map tbl scope alloc data error %s\n",
1094 strerror(ENOMEM));
1095 return -ENOMEM;
1096 }
1097 data = aparms.mem_va;
1098
1099 if (tfp_calloc(&aparms) != 0) {
1100 TFP_DRV_LOG(ERR, "Map tbl scope alloc mask error %s\n",
1101 strerror(ENOMEM));
1102 rc = -ENOMEM;
1103 goto clean;
1104 }
1105 mask = aparms.mem_va;
1106
1107 rc = dev->ops->tf_dev_map_parif(tfp, parms->parif_bitmask,
1108 tbl_scope_cb->pf,
1109 (uint8_t *)data, (uint8_t *)mask,
1110 sz_in_bytes);
1111
1112 if (rc) {
1113 TFP_DRV_LOG(ERR,
1114 "Map table scope config failure, rc:%s\n",
1115 strerror(-rc));
1116 goto cleaner;
1117 }
1118
1119 gcfg_parms.type =
1120 (enum tf_global_config_type)TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF;
1121 gcfg_parms.offset = 0;
1122 gcfg_parms.config = (uint8_t *)data;
1123 gcfg_parms.config_mask = (uint8_t *)mask;
1124 gcfg_parms.config_sz_in_bytes = sizeof(uint64_t);
1125
1126
1127 rc = tf_msg_set_global_cfg(tfp, &gcfg_parms);
1128 if (rc) {
1129 TFP_DRV_LOG(ERR,
1130 "Map tbl scope, set failed, rc:%s\n",
1131 strerror(-rc));
1132 }
1133 cleaner:
1134 tfp_free(mask);
1135 clean:
1136 tfp_free(data);
1137
1138 return rc;
1139 }
1140